struct cl_object_header {
/** Standard lu_object_header. cl_object::co_lu::lo_header points
* here. */
- struct lu_object_header coh_lu;
+ struct lu_object_header coh_lu;
/** \name locks
* \todo XXX move locks below to separate cache lines; they are
* mostly useless otherwise.
*/
/** @{ */
/** Lock protecting lock list. */
- spinlock_t coh_lock_guard;
- /** @} locks */
- /** List of cl_lock's granted for this object. */
- cfs_list_t coh_locks;
+ spinlock_t coh_lock_guard;
+ /** @} locks */
+ /** List of cl_lock's granted for this object. */
+ struct list_head coh_locks;
/**
* Parent object. It is assumed that an object has a well-defined
* Helper macro: iterate over all layers of the object \a obj, assigning every
* layer top-to-bottom to \a slice.
*/
-#define cl_object_for_each(slice, obj) \
- cfs_list_for_each_entry((slice), \
- &(obj)->co_lu.lo_header->loh_layers, \
- co_lu.lo_linkage)
+#define cl_object_for_each(slice, obj) \
+ list_for_each_entry((slice), \
+ &(obj)->co_lu.lo_header->loh_layers,\
+ co_lu.lo_linkage)
+
/**
* Helper macro: iterate over all layers of the object \a obj, assigning every
* layer bottom-to-top to \a slice.
*/
-#define cl_object_for_each_reverse(slice, obj) \
- cfs_list_for_each_entry_reverse((slice), \
- &(obj)->co_lu.lo_header->loh_layers, \
- co_lu.lo_linkage)
+#define cl_object_for_each_reverse(slice, obj) \
+ list_for_each_entry_reverse((slice), \
+ &(obj)->co_lu.lo_header->loh_layers,\
+ co_lu.lo_linkage)
/** @} cl_object */
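For reference, this is how the converted iterator reads at a call site. A
minimal sketch, assuming the usual cl_object_operations layout; the
coo_attr_set dispatch shown here is illustrative, not taken from this patch:

    /* Walk object layers top-to-bottom, stopping at the first failure.
     * `slice` is assigned each struct cl_object layer in turn. */
    static int example_attr_update(const struct lu_env *env,
                                   struct cl_object *obj,
                                   const struct cl_attr *attr, unsigned valid)
    {
            struct cl_object *slice;
            int result = 0;

            cl_object_for_each(slice, obj) {
                    if (slice->co_ops->coo_attr_set != NULL) {
                            result = slice->co_ops->coo_attr_set(env, slice,
                                                                 attr, valid);
                            if (result != 0)
                                    break;
                    }
            }
            return result;
    }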
#define CL_PAGE_EOF ((pgoff_t)~0ull)
struct cl_object *cpl_obj;
const struct cl_page_operations *cpl_ops;
/** Linkage into cl_page::cp_layers. Immutable after creation. */
- cfs_list_t cpl_linkage;
+ struct list_head cpl_linkage;
};
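The conversion throughout is mechanical: each libcfs list wrapper is replaced
by the native kernel primitive it aliased. The mapping, as exercised by the
hunks in this patch:

    /* libcfs wrapper                     ->  native primitive
     * cfs_list_t                         ->  struct list_head
     * cfs_hlist_node_t                   ->  struct hlist_node
     * cfs_hlist_head_t                   ->  struct hlist_head
     * CFS_INIT_LIST_HEAD()               ->  INIT_LIST_HEAD()
     * CFS_INIT_HLIST_NODE()              ->  INIT_HLIST_NODE()
     * cfs_list_empty()                   ->  list_empty()
     * cfs_list_entry()                   ->  list_entry()
     * cfs_list_del_init()                ->  list_del_init()
     * cfs_list_for_each_entry()          ->  list_for_each_entry()
     * cfs_list_for_each_entry_safe()     ->  list_for_each_entry_safe()
     * cfs_list_for_each_entry_reverse()  ->  list_for_each_entry_reverse()
     */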
/**
* List of enclosed locks, so far. Locks are linked here through
* cl_lock::cll_inclosure.
*/
- cfs_list_t clc_list;
+ struct list_head clc_list;
/**
* True iff closure is in a `wait' mode. This determines what
* cl_lock_enclosure() does when a lock L to be added to the closure
*/
struct cl_lock {
/** Reference counter. */
- atomic_t cll_ref;
+ atomic_t cll_ref;
/** List of slices. Immutable after creation. */
- cfs_list_t cll_layers;
- /**
- * Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected
- * by cl_lock::cll_descr::cld_obj::coh_lock_guard.
- */
- cfs_list_t cll_linkage;
- /**
- * Parameters of this lock. Protected by
- * cl_lock::cll_descr::cld_obj::coh_lock_guard nested within
- * cl_lock::cll_guard. Modified only on lock creation and in
- * cl_lock_modify().
- */
+ struct list_head cll_layers;
+ /**
+ * Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected
+ * by cl_lock::cll_descr::cld_obj::coh_lock_guard.
+ */
+ struct list_head cll_linkage;
+ /**
+ * Parameters of this lock. Protected by
+ * cl_lock::cll_descr::cld_obj::coh_lock_guard nested within
+ * cl_lock::cll_guard. Modified only on lock creation and in
+ * cl_lock_modify().
+ */
struct cl_lock_descr cll_descr;
/** Protected by cl_lock::cll_guard. */
enum cl_lock_state cll_state;
*
* \see cl_lock_closure
*/
- cfs_list_t cll_inclosure;
+ struct list_head cll_inclosure;
/**
* Conflict lock at queuing time.
*/
struct cl_object *cls_obj;
const struct cl_lock_operations *cls_ops;
/** Linkage into cl_lock::cll_layers. Immutable after creation. */
- cfs_list_t cls_linkage;
+ struct list_head cls_linkage;
};
/**
* @{
*/
struct cl_page_list {
- unsigned pl_nr;
- cfs_list_t pl_pages;
- struct task_struct *pl_owner;
+ unsigned pl_nr;
+ struct list_head pl_pages;
+ struct task_struct *pl_owner;
};
/**
* \see vvp_io, lov_io, osc_io, ccc_io
*/
struct cl_io_slice {
- struct cl_io *cis_io;
- /** corresponding object slice. Immutable after creation. */
- struct cl_object *cis_obj;
- /** io operations. Immutable after creation. */
- const struct cl_io_operations *cis_iop;
- /**
- * linkage into a list of all slices for a given cl_io, hanging off
- * cl_io::ci_layers. Immutable after creation.
- */
- cfs_list_t cis_linkage;
+ struct cl_io *cis_io;
+ /** corresponding object slice. Immutable after creation. */
+ struct cl_object *cis_obj;
+ /** io operations. Immutable after creation. */
+ const struct cl_io_operations *cis_iop;
+ /**
+ * linkage into a list of all slices for a given cl_io, hanging off
+ * cl_io::ci_layers. Immutable after creation.
+ */
+ struct list_head cis_linkage;
};
typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
* same lock can be part of multiple io's simultaneously.
*/
struct cl_io_lock_link {
- /** linkage into one of cl_lockset lists. */
- cfs_list_t cill_linkage;
- struct cl_lock_descr cill_descr;
- struct cl_lock *cill_lock;
- /** optional destructor */
- void (*cill_fini)(const struct lu_env *env,
- struct cl_io_lock_link *link);
+ /** linkage into one of cl_lockset lists. */
+ struct list_head cill_linkage;
+ struct cl_lock_descr cill_descr;
+ struct cl_lock *cill_lock;
+ /** optional destructor */
+ void (*cill_fini)(const struct lu_env *env,
+ struct cl_io_lock_link *link);
};
/**
* enqueued.
*/
struct cl_lockset {
- /** locks to be acquired. */
- cfs_list_t cls_todo;
- /** locks currently being processed. */
- cfs_list_t cls_curr;
- /** locks acquired. */
- cfs_list_t cls_done;
+ /** locks to be acquired. */
+ struct list_head cls_todo;
+ /** locks currently being processed. */
+ struct list_head cls_curr;
+ /** locks acquired. */
+ struct list_head cls_done;
};
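Initializing the converted lockset is now plain INIT_LIST_HEAD() on each
queue; a minimal sketch (the helper name is hypothetical):

    static void example_lockset_init(struct cl_lockset *set)
    {
            INIT_LIST_HEAD(&set->cls_todo);
            INIT_LIST_HEAD(&set->cls_curr);
            INIT_LIST_HEAD(&set->cls_done);
    }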
/**
*/
struct cl_io *ci_parent;
/** List of slices. Immutable after creation. */
- cfs_list_t ci_layers;
+ struct list_head ci_layers;
/** list of locks (to be) acquired by this io. */
struct cl_lockset ci_lockset;
/** lock requirements, this is just a help info for sublayers. */
* req's pages.
*/
struct cl_req {
- enum cl_req_type crq_type;
- /** A list of pages being transfered */
- cfs_list_t crq_pages;
- /** Number of pages in cl_req::crq_pages */
- unsigned crq_nrpages;
- /** An array of objects which pages are in ->crq_pages */
- struct cl_req_obj *crq_o;
- /** Number of elements in cl_req::crq_objs[] */
- unsigned crq_nrobjs;
- cfs_list_t crq_layers;
+ enum cl_req_type crq_type;
+ /** A list of pages being transferred */
+ struct list_head crq_pages;
+ /** Number of pages in cl_req::crq_pages */
+ unsigned crq_nrpages;
+ /** An array of objects which pages are in ->crq_pages */
+ struct cl_req_obj *crq_o;
+ /** Number of elements in cl_req::crq_objs[] */
+ unsigned crq_nrobjs;
+ struct list_head crq_layers;
};
/**
* Per-layer state for request.
*/
struct cl_req_slice {
- struct cl_req *crs_req;
- struct cl_device *crs_dev;
- cfs_list_t crs_linkage;
- const struct cl_req_operations *crs_ops;
+ struct cl_req *crs_req;
+ struct cl_device *crs_dev;
+ struct list_head crs_linkage;
+ const struct cl_req_operations *crs_ops;
};
/* @} cl_req */
int cl_lock_state_wait (const struct lu_env *env, struct cl_lock *lock);
void cl_lock_state_set (const struct lu_env *env, struct cl_lock *lock,
enum cl_lock_state state);
-int cl_queue_match (const cfs_list_t *queue,
- const struct cl_lock_descr *need);
+int cl_queue_match(const struct list_head *queue,
+ const struct cl_lock_descr *need);
void cl_lock_mutex_get (const struct lu_env *env, struct cl_lock *lock);
int cl_lock_mutex_try (const struct lu_env *env, struct cl_lock *lock);
*/
static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist)
{
- LASSERT(plist->pl_nr > 0);
- return cfs_list_entry(plist->pl_pages.prev, struct cl_page, cp_batch);
+ LASSERT(plist->pl_nr > 0);
+ return list_entry(plist->pl_pages.prev, struct cl_page, cp_batch);
}
static inline struct cl_page *cl_page_list_first(struct cl_page_list *plist)
{
LASSERT(plist->pl_nr > 0);
- return cfs_list_entry(plist->pl_pages.next, struct cl_page, cp_batch);
+ return list_entry(plist->pl_pages.next, struct cl_page, cp_batch);
}
/**
* Iterate over pages in a page list.
*/
#define cl_page_list_for_each(page, list) \
- cfs_list_for_each_entry((page), &(list)->pl_pages, cp_batch)
+ list_for_each_entry((page), &(list)->pl_pages, cp_batch)
/**
* Iterate over pages in a page list, taking possible removals into account.
*/
#define cl_page_list_for_each_safe(page, temp, list) \
- cfs_list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
+ list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
void cl_page_list_init (struct cl_page_list *plist);
void cl_page_list_add (struct cl_page_list *plist, struct cl_page *page);
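Together these give the usual drain pattern at a call site; a sketch,
assuming cl_page_list_del() with its customary signature (the per-page work
is hypothetical):

    struct cl_page *page;
    struct cl_page *temp;

    /* Safe variant is required because each iteration unlinks `page`. */
    cl_page_list_for_each_safe(page, temp, plist) {
            example_process_page(env, page);    /* hypothetical work */
            cl_page_list_del(env, plist, page);
    }
    LASSERT(plist->pl_nr == 0);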
#define MAX_COMMIT_CB_STR_LEN 32
struct dt_txn_commit_cb {
- cfs_list_t dcb_linkage;
- dt_cb_t dcb_func;
- __u32 dcb_magic;
- char dcb_name[MAX_COMMIT_CB_STR_LEN];
+ struct list_head dcb_linkage;
+ dt_cb_t dcb_func;
+ __u32 dcb_magic;
+ char dcb_name[MAX_COMMIT_CB_STR_LEN];
};
/**
* way, because callbacks are supposed to be added/deleted only during
* single-threaded start-up shut-down procedures.
*/
- cfs_list_t dd_txn_callbacks;
+ struct list_head dd_txn_callbacks;
unsigned int dd_record_fid_accessed:1;
};
*/
struct local_oid_storage {
/* all initialized llog systems on this node linked by this */
- cfs_list_t los_list;
+ struct list_head los_list;
/* how many handle's reference this los has */
atomic_t los_refcount;
int (*dtc_txn_stop)(const struct lu_env *env,
struct thandle *txn, void *cookie);
void (*dtc_txn_commit)(struct thandle *txn, void *cookie);
- void *dtc_cookie;
- __u32 dtc_tag;
- cfs_list_t dtc_linkage;
+ void *dtc_cookie;
+ __u32 dtc_tag;
+ struct list_head dtc_linkage;
};
void dt_txn_callback_add(struct dt_device *dev, struct dt_txn_callback *cb);
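For context, registering one of these callbacks follows the pattern below; a
sketch, with the stop handler and tag choice being illustrative only:

    static int example_txn_stop(const struct lu_env *env,
                                struct thandle *txn, void *cookie)
    {
            /* hypothetical per-transaction bookkeeping */
            return 0;
    }

    static struct dt_txn_callback example_cb = {
            .dtc_txn_stop = example_txn_stop,
            .dtc_cookie   = NULL,
            .dtc_tag      = LCT_MD_THREAD,  /* illustrative tag */
    };

    /* dt_txn_callback_add(dt, &example_cb); keep example_cb allocated
     * until the matching dt_txn_callback_del(dt, &example_cb). */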
*
* \see ccc_page::cpg_pending_linkage
*/
- cfs_list_t cob_pending_list;
+ struct list_head cob_pending_list;
/**
* Access to this counter is protected by inode->i_sem. Now that
/**
* List of entities(OSCs) for this LRU cache
*/
- cfs_list_t ccc_lru;
+ struct list_head ccc_lru;
/**
* Max # of LRU entries
*/
typedef void (*cntr_init_callback)(struct lprocfs_stats *stats);
struct obd_job_stats {
- cfs_hash_t *ojs_hash;
- cfs_list_t ojs_list;
- rwlock_t ojs_lock; /* protect the obj_list */
- cntr_init_callback ojs_cntr_init_fn;
- int ojs_cntr_num;
- int ojs_cleanup_interval;
- time_t ojs_last_cleanup;
+ cfs_hash_t *ojs_hash;
+ struct list_head ojs_list;
+ rwlock_t ojs_lock; /* protect the obj_list */
+ cntr_init_callback ojs_cntr_init_fn;
+ int ojs_cntr_num;
+ int ojs_cleanup_interval;
+ time_t ojs_last_cleanup;
};
#ifdef LPROCFS
/**
* Link the device to the site.
**/
- cfs_list_t ld_linkage;
+ struct list_head ld_linkage;
};
struct lu_device_type_operations;
/**
* Linkage into list of all layers.
*/
- cfs_list_t lo_linkage;
+ struct list_head lo_linkage;
/**
* Link to the device, for debugging.
*/
/**
* number of busy object on this bucket
*/
- long lsb_busy;
+ long lsb_busy;
/**
* LRU list, updated on each access to object. Protected by
* bucket lock of lu_site::ls_obj_hash.
* moved to the lu_site::ls_lru.prev (this is due to the non-existence
* of list_for_each_entry_safe_reverse()).
*/
- cfs_list_t lsb_lru;
+ struct list_head lsb_lru;
/**
* Wait-queue signaled when an object in this site is ultimately
* destroyed (lu_object_free()). It is used by lu_object_find() to
*
* \see htable_lookup().
*/
- wait_queue_head_t lsb_marche_funebre;
+ wait_queue_head_t lsb_marche_funebre;
};
enum {
/**
* objects hash table
*/
- cfs_hash_t *ls_obj_hash;
+ cfs_hash_t *ls_obj_hash;
/**
* index of bucket on hash table while purging
*/
- int ls_purge_start;
- /**
- * Top-level device for this stack.
- */
- struct lu_device *ls_top_dev;
+ int ls_purge_start;
+ /**
+ * Top-level device for this stack.
+ */
+ struct lu_device *ls_top_dev;
/**
* Bottom-level device for this stack
*/
struct lu_device *ls_bottom_dev;
- /**
- * Linkage into global list of sites.
- */
- cfs_list_t ls_linkage;
- /**
- * List for lu device for this site, protected
- * by ls_ld_lock.
- **/
- cfs_list_t ls_ld_linkage;
+ /**
+ * Linkage into global list of sites.
+ */
+ struct list_head ls_linkage;
+ /**
+ * List for lu device for this site, protected
+ * by ls_ld_lock.
+ **/
+ struct list_head ls_ld_linkage;
spinlock_t ls_ld_lock;
/**
* Lock to serialize site purge.
*/
static inline struct lu_object *lu_object_top(struct lu_object_header *h)
{
- LASSERT(!cfs_list_empty(&h->loh_layers));
- return container_of0(h->loh_layers.next, struct lu_object, lo_linkage);
+ LASSERT(!list_empty(&h->loh_layers));
+ return container_of0(h->loh_layers.next, struct lu_object, lo_linkage);
}
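Since loh_layers is an ordinary list_head, the bottom layer is simply the
tail entry; a hedged counterpart sketch (lu_object_bottom() is hypothetical,
not part of the API):

    static inline struct lu_object *lu_object_bottom(struct lu_object_header *h)
    {
            LASSERT(!list_empty(&h->loh_layers));
            return container_of0(h->loh_layers.prev,
                                 struct lu_object, lo_linkage);
    }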
/**
* Pointer to an array with key values. Internal implementation
* detail.
*/
- void **lc_value;
- /**
- * Linkage into a list of all remembered contexts. Only
- * `non-transient' contexts, i.e., ones created for service threads
- * are placed here.
- */
- cfs_list_t lc_remember;
- /**
- * Version counter used to skip calls to lu_context_refill() when no
- * keys were registered.
- */
- unsigned lc_version;
+ void **lc_value;
+ /**
+ * Linkage into a list of all remembered contexts. Only
+ * `non-transient' contexts, i.e., ones created for service threads
+ * are placed here.
+ */
+ struct list_head lc_remember;
+ /**
+ * Version counter used to skip calls to lu_context_refill() when no
+ * keys were registered.
+ */
+ unsigned lc_version;
/**
* Debugging cookie.
*/
- unsigned lc_cookie;
+ unsigned lc_cookie;
};
/**
* Spin-lock protecting lu_ref::lf_list.
*/
spinlock_t lf_guard;
- /**
- * List of all outstanding references (each represented by struct
- * lu_ref_link), pointing to this object.
- */
- cfs_list_t lf_list;
+ /**
+ * List of all outstanding references (each represented by struct
+ * lu_ref_link), pointing to this object.
+ */
+ struct list_head lf_list;
/**
* # of links.
*/
/**
* Linkage into a global list of all lu_ref's (lu_ref_refs).
*/
- cfs_list_t lf_linkage;
+ struct list_head lf_linkage;
};
struct lu_ref_link {
struct lu_ref *ll_ref;
- cfs_list_t ll_linkage;
+ struct list_head ll_linkage;
const char *ll_scope;
const void *ll_source;
};
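Each lu_ref_link on lf_list records one outstanding reference, keyed by a
scope string and the holder's address. A usage sketch against the
l_reference field of struct ldlm_lock shown elsewhere in this patch (the
scope string is illustrative):

    lu_ref_add(&lock->l_reference, "example-scope", current);
    /* ... hold and use the lock ... */
    lu_ref_del(&lock->l_reference, "example-scope", current);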
}
struct client_capa {
- struct inode *inode;
- cfs_list_t lli_list; /* link to lli_oss_capas */
+ struct inode *inode;
+ struct list_head lli_list; /* link to lli_oss_capas */
};
struct target_capa {
- cfs_hlist_node_t c_hash; /* link to capa hash */
+ struct hlist_node c_hash; /* link to capa hash */
};
struct obd_capa {
- cfs_list_t c_list; /* link to capa_list */
+ struct list_head c_list; /* link to capa_list */
struct lustre_capa c_capa; /* capa */
atomic_t c_refc; /* ref count */
typedef int (* renew_capa_cb_t)(struct obd_capa *, struct lustre_capa *);
/* obdclass/capa.c */
-extern cfs_list_t capa_list[];
+extern struct list_head capa_list[];
extern spinlock_t capa_lock;
extern int capa_count[];
extern struct kmem_cache *capa_cachep;
-cfs_hlist_head_t *init_capa_hash(void);
-void cleanup_capa_hash(cfs_hlist_head_t *hash);
+struct hlist_head *init_capa_hash(void);
+void cleanup_capa_hash(struct hlist_head *hash);
-struct obd_capa *capa_add(cfs_hlist_head_t *hash,
+struct obd_capa *capa_add(struct hlist_head *hash,
struct lustre_capa *capa);
-struct obd_capa *capa_lookup(cfs_hlist_head_t *hash,
+struct obd_capa *capa_lookup(struct hlist_head *hash,
struct lustre_capa *capa, int alive);
int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key);
if (unlikely(!ocapa))
return ERR_PTR(-ENOMEM);
- CFS_INIT_LIST_HEAD(&ocapa->c_list);
+ INIT_LIST_HEAD(&ocapa->c_list);
atomic_set(&ocapa->c_refc, 1);
spin_lock_init(&ocapa->c_lock);
ocapa->c_site = site;
- if (ocapa->c_site == CAPA_SITE_CLIENT)
- CFS_INIT_LIST_HEAD(&ocapa->u.cli.lli_list);
- else
- CFS_INIT_HLIST_NODE(&ocapa->u.tgt.c_hash);
+ if (ocapa->c_site == CAPA_SITE_CLIENT)
+ INIT_LIST_HEAD(&ocapa->u.cli.lli_list);
+ else
+ INIT_HLIST_NODE(&ocapa->u.tgt.c_hash);
- return ocapa;
+ return ocapa;
#else
- return ERR_PTR(-EOPNOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
#endif
}
}
if (atomic_dec_and_test(&ocapa->c_refc)) {
- LASSERT(cfs_list_empty(&ocapa->c_list));
+ LASSERT(list_empty(&ocapa->c_list));
if (ocapa->c_site == CAPA_SITE_CLIENT) {
- LASSERT(cfs_list_empty(&ocapa->u.cli.lli_list));
+ LASSERT(list_empty(&ocapa->u.cli.lli_list));
} else {
- cfs_hlist_node_t *hnode;
+ struct hlist_node *hnode;
hnode = &ocapa->u.tgt.c_hash;
LASSERT(!hnode->next && !hnode->pprev);
}
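The hand-rolled check above is what hlist_unhashed() expresses (it tests
pprev == NULL; the LASSERT additionally checks ->next). A hedged equivalent:

    LASSERT(hlist_unhashed(&ocapa->u.tgt.c_hash));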
struct filter_capa_key {
- cfs_list_t k_list;
- struct lustre_capa_key k_key;
+ struct list_head k_list;
+ struct lustre_capa_key k_key;
};
enum lc_auth_id {
/****************** mount lookup info *********************/
struct lustre_mount_info {
- char *lmi_name;
- struct super_block *lmi_sb;
- cfs_list_t lmi_list_chain;
+ char *lmi_name;
+ struct super_block *lmi_sb;
+ struct list_head lmi_list_chain;
};
/****************** prototypes *********************/
* Position in global namespace list linking all namespaces on
* the node.
*/
- cfs_list_t ns_list_chain;
+ struct list_head ns_list_chain;
/**
* List of unused locks for this namespace. This list is also called
* to release from the head of this list.
* Locks are linked via l_lru field in \see struct ldlm_lock.
*/
- cfs_list_t ns_unused_list;
+ struct list_head ns_unused_list;
/** Number of locks in the LRU list above */
int ns_nr_unused;
/** Work list for sending GL ASTs to multiple locks. */
struct ldlm_glimpse_work {
struct ldlm_lock *gl_lock; /* lock to glimpse */
- cfs_list_t gl_list; /* linkage to other gl work structs */
+ struct list_head gl_list; /* linkage to other gl work structs */
__u32 gl_flags;/* see LDLM_GL_WORK_* below */
union ldlm_gl_desc *gl_desc; /* glimpse descriptor to be packed in
* glimpse callback request */
/** Interval node data for each LDLM_EXTENT lock. */
struct ldlm_interval {
struct interval_node li_node; /* node for tree management */
- cfs_list_t li_group; /* the locks which have the same
+ struct list_head li_group; /* the locks which have the same
* policy - group of the policy */
};
#define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node)
* List item for client side LRU list.
* Protected by ns_lock in struct ldlm_namespace.
*/
- cfs_list_t l_lru;
+ struct list_head l_lru;
/**
* Linkage to resource's lock queues according to current lock state.
* (could be granted, waiting or converting)
* Protected by lr_lock in struct ldlm_resource.
*/
- cfs_list_t l_res_link;
+ struct list_head l_res_link;
/**
* Tree node for ldlm_extent.
*/
* Per export hash of locks.
* Protected by per-bucket exp->exp_lock_hash locks.
*/
- cfs_hlist_node_t l_exp_hash;
+ struct hlist_node l_exp_hash;
/**
* Per export hash of flock locks.
* Protected by per-bucket exp->exp_flock_hash locks.
*/
- cfs_hlist_node_t l_exp_flock_hash;
+ struct hlist_node l_exp_flock_hash;
/**
* Requested mode.
* Protected by lr_lock.
* expired_lock_thread.elt_expired_locks for further processing.
* Protected by elt_lock.
*/
- cfs_list_t l_pending_chain;
+ struct list_head l_pending_chain;
/**
* Set when lock is sent a blocking AST. Time in seconds when timeout
*/
int l_bl_ast_run;
/** List item ldlm_add_ast_work_item() for case of blocking ASTs. */
- cfs_list_t l_bl_ast;
+ struct list_head l_bl_ast;
/** List item ldlm_add_ast_work_item() for case of completion ASTs. */
- cfs_list_t l_cp_ast;
+ struct list_head l_cp_ast;
/** For ldlm_add_ast_work_item() for "revoke" AST used in COS. */
- cfs_list_t l_rk_ast;
+ struct list_head l_rk_ast;
/**
* Pointer to a conflicting lock that caused blocking AST to be sent
* Protected by lr_lock, linkages to "skip lists".
* For more explanations of skip lists see ldlm/ldlm_inodebits.c
*/
- cfs_list_t l_sl_mode;
- cfs_list_t l_sl_policy;
+ struct list_head l_sl_mode;
+ struct list_head l_sl_policy;
/** Reference tracking structure to debug leaked locks. */
struct lu_ref l_reference;
/** number of export references taken */
int l_exp_refs_nr;
/** link all locks referencing one export */
- cfs_list_t l_exp_refs_link;
+ struct list_head l_exp_refs_link;
/** referenced export object */
struct obd_export *l_exp_refs_target;
#endif
* Lock order of waiting_lists_spinlock, exp_bl_list_lock and res lock
* is: res lock -> exp_bl_list_lock -> waiting_lists_spinlock.
*/
- cfs_list_t l_exp_list;
+ struct list_head l_exp_list;
};
/**
* List item for list in namespace hash.
* protected by ns_lock
*/
- cfs_hlist_node_t lr_hash;
+ struct hlist_node lr_hash;
/** Spinlock to protect locks under this resource. */
spinlock_t lr_lock;
* protected by lr_lock
* @{ */
/** List of locks in granted state */
- cfs_list_t lr_granted;
+ struct list_head lr_granted;
/** List of locks waiting to change their granted mode (converted) */
- cfs_list_t lr_converting;
+ struct list_head lr_converting;
/**
* List of locks that could not be granted due to conflicts and
* that are waiting for conflicts to go away */
- cfs_list_t lr_waiting;
+ struct list_head lr_waiting;
/** @} */
/* XXX No longer needed? Remove ASAP */
}
struct ldlm_ast_work {
- struct ldlm_lock *w_lock;
- int w_blocking;
- struct ldlm_lock_desc w_desc;
- cfs_list_t w_list;
- int w_flags;
- void *w_data;
- int w_datalen;
+ struct ldlm_lock *w_lock;
+ int w_blocking;
+ struct ldlm_lock_desc w_desc;
+ struct list_head w_list;
+ int w_flags;
+ void *w_data;
+ int w_datalen;
};
/**
#endif
typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, __u64 *flags,
- int first_enq, ldlm_error_t *err,
- cfs_list_t *work_list);
+ int first_enq, ldlm_error_t *err,
+ struct list_head *work_list);
/**
* Return values for lock iterators.
void *data, int flag);
int ldlm_server_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data);
-int ldlm_glimpse_locks(struct ldlm_resource *res, cfs_list_t *gl_work_list);
+int ldlm_glimpse_locks(struct ldlm_resource *res,
+ struct list_head *gl_work_list);
/** @} ldlm_srv_ast */
/** \defgroup ldlm_handlers Server LDLM handlers
lock; \
})
-#define ldlm_lock_list_put(head, member, count) \
-({ \
- struct ldlm_lock *_lock, *_next; \
- int c = count; \
- cfs_list_for_each_entry_safe(_lock, _next, head, member) { \
- if (c-- == 0) \
- break; \
- cfs_list_del_init(&_lock->member); \
- LDLM_LOCK_RELEASE(_lock); \
- } \
- LASSERT(c <= 0); \
+#define ldlm_lock_list_put(head, member, count) \
+({ \
+ struct ldlm_lock *_lock, *_next; \
+ int c = count; \
+ list_for_each_entry_safe(_lock, _next, head, member) { \
+ if (c-- == 0) \
+ break; \
+ list_del_init(&_lock->member); \
+ LDLM_LOCK_RELEASE(_lock); \
+ } \
+ LASSERT(c <= 0); \
})
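A typical call site for the converted macro; a sketch, with the list
population elided (l_bl_ast is the blocking-AST linkage declared in
struct ldlm_lock):

    struct list_head cancels;
    int count = 0;

    INIT_LIST_HEAD(&cancels);
    /* ... collect referenced locks onto `cancels` via l_bl_ast,
     * incrementing `count` for each ... */
    ldlm_lock_list_put(&cancels, l_bl_ast, count);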
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res);
int ldlm_resource_putref(struct ldlm_resource *res);
void ldlm_resource_add_lock(struct ldlm_resource *res,
- cfs_list_t *head,
- struct ldlm_lock *lock);
+ struct list_head *head,
+ struct ldlm_lock *lock);
void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
void ldlm_dump_all_namespaces(ldlm_side_t client, int level);
void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
struct lustre_handle *lockh, int async);
int ldlm_prep_enqueue_req(struct obd_export *exp,
- struct ptlrpc_request *req,
- cfs_list_t *cancels,
- int count);
-int ldlm_prep_elc_req(struct obd_export *exp,
- struct ptlrpc_request *req,
- int version, int opc, int canceloff,
- cfs_list_t *cancels, int count);
+ struct ptlrpc_request *req,
+ struct list_head *cancels,
+ int count);
+int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
+ int version, int opc, int canceloff,
+ struct list_head *cancels, int count);
struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp, int lvb_len);
int ldlm_handle_enqueue0(struct ldlm_namespace *ns, struct ptlrpc_request *req,
ldlm_mode_t mode,
ldlm_cancel_flags_t flags,
void *opaque);
-int ldlm_cli_cancel_req(struct obd_export *exp, cfs_list_t *head,
- int count, ldlm_cancel_flags_t flags);
+int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *head,
+ int count, ldlm_cancel_flags_t flags);
int ldlm_cancel_resource_local(struct ldlm_resource *res,
- cfs_list_t *cancels,
+ struct list_head *cancels,
ldlm_policy_data_t *policy,
ldlm_mode_t mode, __u64 lock_flags,
ldlm_cancel_flags_t cancel_flags, void *opaque);
-int ldlm_cli_cancel_list_local(cfs_list_t *cancels, int count,
+int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
ldlm_cancel_flags_t flags);
-int ldlm_cli_cancel_list(cfs_list_t *head, int count,
+int ldlm_cli_cancel_list(struct list_head *head, int count,
struct ptlrpc_request *req, ldlm_cancel_flags_t flags);
/** @} ldlm_cli_api */
struct mdt_export_data {
struct tg_export_data med_ted;
/** List of all files opened by client on this MDT */
- cfs_list_t med_open_head;
+ struct list_head med_open_head;
spinlock_t med_open_lock; /* med_open_head, mfd_list */
struct mutex med_idmap_mutex;
struct lustre_idmap_table *med_idmap;
};
struct ec_export_data { /* echo client */
- cfs_list_t eced_locks;
+ struct list_head eced_locks;
};
/* In-memory access to client data from OST struct */
__u64 fed_lastid_gen;
long fed_dirty; /* in bytes */
long fed_grant; /* in bytes */
- cfs_list_t fed_mod_list; /* files being modified */
+ struct list_head fed_mod_list; /* files being modified */
long fed_pending; /* bytes just being written */
/* count of SOFT_SYNC RPCs, which will be reset after
* ofd_soft_sync_limit number of RPCs, and trigger a sync. */
};
struct mgs_export_data {
- cfs_list_t med_clients; /* mgc fs client via this exp */
+ struct list_head med_clients; /* mgc fs client via this exp */
spinlock_t med_lock; /* protect med_clients */
};
* It tracks access patterns to this export on a per-client-NID basis
*/
struct nid_stat {
- lnet_nid_t nid;
- cfs_hlist_node_t nid_hash;
- cfs_list_t nid_list;
+ lnet_nid_t nid;
+ struct hlist_node nid_hash;
+ struct list_head nid_list;
struct obd_device *nid_obd;
struct proc_dir_entry *nid_proc;
struct lprocfs_stats *nid_stats;
struct lprocfs_stats *nid_ldlm_stats;
- atomic_t nid_exp_ref_count; /* for obd_nid_stats_hash
- exp_nid_stats */
+ atomic_t nid_exp_ref_count; /* for obd_nid_stats_hash
+ exp_nid_stats */
};
#define nidstat_getref(nidstat) \
* Subsequent client RPCs contain this handle id to identify
* what export they are talking to.
*/
- struct portals_handle exp_handle;
- atomic_t exp_refcount;
+ struct portals_handle exp_handle;
+ atomic_t exp_refcount;
/**
* The set of counters below tracks where export references are
* kept. The exp_rpc_count is used for reconnect handling also,
* the cb_count and locks_count are for debug purposes only for now.
* The sum of them should be less than exp_refcount by 3
*/
- atomic_t exp_rpc_count; /* RPC references */
- atomic_t exp_cb_count; /* Commit callback references */
+ atomic_t exp_rpc_count; /* RPC references */
+ atomic_t exp_cb_count; /* Commit callback references */
/** Number of queued replay requests to be processes */
- atomic_t exp_replay_count;
- atomic_t exp_locks_count; /** Lock references */
+ atomic_t exp_replay_count;
+ atomic_t exp_locks_count; /** Lock references */
#if LUSTRE_TRACKS_LOCK_EXP_REFS
- cfs_list_t exp_locks_list;
- spinlock_t exp_locks_list_guard;
+ struct list_head exp_locks_list;
+ spinlock_t exp_locks_list_guard;
#endif
/** UUID of client connected to this export */
- struct obd_uuid exp_client_uuid;
+ struct obd_uuid exp_client_uuid;
/** To link all exports on an obd device */
- cfs_list_t exp_obd_chain;
- cfs_hlist_node_t exp_uuid_hash; /** uuid-export hash*/
- cfs_hlist_node_t exp_nid_hash; /** nid-export hash */
+ struct list_head exp_obd_chain;
+ struct hlist_node exp_uuid_hash; /** uuid-export hash*/
+ struct hlist_node exp_nid_hash; /** nid-export hash */
/**
* All exports eligible for ping evictor are linked into a list
* through this field in "most time since last request on this export"
* order
* protected by obd_dev_lock
*/
- cfs_list_t exp_obd_chain_timed;
- /** Obd device of this export */
- struct obd_device *exp_obd;
+ struct list_head exp_obd_chain_timed;
+ /** Obd device of this export */
+ struct obd_device *exp_obd;
/**
* "reverse" import to send requests (e.g. from ldlm) back to client
* exp_lock protect its change
__u32 exp_conn_cnt;
/** Hash list of all ldlm locks granted on this export */
cfs_hash_t *exp_lock_hash;
- /**
+ /**
* Hash list for Posix lock deadlock detection, added with
* ldlm_lock::l_exp_flock_hash.
- */
- cfs_hash_t *exp_flock_hash;
- cfs_list_t exp_outstanding_replies;
- cfs_list_t exp_uncommitted_replies;
- spinlock_t exp_uncommitted_replies_lock;
- /** Last committed transno for this export */
- __u64 exp_last_committed;
- /** When was last request received */
- cfs_time_t exp_last_request_time;
- /** On replay all requests waiting for replay are linked here */
- cfs_list_t exp_req_replay_queue;
+ */
+ cfs_hash_t *exp_flock_hash;
+ struct list_head exp_outstanding_replies;
+ struct list_head exp_uncommitted_replies;
+ spinlock_t exp_uncommitted_replies_lock;
+ /** Last committed transno for this export */
+ __u64 exp_last_committed;
+ /** When was last request received */
+ cfs_time_t exp_last_request_time;
+ /** On replay all requests waiting for replay are linked here */
+ struct list_head exp_req_replay_queue;
/**
* protects exp_flags, exp_outstanding_replies and the change
* of exp_imp_reverse
cfs_time_t exp_flvr_expire[2]; /* seconds */
/** protects exp_hp_rpcs */
- spinlock_t exp_rpc_lock;
- cfs_list_t exp_hp_rpcs; /* (potential) HP RPCs */
- cfs_list_t exp_reg_rpcs; /* RPC being handled */
+ spinlock_t exp_rpc_lock;
+ struct list_head exp_hp_rpcs; /* (potential) HP RPCs */
+ struct list_head exp_reg_rpcs; /* RPC being handled */
- /** blocking dlm lock list, protected by exp_bl_list_lock */
- cfs_list_t exp_bl_list;
- spinlock_t exp_bl_list_lock;
+ /** blocking dlm lock list, protected by exp_bl_list_lock */
+ struct list_head exp_bl_list;
+ spinlock_t exp_bl_list_lock;
/** Target specific data */
union {
};
struct lu_fld_target {
- cfs_list_t ft_chain;
+ struct list_head ft_chain;
struct obd_export *ft_exp;
struct lu_server_fld *ft_srv;
__u64 ft_idx;
};
struct lu_client_fld {
- /**
- * Client side proc entry. */
- cfs_proc_dir_entry_t *lcf_proc_dir;
+ /**
+ * Client side proc entry. */
+ struct proc_dir_entry *lcf_proc_dir;
- /**
- * List of exports client FLD knows about. */
- cfs_list_t lcf_targets;
+ /**
+ * List of exports client FLD knows about. */
+ struct list_head lcf_targets;
/**
* Current hash to be used to choose an export. */
* ldlm_lock. If it's not at the top, you'll want to use container_of()
* to compute the start of the structure based on the handle field. */
struct portals_handle {
- cfs_list_t h_link;
+ struct list_head h_link;
__u64 h_cookie;
const void *h_owner;
struct portals_handle_ops *h_ops;
};
struct lustre_idmap_table {
- spinlock_t lit_lock;
- cfs_list_t lit_idmaps[CFS_IDMAP_N_HASHES][CFS_IDMAP_HASHSIZE];
+ spinlock_t lit_lock;
+ struct list_head lit_idmaps[CFS_IDMAP_N_HASHES][CFS_IDMAP_HASHSIZE];
};
struct lu_ucred;
};
struct ptlrpc_at_array {
- cfs_list_t *paa_reqs_array; /** array to hold requests */
+ struct list_head *paa_reqs_array; /** array to hold requests */
__u32 paa_size; /** the size of array */
__u32 paa_count; /** the total count of reqs */
time_t paa_deadline; /** the earliest deadline of reqs */
* Definition of import connection structure
*/
struct obd_import_conn {
- /** Item for linking connections together */
- cfs_list_t oic_item;
- /** Pointer to actual PortalRPC connection */
+ /** Item for linking connections together */
+ struct list_head oic_item;
+ /** Pointer to actual PortalRPC connection */
struct ptlrpc_connection *oic_conn;
/** uuid of remote side */
struct obd_uuid oic_uuid;
struct ptlrpc_connection *imp_connection;
/** PortalRPC client structure for this import */
struct ptlrpc_client *imp_client;
- /** List element for linking into pinger chain */
- cfs_list_t imp_pinger_chain;
- /** List element for linking into chain for destruction */
- cfs_list_t imp_zombie_chain;
+ /** List element for linking into pinger chain */
+ struct list_head imp_pinger_chain;
+ /** List element for linking into chain for destruction */
+ struct list_head imp_zombie_chain;
/**
* Lists of requests that are retained for replay, waiting for a reply,
* or waiting for recovery to complete, respectively.
* @{
*/
- cfs_list_t imp_replay_list;
- cfs_list_t imp_sending_list;
- cfs_list_t imp_delayed_list;
+ struct list_head imp_replay_list;
+ struct list_head imp_sending_list;
+ struct list_head imp_delayed_list;
/** @} */
/**
* The imp_replay_cursor is for accelerating searching during replay.
* @{
*/
- cfs_list_t imp_committed_list;
- cfs_list_t *imp_replay_cursor;
+ struct list_head imp_committed_list;
+ struct list_head *imp_replay_cursor;
/** @} */
- /** obd device for this import */
- struct obd_device *imp_obd;
+ /** obd device for this import */
+ struct obd_device *imp_obd;
/**
* some security-related fields
__u64 imp_last_success_conn;
/** List of all possible connections for this import. */
- cfs_list_t imp_conn_list;
+ struct list_head imp_conn_list;
/**
* Current connection. \a imp_connection is imp_conn_current->oic_conn
*/
};
struct plain_handle_data {
- cfs_list_t phd_entry;
- struct llog_handle *phd_cat_handle;
- struct llog_cookie phd_cookie; /* cookie of this log in its cat */
+ struct list_head phd_entry;
+ struct llog_handle *phd_cat_handle;
+ /* cookie of this log in its cat */
+ struct llog_cookie phd_cookie;
};
struct cat_handle_data {
- cfs_list_t chd_head;
- struct llog_handle *chd_current_log; /* currently open log */
- struct llog_handle *chd_next_log; /* llog to be used next */
+ struct list_head chd_head;
+ struct llog_handle *chd_current_log;/* currently open log */
+ struct llog_handle *chd_next_log; /* llog to be used next */
};
static inline void logid_to_fid(struct llog_logid *id, struct lu_fid *fid)
*/
struct ptlrpc_connection {
/** linkage for connections hash table */
- cfs_hlist_node_t c_hash;
+ struct hlist_node c_hash;
/** Our own lnet nid for this connection */
lnet_nid_t c_self;
/** Remote side nid for this connection */
* returned.
*/
struct ptlrpc_request_set {
- atomic_t set_refcount;
+ atomic_t set_refcount;
/** number of in queue requests */
- atomic_t set_new_count;
+ atomic_t set_new_count;
/** number of uncompleted requests */
- atomic_t set_remaining;
+ atomic_t set_remaining;
/** wait queue to wait on for request events */
- wait_queue_head_t set_waitq;
- wait_queue_head_t *set_wakeup_ptr;
+ wait_queue_head_t set_waitq;
+ wait_queue_head_t *set_wakeup_ptr;
/** List of requests in the set */
- cfs_list_t set_requests;
+ struct list_head set_requests;
/**
* List of completion callbacks to be called when the set is completed
* This is only used if \a set_interpret is NULL.
* Links struct ptlrpc_set_cbdata.
*/
- cfs_list_t set_cblist;
+ struct list_head set_cblist;
/** Completion callback, if only one. */
- set_interpreter_func set_interpret;
+ set_interpreter_func set_interpret;
/** opaque argument passed to completion \a set_interpret callback. */
- void *set_arg;
+ void *set_arg;
/**
* Lock for \a set_new_requests manipulations
* locked so that any old caller can communicate requests to
*/
spinlock_t set_new_req_lock;
/** List of new yet unsent requests. Only used with ptlrpcd now. */
- cfs_list_t set_new_requests;
+ struct list_head set_new_requests;
/** rq_status of requests that have been freed already */
- int set_rc;
+ int set_rc;
/** Additional fields used by the flow control extension */
/** Maximum number of RPCs in flight */
- int set_max_inflight;
+ int set_max_inflight;
/** Callback function used to generate RPCs */
- set_producer_func set_producer;
+ set_producer_func set_producer;
/** opaque argument passed to the producer callback */
- void *set_producer_arg;
+ void *set_producer_arg;
};
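For orientation, the life cycle of a request set at a caller; a sketch using
the standard entry points, with request preparation and error handling elided:

    struct ptlrpc_request_set *set;
    int rc;

    set = ptlrpc_prep_set();
    if (set == NULL)
            return -ENOMEM;
    /* ptlrpc_set_add_req(set, req) for each prepared request ... */
    rc = ptlrpc_set_wait(set);      /* send all, sleep on set_waitq */
    ptlrpc_set_destroy(set);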
/**
* Description of a single ptlrpc_set callback
*/
struct ptlrpc_set_cbdata {
- /** List linkage item */
- cfs_list_t psc_item;
- /** Pointer to interpreting function */
- set_interpreter_func psc_interpret;
- /** Opaq argument to pass to the callback */
- void *psc_data;
+ /** List linkage item */
+ struct list_head psc_item;
+ /** Pointer to interpreting function */
+ set_interpreter_func psc_interpret;
+ /** Opaque argument to pass to the callback */
+ void *psc_data;
};
struct ptlrpc_bulk_desc;
* added to the state for replay/failover consistency guarantees.
*/
struct ptlrpc_reply_state {
- /** Callback description */
- struct ptlrpc_cb_id rs_cb_id;
- /** Linkage for list of all reply states in a system */
- cfs_list_t rs_list;
- /** Linkage for list of all reply states on same export */
- cfs_list_t rs_exp_list;
- /** Linkage for list of all reply states for same obd */
- cfs_list_t rs_obd_list;
+ /** Callback description */
+ struct ptlrpc_cb_id rs_cb_id;
+ /** Linkage for list of all reply states in a system */
+ struct list_head rs_list;
+ /** Linkage for list of all reply states on same export */
+ struct list_head rs_exp_list;
+ /** Linkage for list of all reply states for same obd */
+ struct list_head rs_obd_list;
#if RS_DEBUG
- cfs_list_t rs_debug_list;
+ struct list_head rs_debug_list;
#endif
- /** A spinlock to protect the reply state flags */
+ /** A spinlock to protect the reply state flags */
spinlock_t rs_lock;
- /** Reply state flags */
+ /** Reply state flags */
unsigned long rs_difficult:1; /* ACK/commit stuff */
unsigned long rs_no_ack:1; /* no ACK, even for
difficult requests */
*/
struct ptlrpc_request_pool {
/** Locks the list */
- spinlock_t prp_lock;
- /** list of ptlrpc_request structs */
- cfs_list_t prp_req_list;
- /** Maximum message size that would fit into a rquest from this pool */
- int prp_rq_size;
- /** Function to allocate more requests for this pool */
- void (*prp_populate)(struct ptlrpc_request_pool *, int);
+ spinlock_t prp_lock;
+ /** list of ptlrpc_request structs */
+ struct list_head prp_req_list;
+ /** Maximum message size that would fit into a request from this pool */
+ int prp_rq_size;
+ /** Function to allocate more requests for this pool */
+ void (*prp_populate)(struct ptlrpc_request_pool *, int);
};
struct lu_context;
/**
* List of registered policies
*/
- cfs_list_t nrs_policy_list;
+ struct list_head nrs_policy_list;
/**
* List of policies with queued requests. Policies that have any
* outstanding requests are queued here, and this list is queried
* point transition away from the
* ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained.
*/
- cfs_list_t nrs_policy_queued;
+ struct list_head nrs_policy_queued;
/**
* Service partition for this NRS head
*/
/**
* Link into nrs_core::nrs_policies
*/
- cfs_list_t pd_list;
+ struct list_head pd_list;
/**
* NRS operations for this policy
*/
* Linkage into the NRS head's list of policies,
* ptlrpc_nrs:nrs_policy_list
*/
- cfs_list_t pol_list;
+ struct list_head pol_list;
/**
* Linkage into the NRS head's list of policies with enqueued
* requests ptlrpc_nrs:nrs_policy_queued
*/
- cfs_list_t pol_list_queued;
+ struct list_head pol_list_queued;
/**
* Current state of this policy
*/
/**
* List of queued requests.
*/
- cfs_list_t fh_list;
+ struct list_head fh_list;
/**
* For debugging purposes.
*/
};
struct nrs_fifo_req {
- cfs_list_t fr_list;
+ struct list_head fr_list;
__u64 fr_sequence;
};
*/
struct nrs_crrn_client {
struct ptlrpc_nrs_resource cc_res;
- cfs_hlist_node_t cc_hnode;
+ struct hlist_node cc_hnode;
lnet_nid_t cc_nid;
/**
* The round number against which this client is currently scheduling
*/
struct nrs_orr_object {
struct ptlrpc_nrs_resource oo_res;
- cfs_hlist_node_t oo_hnode;
+ struct hlist_node oo_hnode;
/**
* The round number against which requests are being scheduled for this
* object or OST
*/
struct ptlrpc_request {
/* Request type: one of PTL_RPC_MSG_* */
- int rq_type;
+ int rq_type;
/** Result of request processing */
- int rq_status;
- /**
- * Linkage item through which this request is included into
- * sending/delayed lists on client and into rqbd list on server
- */
- cfs_list_t rq_list;
- /**
- * Server side list of incoming unserved requests sorted by arrival
- * time. Traversed from time to time to notice about to expire
- * requests and sent back "early replies" to clients to let them
- * know server is alive and well, just very busy to service their
- * requests in time
- */
- cfs_list_t rq_timed_list;
- /** server-side history, used for debuging purposes. */
- cfs_list_t rq_history_list;
- /** server-side per-export list */
- cfs_list_t rq_exp_list;
- /** server-side hp handlers */
- struct ptlrpc_hpreq_ops *rq_ops;
+ int rq_status;
+ /**
+ * Linkage item through which this request is included into
+ * sending/delayed lists on client and into rqbd list on server
+ */
+ struct list_head rq_list;
+ /**
+ * Server side list of incoming unserved requests sorted by arrival
+ * time. Traversed from time to time to notice requests that are
+ * about to expire, and to send back "early replies" to clients to
+ * let them know the server is alive and well, just too busy to
+ * service their requests in time
+ */
+ struct list_head rq_timed_list;
+ /** server-side history, used for debugging purposes. */
+ struct list_head rq_history_list;
+ /** server-side per-export list */
+ struct list_head rq_exp_list;
+ /** server-side hp handlers */
+ struct ptlrpc_hpreq_ops *rq_ops;
/** initial thread servicing this request */
- struct ptlrpc_thread *rq_svc_thread;
+ struct ptlrpc_thread *rq_svc_thread;
/** history sequence # */
- __u64 rq_history_seq;
+ __u64 rq_history_seq;
/** \addtogroup nrs
* @{
*/
* there.
* Also see \a rq_replay comment above.
*/
- cfs_list_t rq_replay_list;
+ struct list_head rq_replay_list;
- /**
- * security and encryption data
- * @{ */
- struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */
- struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */
- cfs_list_t rq_ctx_chain; /**< link to waited ctx */
+ /**
+ * security and encryption data
+ * @{ */
+ struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */
+ struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */
+ struct list_head rq_ctx_chain; /**< link to waited ctx */
- struct sptlrpc_flavor rq_flvr; /**< for client & server */
- enum lustre_sec_part rq_sp_from;
+ struct sptlrpc_flavor rq_flvr; /**< for client & server */
+ enum lustre_sec_part rq_sp_from;
/* client/server security flags */
unsigned int
/** Per-request waitq introduced by bug 21938 for recovery waiting */
wait_queue_head_t rq_set_waitq;
/** Link item for request set lists */
- cfs_list_t rq_set_chain;
+ struct list_head rq_set_chain;
/** Link back to the request set */
struct ptlrpc_request_set *rq_set;
/** Async completion handler, called when reply is received */
* Structure that defines a single page of a bulk transfer
*/
struct ptlrpc_bulk_page {
- /** Linkage to list of pages in a bulk */
- cfs_list_t bp_link;
- /**
- * Number of bytes in a page to transfer starting from \a bp_pageoffset
- */
- int bp_buflen;
- /** offset within a page */
- int bp_pageoffset;
- /** The page itself */
- struct page *bp_page;
+ /** Linkage to list of pages in a bulk */
+ struct list_head bp_link;
+ /**
+ * Number of bytes in a page to transfer starting from \a bp_pageoffset
+ */
+ int bp_buflen;
+ /** offset within a page */
+ int bp_pageoffset;
+ /** The page itself */
+ struct page *bp_page;
};
#define BULK_GET_SOURCE 0
/**
* List of active threads in svc->srv_threads
*/
- cfs_list_t t_link;
+ struct list_head t_link;
/**
* thread-private data (preallocated memory)
*/
* More than one request can fit into the buffer.
*/
struct ptlrpc_request_buffer_desc {
- /** Link item for rqbds on a service */
- cfs_list_t rqbd_list;
- /** History of requests for this buffer */
- cfs_list_t rqbd_reqs;
- /** Back pointer to service for which this buffer is registered */
- struct ptlrpc_service_part *rqbd_svcpt;
- /** LNet descriptor */
- lnet_handle_md_t rqbd_md_h;
- int rqbd_refcount;
- /** The buffer itself */
- char *rqbd_buffer;
- struct ptlrpc_cb_id rqbd_cbid;
- /**
- * This "embedded" request structure is only used for the
- * last request to fit into the buffer
- */
- struct ptlrpc_request rqbd_req;
+ /** Link item for rqbds on a service */
+ struct list_head rqbd_list;
+ /** History of requests for this buffer */
+ struct list_head rqbd_reqs;
+ /** Back pointer to service for which this buffer is registered */
+ struct ptlrpc_service_part *rqbd_svcpt;
+ /** LNet descriptor */
+ lnet_handle_md_t rqbd_md_h;
+ int rqbd_refcount;
+ /** The buffer itself */
+ char *rqbd_buffer;
+ struct ptlrpc_cb_id rqbd_cbid;
+ /**
+ * This "embedded" request structure is only used for the
+ * last request to fit into the buffer
+ */
+ struct ptlrpc_request rqbd_req;
};
typedef int (*svc_handler_t)(struct ptlrpc_request *req);
struct ptlrpc_service {
/** serialize /proc operations */
spinlock_t srv_lock;
- /** most often accessed fields */
- /** chain thru all services */
- cfs_list_t srv_list;
+ /** most often accessed fields */
+ /** chain thru all services */
+ struct list_head srv_list;
/** service operations table */
struct ptlrpc_service_ops srv_ops;
/** only statically allocated strings here; we don't clean them */
/** only statically allocated strings here; we don't clean them */
char *srv_thread_name;
/** service thread list */
- cfs_list_t srv_threads;
+ struct list_head srv_threads;
/** # of threads to create for each partition at initialization */
int srv_nthrs_cpt_init;
/** limit of threads number for each partition */
/** # running threads */
int scp_nthrs_running;
/** service threads list */
- cfs_list_t scp_threads;
+ struct list_head scp_threads;
/**
* serialize the following fields, used for protecting
/** # incoming reqs */
int scp_nreqs_incoming;
/** request buffers to be reposted */
- cfs_list_t scp_rqbd_idle;
+ struct list_head scp_rqbd_idle;
/** req buffers receiving */
- cfs_list_t scp_rqbd_posted;
+ struct list_head scp_rqbd_posted;
/** incoming reqs */
- cfs_list_t scp_req_incoming;
+ struct list_head scp_req_incoming;
/** timeout before re-posting reqs, in tick */
cfs_duration_t scp_rqbd_timeout;
/**
wait_queue_head_t scp_waitq;
/** request history */
- cfs_list_t scp_hist_reqs;
+ struct list_head scp_hist_reqs;
/** request buffer history */
- cfs_list_t scp_hist_rqbds;
+ struct list_head scp_hist_rqbds;
/** # request buffers in history */
int scp_hist_nrqbds;
/** sequence number for request */
*/
spinlock_t scp_rep_lock __cfs_cacheline_aligned;
/** all the active replies */
- cfs_list_t scp_rep_active;
+ struct list_head scp_rep_active;
#ifndef __KERNEL__
/** replies waiting for service */
- cfs_list_t scp_rep_queue;
+ struct list_head scp_rep_queue;
#endif
/** List of free reply_states */
- cfs_list_t scp_rep_idle;
+ struct list_head scp_rep_idle;
/** waitq to run, when adding stuff to srv_free_rs_list */
wait_queue_head_t scp_rep_waitq;
/** # 'difficult' replies */
int ptlrpc_pinger_add_import(struct obd_import *imp);
int ptlrpc_pinger_del_import(struct obd_import *imp);
int ptlrpc_add_timeout_client(int time, enum timeout_event event,
- timeout_cb_t cb, void *data,
- cfs_list_t *obd_list);
-int ptlrpc_del_timeout_client(cfs_list_t *obd_list,
+ timeout_cb_t cb, void *data,
+ struct list_head *obd_list);
+int ptlrpc_del_timeout_client(struct list_head *obd_list,
enum timeout_event event);
struct ptlrpc_request * ptlrpc_prep_ping(struct obd_import *imp);
int ptlrpc_obd_ping(struct obd_device *obd);
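A hedged usage sketch for the timeout-client API above, modelled on the
grant-shrink registration pattern (the callback body is illustrative):

    static int example_timeout_cb(struct timeout_item *item, void *data)
    {
            /* hypothetical periodic work, driven by the pinger */
            return 0;
    }

    /* ptlrpc_add_timeout_client(interval, TIMEOUT_GRANT,
     *                           example_timeout_cb, NULL,
     *                           &cli->cl_grant_shrink_list);
     * ...
     * ptlrpc_del_timeout_client(&cli->cl_grant_shrink_list,
     *                           TIMEOUT_GRANT);
     */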
/* attached client members of this nodemap */
struct list_head nm_exports;
/* access by nodemap name */
- cfs_hlist_node_t nm_hash;
+ struct hlist_node nm_hash;
};
void nodemap_activate(const bool value);
struct nrs_tbf_jobid {
char *tj_id;
- cfs_list_t tj_linkage;
+ struct list_head tj_linkage;
};
struct nrs_tbf_client {
/** Resource object for policy instance. */
struct ptlrpc_nrs_resource tc_res;
/** Node in the hash table. */
- cfs_hlist_node_t tc_hnode;
+ struct hlist_node tc_hnode;
/** NID of the client. */
lnet_nid_t tc_nid;
/** Jobid of the client. */
/** Reference number of the client. */
atomic_t tc_ref;
/** Linkage to rule. */
- cfs_list_t tc_linkage;
+ struct list_head tc_linkage;
/** Pointer to rule. */
struct nrs_tbf_rule *tc_rule;
/** Generation of the rule matched. */
/** Time check-point. */
__u64 tc_check_time;
/** List of queued requests. */
- cfs_list_t tc_list;
+ struct list_head tc_list;
/** Node in binary heap. */
cfs_binheap_node_t tc_node;
/** Whether the client is in heap. */
* Linkage into LRU list. Protected by bucket lock of
* nrs_tbf_head::th_cli_hash.
*/
- cfs_list_t tc_lru;
+ struct list_head tc_lru;
};
#define MAX_TBF_NAME (16)
/** Head belongs to. */
struct nrs_tbf_head *tr_head;
/** Linkage to head. */
- cfs_list_t tr_linkage;
+ struct list_head tr_linkage;
/** Nid list of the rule. */
- cfs_list_t tr_nids;
+ struct list_head tr_nids;
/** Nid list string of the rule.*/
char *tr_nids_str;
/** Jobid list of the rule. */
- cfs_list_t tr_jobids;
+ struct list_head tr_jobids;
/** Jobid list string of the rule.*/
char *tr_jobids_str;
/** RPC/s limit. */
/** Token bucket depth. */
__u64 tr_depth;
/** List of clients. */
- cfs_list_t tr_cli_list;
+ struct list_head tr_cli_list;
/** Flags of the rule. */
__u32 tr_flags;
/** Usage Reference count taken on the rule. */
* LRU list, updated on each access to client. Protected by
* bucket lock of nrs_tbf_head::th_cli_hash.
*/
- cfs_list_t ntb_lru;
+ struct list_head ntb_lru;
};
/**
/**
* List of rules.
*/
- cfs_list_t th_list;
+ struct list_head th_list;
/**
* Lock to protect the list of rules.
*/
enum nrs_tbf_cmd_type tc_cmd;
char *tc_name;
__u64 tc_rpc_rate;
- cfs_list_t tc_nids;
+ struct list_head tc_nids;
char *tc_nids_str;
- cfs_list_t tc_jobids;
+ struct list_head tc_jobids;
char *tc_jobids_str;
__u32 tc_valid_types;
__u32 tc_rule_flags;
/**
* Linkage to queue.
*/
- cfs_list_t tr_list;
+ struct list_head tr_list;
/**
* Sequence of the request.
*/
PTLRPC_CTX_ERROR)
struct ptlrpc_cli_ctx {
- cfs_hlist_node_t cc_cache; /* linked into ctx cache */
- atomic_t cc_refcount;
+ struct hlist_node cc_cache; /* linked into ctx cache */
+ atomic_t cc_refcount;
struct ptlrpc_sec *cc_sec;
struct ptlrpc_ctx_ops *cc_ops;
- cfs_time_t cc_expire; /* in seconds */
- unsigned int cc_early_expire:1;
- unsigned long cc_flags;
- struct vfs_cred cc_vcred;
+ cfs_time_t cc_expire; /* in seconds */
+ unsigned int cc_early_expire:1;
+ unsigned long cc_flags;
+ struct vfs_cred cc_vcred;
spinlock_t cc_lock;
- cfs_list_t cc_req_list; /* waiting reqs linked here */
- cfs_list_t cc_gc_chain; /* linked to gc chain */
+ struct list_head cc_req_list; /* waiting reqs linked here */
+ struct list_head cc_gc_chain; /* linked to gc chain */
};
/**
struct obd_import *ps_import;
spinlock_t ps_lock;
- /*
- * garbage collection
- */
- cfs_list_t ps_gc_list;
- cfs_time_t ps_gc_interval; /* in seconds */
- cfs_time_t ps_gc_next; /* in seconds */
+ /*
+ * garbage collection
+ */
+ struct list_head ps_gc_list;
+ cfs_time_t ps_gc_interval; /* in seconds */
+ cfs_time_t ps_gc_next; /* in seconds */
};
static inline int sec_is_reverse(struct ptlrpc_sec *sec)
__u32 llod_oid;
int llod_is_index;
const struct dt_index_features *llod_feat;
- cfs_list_t llod_linkage;
+ struct list_head llod_linkage;
};
int lustre_buf2som(void *buf, int rc, struct md_som_data *msd);
};
struct obd_type {
- struct list_head typ_chain;
- struct obd_ops *typ_dt_ops;
- struct md_ops *typ_md_ops;
- struct proc_dir_entry *typ_procroot;
- struct proc_dir_entry *typ_procsym;
- __u32 typ_sym_filter;
- char *typ_name;
- int typ_refcnt;
- struct lu_device_type *typ_lu;
- spinlock_t obd_type_lock;
+ struct list_head typ_chain;
+ struct obd_ops *typ_dt_ops;
+ struct md_ops *typ_md_ops;
+ struct proc_dir_entry *typ_procroot;
+ struct proc_dir_entry *typ_procsym;
+ __u32 typ_sym_filter;
+ char *typ_name;
+ int typ_refcnt;
+ struct lu_device_type *typ_lu;
+ spinlock_t obd_type_lock;
};
struct brw_page {
};
struct timeout_item {
- enum timeout_event ti_event;
- cfs_time_t ti_timeout;
- timeout_cb_t ti_cb;
- void *ti_cb_data;
- cfs_list_t ti_obd_list;
- cfs_list_t ti_chain;
+ enum timeout_event ti_event;
+ cfs_time_t ti_timeout;
+ timeout_cb_t ti_cb;
+ void *ti_cb_data;
+ struct list_head ti_obd_list;
+ struct list_head ti_chain;
};
#define OBD_MAX_RIF_DEFAULT 8
struct mdc_rpc_lock;
struct obd_import;
struct client_obd {
- struct rw_semaphore cl_sem;
+ struct rw_semaphore cl_sem;
struct obd_uuid cl_target_uuid;
struct obd_import *cl_import; /* ptlrpc connection state */
int cl_conn_count;
* be used to add a page into cache. As a solution, we reserve maximum
* grant before trying to dirty a page and unreserve the rest.
* See osc_{reserve|unreserve}_grant for details. */
- long cl_reserved_grant;
- cfs_list_t cl_cache_waiters; /* waiting for cache/grant */
- cfs_time_t cl_next_shrink_grant; /* jiffies */
- cfs_list_t cl_grant_shrink_list; /* Timeout event list */
- int cl_grant_shrink_interval; /* seconds */
+ long cl_reserved_grant;
+ struct list_head cl_cache_waiters; /* waiting for cache/grant */
+ cfs_time_t cl_next_shrink_grant; /* jiffies */
+ struct list_head cl_grant_shrink_list; /* Timeout event list */
+ int cl_grant_shrink_interval; /* seconds */
/* A chunk is an optimal size used by osc_extent to determine
* the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size) */
- int cl_chunkbits;
- int cl_extent_tax; /* extent overhead, by bytes */
+ int cl_chunkbits;
+ int cl_extent_tax; /* extent overhead, by bytes */
/* keep track of objects that have lois that contain pages which
* have been queued for async brw. this lock also protects the
* NB by Jinshan: though field names are still _loi_, actually
* osc_object{}s are in the list.
*/
- client_obd_lock_t cl_loi_list_lock;
- cfs_list_t cl_loi_ready_list;
- cfs_list_t cl_loi_hp_ready_list;
- cfs_list_t cl_loi_write_list;
- cfs_list_t cl_loi_read_list;
- int cl_r_in_flight;
- int cl_w_in_flight;
+ client_obd_lock_t cl_loi_list_lock;
+ struct list_head cl_loi_ready_list;
+ struct list_head cl_loi_hp_ready_list;
+ struct list_head cl_loi_write_list;
+ struct list_head cl_loi_read_list;
+ int cl_r_in_flight;
+ int cl_w_in_flight;
/* just a sum of the loi/lop pending numbers to be exported by /proc */
- atomic_t cl_pending_w_pages;
- atomic_t cl_pending_r_pages;
- __u32 cl_max_pages_per_rpc;
- int cl_max_rpcs_in_flight;
- struct obd_histogram cl_read_rpc_hist;
- struct obd_histogram cl_write_rpc_hist;
- struct obd_histogram cl_read_page_hist;
- struct obd_histogram cl_write_page_hist;
- struct obd_histogram cl_read_offset_hist;
- struct obd_histogram cl_write_offset_hist;
+ atomic_t cl_pending_w_pages;
+ atomic_t cl_pending_r_pages;
+ __u32 cl_max_pages_per_rpc;
+ int cl_max_rpcs_in_flight;
+ struct obd_histogram cl_read_rpc_hist;
+ struct obd_histogram cl_write_rpc_hist;
+ struct obd_histogram cl_read_page_hist;
+ struct obd_histogram cl_write_page_hist;
+ struct obd_histogram cl_read_offset_hist;
+ struct obd_histogram cl_write_offset_hist;
/* lru for osc caching pages */
struct cl_client_cache *cl_cache;
- cfs_list_t cl_lru_osc; /* member of cl_cache->ccc_lru */
+ struct list_head cl_lru_osc; /* member of cl_cache->ccc_lru */
atomic_t *cl_lru_left;
atomic_t cl_lru_busy;
atomic_t cl_lru_shrinkers;
atomic_t cl_lru_in_list;
- cfs_list_t cl_lru_list; /* lru page list */
+ struct list_head cl_lru_list; /* lru page list */
client_obd_lock_t cl_lru_list_lock; /* page list protector */
atomic_t cl_unstable_count;
/* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
- atomic_t cl_destroy_in_flight;
- wait_queue_head_t cl_destroy_waitq;
+ atomic_t cl_destroy_in_flight;
+ wait_queue_head_t cl_destroy_waitq;
struct mdc_rpc_lock *cl_rpc_lock;
struct mdc_rpc_lock *cl_close_lock;
/* mgc datastruct */
- struct mutex cl_mgc_mutex;
+ struct mutex cl_mgc_mutex;
struct local_oid_storage *cl_mgc_los;
- struct dt_object *cl_mgc_configs_dir;
- atomic_t cl_mgc_refcount;
- struct obd_export *cl_mgc_mgsexp;
+ struct dt_object *cl_mgc_configs_dir;
+ atomic_t cl_mgc_refcount;
+ struct obd_export *cl_mgc_mgsexp;
/* checksumming for data sent over the network */
unsigned int cl_checksum:1; /* 0 = disabled, 1 = enabled */
};
struct echo_client_obd {
- struct obd_export *ec_exp; /* the local connection to osc/lov */
+ struct obd_export *ec_exp; /* the local connection to osc/lov */
spinlock_t ec_lock;
- cfs_list_t ec_objects;
- cfs_list_t ec_locks;
- int ec_nstripes;
- __u64 ec_unique;
+ struct list_head ec_objects;
+ struct list_head ec_locks;
+ int ec_nstripes;
+ __u64 ec_unique;
};
/* Generic subset of OSTs */
#define OBD_STATFS_CACHE_SECONDS 1
struct lov_tgt_desc {
- cfs_list_t ltd_kill;
+ struct list_head ltd_kill;
struct obd_uuid ltd_uuid;
struct obd_device *ltd_obd;
struct obd_export *ltd_exp;
cfs_hash_t *obd_nid_hash;
/* nid stats body */
cfs_hash_t *obd_nid_stats_hash;
- cfs_list_t obd_nid_stats;
- atomic_t obd_refcount;
- cfs_list_t obd_exports;
- cfs_list_t obd_unlinked_exports;
- cfs_list_t obd_delayed_exports;
+ struct list_head obd_nid_stats;
+ atomic_t obd_refcount;
+ struct list_head obd_exports;
+ struct list_head obd_unlinked_exports;
+ struct list_head obd_delayed_exports;
struct list_head obd_lwp_list;
int obd_num_exports;
spinlock_t obd_nid_lock;
struct obd_notify_upcall obd_upcall;
struct obd_export *obd_self_export;
struct obd_export *obd_lwp_export;
- /* list of exports in LRU order, for ping evictor, with obd_dev_lock */
- cfs_list_t obd_exports_timed;
- time_t obd_eviction_timer; /* for ping evictor */
+ /* list of exports in LRU order, for ping evictor, with obd_dev_lock */
+ struct list_head obd_exports_timed;
+ time_t obd_eviction_timer; /* for ping evictor */
int obd_max_recoverable_clients;
atomic_t obd_connected_clients;
int obd_stale_clients;
/* this lock protects all recovery list_heads, timer and
* obd_next_recovery_transno value */
- spinlock_t obd_recovery_task_lock;
- __u64 obd_next_recovery_transno;
- int obd_replayed_requests;
- int obd_requests_queued_for_recovery;
- wait_queue_head_t obd_next_transno_waitq;
+ spinlock_t obd_recovery_task_lock;
+ __u64 obd_next_recovery_transno;
+ int obd_replayed_requests;
+ int obd_requests_queued_for_recovery;
+ wait_queue_head_t obd_next_transno_waitq;
/* protected by obd_recovery_task_lock */
- struct timer_list obd_recovery_timer;
- time_t obd_recovery_start; /* seconds */
- time_t obd_recovery_end; /* seconds, for lprocfs_status */
- int obd_recovery_time_hard;
- int obd_recovery_timeout;
- int obd_recovery_ir_factor;
+ struct timer_list obd_recovery_timer;
+ /* seconds */
+ time_t obd_recovery_start;
+ /* seconds, for lprocfs_status */
+ time_t obd_recovery_end;
+ int obd_recovery_time_hard;
+ int obd_recovery_timeout;
+ int obd_recovery_ir_factor;
/* new recovery stuff from CMD2 */
- struct target_recovery_data obd_recovery_data;
- int obd_replayed_locks;
- atomic_t obd_req_replay_clients;
- atomic_t obd_lock_replay_clients;
+ struct target_recovery_data obd_recovery_data;
+ int obd_replayed_locks;
+ atomic_t obd_req_replay_clients;
+ atomic_t obd_lock_replay_clients;
/* all lists are protected by obd_recovery_task_lock */
- cfs_list_t obd_req_replay_queue;
- cfs_list_t obd_lock_replay_queue;
- cfs_list_t obd_final_req_queue;
+ struct list_head obd_req_replay_queue;
+ struct list_head obd_lock_replay_queue;
+ struct list_head obd_final_req_queue;
union {
#ifdef HAVE_SERVER_SUPPORT
struct lprocfs_seq_vars *obd_vars;
atomic_t obd_evict_inprogress;
wait_queue_head_t obd_evict_inprogress_waitq;
- cfs_list_t obd_evict_list; /* protected with pet_lock */
+ struct list_head obd_evict_list; /* protected with pet_lock */
/**
* Ldlm pool part. Save last calculated SLV and Limit.
*/
rwlock_t obd_pool_lock;
- int obd_pool_limit;
- __u64 obd_pool_slv;
+ int obd_pool_limit;
+ __u64 obd_pool_slv;
/**
* A list of outstanding class_incref()'s against this obd. For
/* OBD Device Declarations */
extern struct obd_device *obd_devs[MAX_OBD_DEVICES];
+extern struct list_head obd_types;
+extern spinlock_t obd_types_lock;
extern rwlock_t obd_dev_lock;
/* OBD Operations Declarations */
struct config_llog_data {
struct ldlm_res_id cld_resid;
struct config_llog_instance cld_cfg;
- cfs_list_t cld_list_chain;
+ struct list_head cld_list_chain;
atomic_t cld_refcount;
struct config_llog_data *cld_sptlrpc;/* depended sptlrpc log */
struct config_llog_data *cld_params; /* common parameters log */
};
struct lustre_profile {
- cfs_list_t lp_list;
- char *lp_profile;
- char *lp_dt;
- char *lp_md;
+ struct list_head lp_list;
+ char *lp_profile;
+ char *lp_dt;
+ char *lp_md;
};
struct lustre_profile *class_get_profile(const char * prof);
struct obd_export **lri_exp;
register_lwp_cb lri_cb_func;
void *lri_cb_data;
- cfs_list_t lri_list;
+ struct list_head lri_list;
char lri_name[MTI_NAME_MAXLEN];
};
struct filter_obd {
/* NB this field MUST be first */
- struct obd_device_target fo_obt;
+ struct obd_device_target fo_obt;
/* capability related */
- unsigned int fo_fl_oss_capa;
- cfs_list_t fo_capa_keys;
- cfs_hlist_head_t *fo_capa_hash;
+ unsigned int fo_fl_oss_capa;
+ struct list_head fo_capa_keys;
+ struct hlist_head *fo_capa_hash;
};
struct echo_obd {
/* lock for capa hash/capa_list/fo_capa_keys */
DEFINE_SPINLOCK(capa_lock);
-cfs_list_t capa_list[CAPA_SITE_MAX];
+struct list_head capa_list[CAPA_SITE_MAX];
static struct capa_hmac_alg capa_hmac_algs[] = {
DEF_CAPA_HMAC_ALG("sha1", SHA1, 20, 20),
EXPORT_SYMBOL(capa_lock);
EXPORT_SYMBOL(capa_count);
-cfs_hlist_head_t *init_capa_hash(void)
+struct hlist_head *init_capa_hash(void)
{
- cfs_hlist_head_t *hash;
+ struct hlist_head *hash;
int nr_hash, i;
OBD_ALLOC(hash, PAGE_CACHE_SIZE);
if (!hash)
return NULL;
- nr_hash = PAGE_CACHE_SIZE / sizeof(cfs_hlist_head_t);
+ nr_hash = PAGE_CACHE_SIZE / sizeof(struct hlist_head);
LASSERT(nr_hash > NR_CAPAHASH);
for (i = 0; i < NR_CAPAHASH; i++)
- CFS_INIT_HLIST_HEAD(hash + i);
+ INIT_HLIST_HEAD(hash + i);
return hash;
}
EXPORT_SYMBOL(init_capa_hash);
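
A hypothetical call site (not shown in this patch) pairs the two capa-hash helpers as below; note that init_capa_hash() allocates a whole page but initializes only the first NR_CAPAHASH buckets, which the LASSERT above guards:

/* hypothetical caller, for illustration only */
struct hlist_head *hash;

hash = init_capa_hash();
if (hash == NULL)
	return -ENOMEM;
/* ... populate and query via capa_add()/capa_lookup() ... */
cleanup_capa_hash(hash);	/* drops entries; the full function body
				 * (truncated above) also frees the page */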
static inline void capa_delete(struct obd_capa *ocapa)
{
- LASSERT(capa_on_server(ocapa));
- cfs_hlist_del_init(&ocapa->u.tgt.c_hash);
- cfs_list_del_init(&ocapa->c_list);
- capa_count[ocapa->c_site]--;
- /* release the ref when alloc */
- capa_put(ocapa);
+ LASSERT(capa_on_server(ocapa));
+ hlist_del_init(&ocapa->u.tgt.c_hash);
+ list_del_init(&ocapa->c_list);
+ capa_count[ocapa->c_site]--;
+ /* release the ref when alloc */
+ capa_put(ocapa);
}
-void cleanup_capa_hash(cfs_hlist_head_t *hash)
+void cleanup_capa_hash(struct hlist_head *hash)
{
int i;
- cfs_hlist_node_t *pos, *next;
+ struct hlist_node *pos, *next;
struct obd_capa *oc;
spin_lock(&capa_lock);
}
static struct obd_capa *find_capa(struct lustre_capa *capa,
- cfs_hlist_head_t *head, int alive)
+ struct hlist_head *head, int alive)
{
- cfs_hlist_node_t *pos;
+ struct hlist_node *pos;
struct obd_capa *ocapa;
int len = alive ? offsetof(struct lustre_capa, lc_keyid):sizeof(*capa);
}
#define LRU_CAPA_DELETE_COUNT 12
-static inline void capa_delete_lru(cfs_list_t *head)
+static inline void capa_delete_lru(struct list_head *head)
{
- struct obd_capa *ocapa;
- cfs_list_t *node = head->next;
- int count = 0;
+ struct obd_capa *ocapa;
+ struct list_head *node = head->next;
+ int count = 0;
- /* free LRU_CAPA_DELETE_COUNT unused capa from head */
- while (count++ < LRU_CAPA_DELETE_COUNT) {
- ocapa = cfs_list_entry(node, struct obd_capa, c_list);
- node = node->next;
+ /* free LRU_CAPA_DELETE_COUNT unused capa from head */
+ while (count++ < LRU_CAPA_DELETE_COUNT) {
+ ocapa = list_entry(node, struct obd_capa, c_list);
+ node = node->next;
if (atomic_read(&ocapa->c_refc))
- continue;
+ continue;
- DEBUG_CAPA(D_SEC, &ocapa->c_capa, "free lru");
- capa_delete(ocapa);
- }
+ DEBUG_CAPA(D_SEC, &ocapa->c_capa, "free lru");
+ capa_delete(ocapa);
+ }
}
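
capa_delete_lru() keeps its hand-rolled walk, caching node->next before capa_delete() can unlink the entry. The converted API offers list_for_each_entry_safe() for exactly this pattern; a hypothetical equivalent (not what the patch does) would look like the sketch below, with the small semantic difference that the safe iterator also stops at the list head instead of assuming at least LRU_CAPA_DELETE_COUNT entries are present:

/* hypothetical rewrite using the safe iterator; illustration only */
static inline void capa_delete_lru(struct list_head *head)
{
	struct obd_capa *ocapa, *next;
	int count = 0;

	list_for_each_entry_safe(ocapa, next, head, c_list) {
		if (count++ >= LRU_CAPA_DELETE_COUNT)
			break;
		if (atomic_read(&ocapa->c_refc))
			continue;
		DEBUG_CAPA(D_SEC, &ocapa->c_capa, "free lru");
		capa_delete(ocapa);
	}
}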
/* add or update */
-struct obd_capa *capa_add(cfs_hlist_head_t *hash, struct lustre_capa *capa)
+struct obd_capa *capa_add(struct hlist_head *hash, struct lustre_capa *capa)
{
- cfs_hlist_head_t *head = hash + capa_hashfn(&capa->lc_fid);
+ struct hlist_head *head = hash + capa_hashfn(&capa->lc_fid);
struct obd_capa *ocapa, *old = NULL;
- cfs_list_t *list = &capa_list[CAPA_SITE_SERVER];
+ struct list_head *list = &capa_list[CAPA_SITE_SERVER];
ocapa = alloc_capa(CAPA_SITE_SERVER);
if (IS_ERR(ocapa))
if (!old) {
ocapa->c_capa = *capa;
set_capa_expiry(ocapa);
- cfs_hlist_add_head(&ocapa->u.tgt.c_hash, head);
- cfs_list_add_tail(&ocapa->c_list, list);
+ hlist_add_head(&ocapa->u.tgt.c_hash, head);
+ list_add_tail(&ocapa->c_list, list);
capa_get(ocapa);
capa_count[CAPA_SITE_SERVER]++;
if (capa_count[CAPA_SITE_SERVER] > CAPA_HASH_SIZE)
}
EXPORT_SYMBOL(capa_add);
-struct obd_capa *capa_lookup(cfs_hlist_head_t *hash, struct lustre_capa *capa,
+struct obd_capa *capa_lookup(struct hlist_head *hash, struct lustre_capa *capa,
int alive)
{
struct obd_capa *ocapa;
spin_lock(&capa_lock);
ocapa = find_capa(capa, hash + capa_hashfn(&capa->lc_fid), alive);
if (ocapa) {
- cfs_list_move_tail(&ocapa->c_list,
- &capa_list[CAPA_SITE_SERVER]);
+ list_move_tail(&ocapa->c_list, &capa_list[CAPA_SITE_SERVER]);
capa_get(ocapa);
}
spin_unlock(&capa_lock);
*/
#define cl_io_for_each(slice, io) \
- cfs_list_for_each_entry((slice), &io->ci_layers, cis_linkage)
+ list_for_each_entry((slice), &io->ci_layers, cis_linkage)
#define cl_io_for_each_reverse(slice, io) \
- cfs_list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
+ list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
static inline int cl_io_type_is_valid(enum cl_io_type type)
{
LINVRNT(cl_io_invariant(io));
ENTRY;
- while (!cfs_list_empty(&io->ci_layers)) {
+ while (!list_empty(&io->ci_layers)) {
slice = container_of(io->ci_layers.prev, struct cl_io_slice,
cis_linkage);
- cfs_list_del_init(&slice->cis_linkage);
+ list_del_init(&slice->cis_linkage);
if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
/*
ENTRY;
io->ci_type = iot;
- CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
- CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_curr);
- CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_done);
- CFS_INIT_LIST_HEAD(&io->ci_layers);
+ INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
+ INIT_LIST_HEAD(&io->ci_lockset.cls_curr);
+ INIT_LIST_HEAD(&io->ci_lockset.cls_done);
+ INIT_LIST_HEAD(&io->ci_layers);
result = 0;
cl_object_for_each(scan, obj) {
done = 1;
prev = NULL;
- cfs_list_for_each_entry_safe(curr, temp,
- &io->ci_lockset.cls_todo,
- cill_linkage) {
- if (prev != NULL) {
- switch (cl_lock_descr_sort(&prev->cill_descr,
- &curr->cill_descr)) {
- case 0:
- /*
- * IMPOSSIBLE: Identical locks are
- * already removed at
- * this point.
- */
- default:
- LBUG();
- case +1:
- cfs_list_move_tail(&curr->cill_linkage,
- &prev->cill_linkage);
- done = 0;
- continue; /* don't change prev: it's
- * still "previous" */
- case -1: /* already in order */
- break;
- }
- }
- prev = curr;
- }
- } while (!done);
- EXIT;
+ list_for_each_entry_safe(curr, temp, &io->ci_lockset.cls_todo,
+ cill_linkage) {
+ if (prev != NULL) {
+ switch (cl_lock_descr_sort(&prev->cill_descr,
+ &curr->cill_descr)) {
+ case 0:
+ /*
+ * IMPOSSIBLE: Identical locks are
+ * already removed at this point.
+ */
+ default:
+ LBUG();
+ case +1:
+ list_move_tail(&curr->cill_linkage,
+ &prev->cill_linkage);
+ done = 0;
+ continue; /* don't change prev: it's still "previous" */
+ case -1: /* already in order */
+ break;
+ }
+ }
+ prev = curr;
+ }
+ } while (!done);
+ EXIT;
}
/**
* \retval +ve there is a matching lock in the \a queue
* \retval 0 there are no matching locks in the \a queue
*/
-int cl_queue_match(const cfs_list_t *queue,
+int cl_queue_match(const struct list_head *queue,
const struct cl_lock_descr *need)
{
- struct cl_io_lock_link *scan;
+ struct cl_io_lock_link *scan;
+ ENTRY;
- ENTRY;
- cfs_list_for_each_entry(scan, queue, cill_linkage) {
- if (cl_lock_descr_match(&scan->cill_descr, need))
- RETURN(+1);
- }
- RETURN(0);
+ list_for_each_entry(scan, queue, cill_linkage) {
+ if (cl_lock_descr_match(&scan->cill_descr, need))
+ RETURN(+1);
+ }
+ RETURN(0);
}
EXPORT_SYMBOL(cl_queue_match);
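
cl_queue_match() is a pure predicate scan and takes the queue as const; a hypothetical caller (illustration only) would use it to avoid enqueueing a duplicate lock request:

/* hypothetical caller, for illustration only */
if (cl_queue_match(&io->ci_lockset.cls_todo, &link->cill_descr))
	RETURN(0);	/* an equivalent lock is already queued */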
-static int cl_queue_merge(const cfs_list_t *queue,
+static int cl_queue_merge(const struct list_head *queue,
const struct cl_lock_descr *need)
{
- struct cl_io_lock_link *scan;
-
- ENTRY;
- cfs_list_for_each_entry(scan, queue, cill_linkage) {
- if (cl_lock_descr_cmp(&scan->cill_descr, need))
- continue;
- cl_lock_descr_merge(&scan->cill_descr, need);
- CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
- scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
- scan->cill_descr.cld_end);
- RETURN(+1);
- }
- RETURN(0);
+ struct cl_io_lock_link *scan;
+ ENTRY;
+ list_for_each_entry(scan, queue, cill_linkage) {
+ if (cl_lock_descr_cmp(&scan->cill_descr, need))
+ continue;
+ cl_lock_descr_merge(&scan->cill_descr, need);
+ CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
+ scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
+ scan->cill_descr.cld_end);
+ RETURN(+1);
+ }
+ RETURN(0);
}
static int cl_lockset_match(const struct cl_lockset *set,
if (!IS_ERR(lock)) {
link->cill_lock = lock;
- cfs_list_move(&link->cill_linkage, &set->cls_curr);
+ list_move(&link->cill_linkage, &set->cls_curr);
if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) {
result = cl_wait(env, lock);
if (result == 0)
- cfs_list_move(&link->cill_linkage,
- &set->cls_done);
+ list_move(&link->cill_linkage, &set->cls_done);
} else
result = 0;
} else
struct cl_lock *lock = link->cill_lock;
ENTRY;
- cfs_list_del_init(&link->cill_linkage);
+ list_del_init(&link->cill_linkage);
if (lock != NULL) {
cl_lock_release(env, lock, "io", io);
link->cill_lock = NULL;
static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
struct cl_lockset *set)
{
- struct cl_io_lock_link *link;
- struct cl_io_lock_link *temp;
- struct cl_lock *lock;
- int result;
+ struct cl_io_lock_link *link;
+ struct cl_io_lock_link *temp;
+ struct cl_lock *lock;
+ int result;
- ENTRY;
- result = 0;
- cfs_list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
- if (!cl_lockset_match(set, &link->cill_descr)) {
- /* XXX some locking to guarantee that locks aren't
- * expanded in between. */
- result = cl_lockset_lock_one(env, io, set, link);
- if (result != 0)
- break;
- } else
- cl_lock_link_fini(env, io, link);
- }
- if (result == 0) {
- cfs_list_for_each_entry_safe(link, temp,
- &set->cls_curr, cill_linkage) {
- lock = link->cill_lock;
- result = cl_wait(env, lock);
- if (result == 0)
- cfs_list_move(&link->cill_linkage,
- &set->cls_done);
- else
- break;
- }
- }
- RETURN(result);
+ ENTRY;
+ result = 0;
+ list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
+ if (!cl_lockset_match(set, &link->cill_descr)) {
+ /* XXX some locking to guarantee that locks aren't
+ * expanded in between. */
+ result = cl_lockset_lock_one(env, io, set, link);
+ if (result != 0)
+ break;
+ } else
+ cl_lock_link_fini(env, io, link);
+ }
+ if (result == 0) {
+ list_for_each_entry_safe(link, temp, &set->cls_curr,
+ cill_linkage) {
+ lock = link->cill_lock;
+ result = cl_wait(env, lock);
+ if (result == 0)
+ list_move(&link->cill_linkage, &set->cls_done);
+ else
+ break;
+ }
+ }
+ RETURN(result);
}
/**
ENTRY;
set = &io->ci_lockset;
- cfs_list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
- cl_lock_link_fini(env, io, link);
+ list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
+ cl_lock_link_fini(env, io, link);
- cfs_list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage)
- cl_lock_link_fini(env, io, link);
+ list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage)
+ cl_lock_link_fini(env, io, link);
- cfs_list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
- cl_unuse(env, link->cill_lock);
- cl_lock_link_fini(env, io, link);
- }
- cl_io_for_each_reverse(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
- scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
- }
- io->ci_state = CIS_UNLOCKED;
- LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired);
- EXIT;
+ list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
+ cl_unuse(env, link->cill_lock);
+ cl_lock_link_fini(env, io, link);
+ }
+ cl_io_for_each_reverse(scan, io) {
+ if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
+ scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
+ }
+ io->ci_state = CIS_UNLOCKED;
+ LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired);
+ EXIT;
}
EXPORT_SYMBOL(cl_io_unlock);
if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr))
result = +1;
else {
- cfs_list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
+ list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
result = 0;
}
RETURN(result);
/*
* If ->cio_submit() failed, no pages were sent.
*/
- LASSERT(ergo(result != 0, cfs_list_empty(&queue->c2_qout.pl_pages)));
+ LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages)));
RETURN(result);
}
EXPORT_SYMBOL(cl_io_submit_rw);
rc = cl_sync_io_wait(env, io, &queue->c2_qout,
anchor, timeout);
} else {
- LASSERT(cfs_list_empty(&queue->c2_qout.pl_pages));
+ LASSERT(list_empty(&queue->c2_qout.pl_pages));
cl_page_list_for_each(pg, &queue->c2_qin)
pg->cp_sync_io = NULL;
}
struct cl_object *obj,
const struct cl_io_operations *ops)
{
- cfs_list_t *linkage = &slice->cis_linkage;
+ struct list_head *linkage = &slice->cis_linkage;
LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
- cfs_list_empty(linkage));
+ list_empty(linkage));
ENTRY;
- cfs_list_add_tail(linkage, &io->ci_layers);
+ list_add_tail(linkage, &io->ci_layers);
slice->cis_io = io;
slice->cis_obj = obj;
slice->cis_iop = ops;
{
ENTRY;
plist->pl_nr = 0;
- CFS_INIT_LIST_HEAD(&plist->pl_pages);
+ INIT_LIST_HEAD(&plist->pl_pages);
plist->pl_owner = current;
EXIT;
}
LASSERT(page->cp_owner != NULL);
LINVRNT(plist->pl_owner == current);
- LASSERT(cfs_list_empty(&page->cp_batch));
- cfs_list_add_tail(&page->cp_batch, &plist->pl_pages);
+ LASSERT(list_empty(&page->cp_batch));
+ list_add_tail(&page->cp_batch, &plist->pl_pages);
++plist->pl_nr;
lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
cl_page_get(page);
LINVRNT(plist->pl_owner == current);
ENTRY;
- cfs_list_del_init(&page->cp_batch);
+ list_del_init(&page->cp_batch);
--plist->pl_nr;
lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
cl_page_put(env, page);
LINVRNT(src->pl_owner == current);
ENTRY;
- cfs_list_move_tail(&page->cp_batch, &dst->pl_pages);
+ list_move_tail(&page->cp_batch, &dst->pl_pages);
--src->pl_nr;
++dst->pl_nr;
lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
LINVRNT(src->pl_owner == current);
ENTRY;
- cfs_list_move(&page->cp_batch, &dst->pl_pages);
+ list_move(&page->cp_batch, &dst->pl_pages);
--src->pl_nr;
++dst->pl_nr;
lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
cl_page_list_for_each_safe(page, temp, plist) {
LASSERT(plist->pl_nr > 0);
- cfs_list_del_init(&page->cp_batch);
+ list_del_init(&page->cp_batch);
--plist->pl_nr;
/*
* cl_page_disown0 rather than usual cl_page_disown() is used,
const struct cl_req_operations *ops)
{
ENTRY;
- cfs_list_add_tail(&slice->crs_linkage, &req->crq_layers);
+ list_add_tail(&slice->crs_linkage, &req->crq_layers);
slice->crs_dev = dev;
slice->crs_ops = ops;
slice->crs_req = req;
{
unsigned i;
- LASSERT(cfs_list_empty(&req->crq_pages));
+ LASSERT(list_empty(&req->crq_pages));
LASSERT(req->crq_nrpages == 0);
- LINVRNT(cfs_list_empty(&req->crq_layers));
+ LINVRNT(list_empty(&req->crq_layers));
LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL));
ENTRY;
ENTRY;
result = 0;
- cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+ list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
if (dev->cd_ops->cdo_req_init != NULL) {
result = dev->cd_ops->cdo_req_init(env,
/*
* for the lack of list_for_each_entry_reverse_safe()...
*/
- while (!cfs_list_empty(&req->crq_layers)) {
- slice = cfs_list_entry(req->crq_layers.prev,
- struct cl_req_slice, crs_linkage);
- cfs_list_del_init(&slice->crs_linkage);
+ while (!list_empty(&req->crq_layers)) {
+ slice = list_entry(req->crq_layers.prev,
+ struct cl_req_slice, crs_linkage);
+ list_del_init(&slice->crs_linkage);
if (slice->crs_ops->cro_completion != NULL)
slice->crs_ops->cro_completion(env, slice, rc);
}
ENTRY;
- LASSERT(cfs_list_empty(&page->cp_flight));
+ LASSERT(list_empty(&page->cp_flight));
LASSERT(page->cp_req == NULL);
CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n",
req, req->crq_type, req->crq_nrpages);
- cfs_list_add_tail(&page->cp_flight, &req->crq_pages);
+ list_add_tail(&page->cp_flight, &req->crq_pages);
++req->crq_nrpages;
page->cp_req = req;
obj = cl_object_top(page->cp_obj);
ENTRY;
- LASSERT(!cfs_list_empty(&page->cp_flight));
+ LASSERT(!list_empty(&page->cp_flight));
LASSERT(req->crq_nrpages > 0);
- cfs_list_del_init(&page->cp_flight);
+ list_del_init(&page->cp_flight);
--req->crq_nrpages;
page->cp_req = NULL;
EXIT;
LASSERT(req->crq_o[i].ro_obj != NULL);
result = 0;
- cfs_list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
+ list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
if (slice->crs_ops->cro_prep != NULL) {
result = slice->crs_ops->cro_prep(env, slice);
if (result != 0)
struct cl_page *page;
int i;
- LASSERT(!cfs_list_empty(&req->crq_pages));
+ LASSERT(!list_empty(&req->crq_pages));
ENTRY;
/* Take any page to use as a model. */
- page = cfs_list_entry(req->crq_pages.next, struct cl_page, cp_flight);
+ page = list_entry(req->crq_pages.next, struct cl_page, cp_flight);
for (i = 0; i < req->crq_nrobjs; ++i) {
- cfs_list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
+ list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
const struct cl_page_slice *scan;
const struct cl_object *obj;
struct cl_object *obj,
const struct cl_lock_operations *ops)
{
- ENTRY;
- slice->cls_lock = lock;
- cfs_list_add_tail(&slice->cls_linkage, &lock->cll_layers);
- slice->cls_obj = obj;
- slice->cls_ops = ops;
- EXIT;
+ ENTRY;
+ slice->cls_lock = lock;
+ list_add_tail(&slice->cls_linkage, &lock->cll_layers);
+ slice->cls_obj = obj;
+ slice->cls_ops = ops;
+ EXIT;
}
EXPORT_SYMBOL(cl_lock_slice_add);
ENTRY;
cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
- while (!cfs_list_empty(&lock->cll_layers)) {
+ while (!list_empty(&lock->cll_layers)) {
struct cl_lock_slice *slice;
- slice = cfs_list_entry(lock->cll_layers.next,
- struct cl_lock_slice, cls_linkage);
- cfs_list_del_init(lock->cll_layers.next);
+ slice = list_entry(lock->cll_layers.next,
+ struct cl_lock_slice, cls_linkage);
+ list_del_init(lock->cll_layers.next);
slice->cls_ops->clo_fini(env, slice);
}
CS_LOCK_DEC(obj, total);
if (atomic_dec_and_test(&lock->cll_ref)) {
if (lock->cll_state == CLS_FREEING) {
- LASSERT(cfs_list_empty(&lock->cll_linkage));
+ LASSERT(list_empty(&lock->cll_linkage));
cl_lock_free(env, lock);
}
CS_LOCK_DEC(obj, busy);
cl_object_get(obj);
lu_object_ref_add_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock",
lock);
- CFS_INIT_LIST_HEAD(&lock->cll_layers);
- CFS_INIT_LIST_HEAD(&lock->cll_linkage);
- CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
+ INIT_LIST_HEAD(&lock->cll_layers);
+ INIT_LIST_HEAD(&lock->cll_linkage);
+ INIT_LIST_HEAD(&lock->cll_inclosure);
lu_ref_init(&lock->cll_reference);
lu_ref_init(&lock->cll_holders);
mutex_init(&lock->cll_guard);
CS_LOCK_INC(obj, total);
CS_LOCK_INC(obj, create);
cl_lock_lockdep_init(lock);
- cfs_list_for_each_entry(obj, &head->loh_layers,
- co_lu.lo_linkage) {
- int err;
-
- err = obj->co_ops->coo_lock_init(env, obj, lock, io);
- if (err != 0) {
- cl_lock_finish(env, lock);
- lock = ERR_PTR(err);
- break;
- }
- }
- } else
- lock = ERR_PTR(-ENOMEM);
- RETURN(lock);
+ list_for_each_entry(obj, &head->loh_layers, co_lu.lo_linkage) {
+ int err;
+
+ err = obj->co_ops->coo_lock_init(env, obj, lock, io);
+ if (err != 0) {
+ cl_lock_finish(env, lock);
+ lock = ERR_PTR(err);
+ break;
+ }
+ }
+ } else
+ lock = ERR_PTR(-ENOMEM);
+ RETURN(lock);
}
/**
LINVRNT(cl_lock_invariant_trusted(env, lock));
ENTRY;
- cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_fits_into != NULL &&
!slice->cls_ops->clo_fits_into(env, slice, need, io))
RETURN(0);
head = cl_object_header(obj);
assert_spin_locked(&head->coh_lock_guard);
CS_LOCK_INC(obj, lookup);
- cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
+ list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
int matched;
matched = cl_lock_ext_match(&lock->cll_descr, need) &&
LINVRNT(cl_lock_invariant_trusted(NULL, lock));
ENTRY;
- cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
RETURN(slice);
}
const struct cl_lock_slice *slice;
lock->cll_flags |= CLF_CANCELLED;
- cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
+ list_for_each_entry_reverse(slice, &lock->cll_layers,
+ cls_linkage) {
if (slice->cls_ops->clo_cancel != NULL)
slice->cls_ops->clo_cancel(env, slice);
}
if (in_cache) /* coh_locks cache holds a refcount. */
cl_lock_put(env, lock);
- /*
- * From now on, no new references to this lock can be acquired
- * by cl_lock_lookup().
- */
- cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
- if (slice->cls_ops->clo_delete != NULL)
- slice->cls_ops->clo_delete(env, slice);
- }
- /*
- * From now on, no new references to this lock can be acquired
- * by layer-specific means (like a pointer from struct
- * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
- * lov).
- *
- * Lock will be finally freed in cl_lock_put() when last of
- * existing references goes away.
- */
- }
- EXIT;
+ /*
+ * From now on, no new references to this lock can be acquired
+ * by cl_lock_lookup().
+ */
+ list_for_each_entry_reverse(slice, &lock->cll_layers,
+ cls_linkage) {
+ if (slice->cls_ops->clo_delete != NULL)
+ slice->cls_ops->clo_delete(env, slice);
+ }
+ /*
+ * From now on, no new references to this lock can be acquired
+ * by layer-specific means (like a pointer from struct
+ * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
+ * lov).
+ *
+ * Lock will be finally freed in cl_lock_put() when last of
+ * existing references goes away.
+ */
+ }
+ EXIT;
}
/**
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
- cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
if (slice->cls_ops->clo_state != NULL)
slice->cls_ops->clo_state(env, slice, state);
wake_up_all(&lock->cll_wq);
static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
{
- const struct cl_lock_slice *slice;
- int result;
-
- do {
- result = 0;
+ const struct cl_lock_slice *slice;
+ int result;
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_state == CLS_INTRANSIT);
+ do {
+ result = 0;
- result = -ENOSYS;
- cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
- if (slice->cls_ops->clo_unuse != NULL) {
- result = slice->cls_ops->clo_unuse(env, slice);
- if (result != 0)
- break;
- }
- }
- LASSERT(result != -ENOSYS);
- } while (result == CLO_REPEAT);
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
+ LASSERT(lock->cll_state == CLS_INTRANSIT);
+
+ result = -ENOSYS;
+ list_for_each_entry_reverse(slice, &lock->cll_layers,
+ cls_linkage) {
+ if (slice->cls_ops->clo_unuse != NULL) {
+ result = slice->cls_ops->clo_unuse(env, slice);
+ if (result != 0)
+ break;
+ }
+ }
+ LASSERT(result != -ENOSYS);
+ } while (result == CLO_REPEAT);
- return result;
+ return result;
}
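
The unuse path walks the layers bottom-to-top, treats a NULL clo_unuse as "layer opts out", catches the nobody-implements-it case with the -ENOSYS sentinel, and restarts the whole pass while any layer returns CLO_REPEAT. A self-contained userspace sketch of that control flow, with all names hypothetical:

/* Userspace sketch of the repeat-until-settled layer walk; the names
 * (unuse_fn, REPEAT, flaky, ...) are illustrative, not Lustre's. */
#include <stdio.h>

enum { OK = 0, REPEAT = 1, ENOSYS_ = -38 };

typedef int (*unuse_fn)(int layer);

static int flaky(int layer)		/* succeeds on the second pass */
{
	static int first = 1;

	if (layer == 1 && first) {
		first = 0;
		return REPEAT;
	}
	return OK;
}

int main(void)
{
	unuse_fn ops[3] = { NULL, flaky, flaky }; /* NULL = layer opts out */
	int result;

	do {
		int i;

		result = ENOSYS_;	/* caught if no layer implements it */
		for (i = 2; i >= 0; i--) {	/* reverse: bottom-to-top */
			if (ops[i] == NULL)
				continue;
			result = ops[i](i);
			if (result != OK)
				break;
		}
	} while (result == REPEAT);
	printf("settled: %d\n", result);
	return 0;
}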
/**
result = -ENOSYS;
state = cl_lock_intransit(env, lock);
- cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_use != NULL) {
result = slice->cls_ops->clo_use(env, slice);
if (result != 0)
ENTRY;
result = -ENOSYS;
- cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_enqueue != NULL) {
result = slice->cls_ops->clo_enqueue(env,
slice, io, flags);
break;
result = -ENOSYS;
- cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_wait != NULL) {
result = slice->cls_ops->clo_wait(env, slice);
if (result != 0)
LINVRNT(cl_lock_invariant(env, lock));
pound = 0;
- cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
+ list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_weigh != NULL) {
ounce = slice->cls_ops->clo_weigh(env, slice);
pound += ounce;
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
- cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
+ list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_modify != NULL) {
result = slice->cls_ops->clo_modify(env, slice, desc);
if (result != 0)
LINVRNT(cl_lock_is_mutexed(origin));
LINVRNT(cl_lock_invariant(env, origin));
- CFS_INIT_LIST_HEAD(&closure->clc_list);
+ INIT_LIST_HEAD(&closure->clc_list);
closure->clc_origin = origin;
closure->clc_wait = wait;
closure->clc_nr = 0;
result = cl_lock_enclosure(env, lock, closure);
if (result == 0) {
- cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_closure != NULL) {
result = slice->cls_ops->clo_closure(env, slice,
closure);
* If lock->cll_inclosure is not empty, lock is already in
* this closure.
*/
- if (cfs_list_empty(&lock->cll_inclosure)) {
+ if (list_empty(&lock->cll_inclosure)) {
cl_lock_get_trust(lock);
lu_ref_add(&lock->cll_reference, "closure", closure);
- cfs_list_add(&lock->cll_inclosure, &closure->clc_list);
+ list_add(&lock->cll_inclosure, &closure->clc_list);
closure->clc_nr++;
} else
cl_lock_mutex_put(env, lock);
void cl_lock_disclosure(const struct lu_env *env,
struct cl_lock_closure *closure)
{
- struct cl_lock *scan;
- struct cl_lock *temp;
-
- cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
- cfs_list_for_each_entry_safe(scan, temp, &closure->clc_list,
- cll_inclosure){
- cfs_list_del_init(&scan->cll_inclosure);
- cl_lock_mutex_put(env, scan);
- lu_ref_del(&scan->cll_reference, "closure", closure);
- cl_lock_put(env, scan);
- closure->clc_nr--;
- }
- LASSERT(closure->clc_nr == 0);
+ struct cl_lock *scan;
+ struct cl_lock *temp;
+
+ cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
+ list_for_each_entry_safe(scan, temp, &closure->clc_list,
+ cll_inclosure) {
+ list_del_init(&scan->cll_inclosure);
+ cl_lock_mutex_put(env, scan);
+ lu_ref_del(&scan->cll_reference, "closure", closure);
+ cl_lock_put(env, scan);
+ closure->clc_nr--;
+ }
+ LASSERT(closure->clc_nr == 0);
}
EXPORT_SYMBOL(cl_lock_disclosure);
void cl_lock_closure_fini(struct cl_lock_closure *closure)
{
LASSERT(closure->clc_nr == 0);
- LASSERT(cfs_list_empty(&closure->clc_list));
+ LASSERT(list_empty(&closure->clc_list));
}
EXPORT_SYMBOL(cl_lock_closure_fini);
spin_lock(&head->coh_lock_guard);
/* It is fine to match any group lock since there could be only one
* with a unique gid and it conflicts with all other lock modes too */
- cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
+ list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
if (scan != except &&
(scan->cll_descr.cld_mode == CLM_GROUP ||
cl_lock_ext_match(&scan->cll_descr, need)) &&
head = cl_object_header(obj);
spin_lock(&head->coh_lock_guard);
- while (!cfs_list_empty(&head->coh_locks)) {
+ while (!list_empty(&head->coh_locks)) {
lock = container_of(head->coh_locks.next,
struct cl_lock, cll_linkage);
cl_lock_get_trust(lock);
cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
(*printer)(env, cookie, " {\n");
- cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
(*printer)(env, cookie, " %s@%p: ",
slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
slice);
spin_lock_init(&h->coh_attr_guard);
lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
- CFS_INIT_LIST_HEAD(&h->coh_locks);
+ INIT_LIST_HEAD(&h->coh_locks);
h->coh_page_bufsize = 0;
}
RETURN(result);
*/
void cl_object_header_fini(struct cl_object_header *h)
{
- LASSERT(cfs_list_empty(&h->coh_locks));
+ LASSERT(list_empty(&h->coh_locks));
lu_object_header_fini(&h->coh_lu);
}
EXPORT_SYMBOL(cl_object_header_fini);
top = obj->co_lu.lo_header;
result = 0;
- cfs_list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
+ list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
if (obj->co_ops->coo_attr_get != NULL) {
result = obj->co_ops->coo_attr_get(env, obj, attr);
if (result != 0) {
assert_spin_locked(cl_object_attr_guard(obj));
ENTRY;
- top = obj->co_lu.lo_header;
- result = 0;
- cfs_list_for_each_entry_reverse(obj, &top->loh_layers,
- co_lu.lo_linkage) {
- if (obj->co_ops->coo_attr_set != NULL) {
- result = obj->co_ops->coo_attr_set(env, obj, attr, v);
- if (result != 0) {
- if (result > 0)
- result = 0;
- break;
- }
- }
- }
- RETURN(result);
+ top = obj->co_lu.lo_header;
+ result = 0;
+ list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
+ if (obj->co_ops->coo_attr_set != NULL) {
+ result = obj->co_ops->coo_attr_set(env, obj, attr, v);
+ if (result != 0) {
+ if (result > 0)
+ result = 0;
+ break;
+ }
+ }
+ }
+ RETURN(result);
}
EXPORT_SYMBOL(cl_object_attr_set);
ENTRY;
top = obj->co_lu.lo_header;
result = 0;
- cfs_list_for_each_entry_reverse(obj, &top->loh_layers,
- co_lu.lo_linkage) {
+ list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
if (obj->co_ops->coo_glimpse != NULL) {
result = obj->co_ops->coo_glimpse(env, obj, lvb);
if (result != 0)
ENTRY;
top = obj->co_lu.lo_header;
result = 0;
- cfs_list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
+ list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
if (obj->co_ops->coo_conf_set != NULL) {
result = obj->co_ops->coo_conf_set(env, obj, conf);
if (result != 0)
top = obj->co_lu.lo_header;
result = 0;
- cfs_list_for_each_entry(o, &top->loh_layers, co_lu.lo_linkage) {
+ list_for_each_entry(o, &top->loh_layers, co_lu.lo_linkage) {
if (o->co_ops->coo_prune != NULL) {
result = o->co_ops->coo_prune(env, o);
if (result != 0)
int has;
spin_lock(&head->coh_lock_guard);
- has = cfs_list_empty(&head->coh_locks);
+ has = list_empty(&head->coh_locks);
spin_unlock(&head->coh_lock_guard);
return (has == 0);
* bz20044, bz22683.
*/
-static CFS_LIST_HEAD(cl_envs);
+static struct list_head cl_envs;
static unsigned cl_envs_cached_nr = 0;
static unsigned cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit
* for now. */
* This allows cl_env to be entered into cl_env_hash which implements
* the current thread -> client environment lookup.
*/
- cfs_hlist_node_t ce_node;
+ struct hlist_node ce_node;
#endif
/**
* Owner for the current cl_env.
* Linkage into global list of all client environments. Used for
* garbage collection.
*/
- cfs_list_t ce_linkage;
+ struct list_head ce_linkage;
/*
*
*/
#endif
}
-static void *cl_env_hops_obj(cfs_hlist_node_t *hn)
+static void *cl_env_hops_obj(struct hlist_node *hn)
{
- struct cl_env *cle = cfs_hlist_entry(hn, struct cl_env, ce_node);
- LASSERT(cle->ce_magic == &cl_env_init0);
- return (void *)cle;
+ struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);
+
+ LASSERT(cle->ce_magic == &cl_env_init0);
+ return (void *)cle;
}
-static int cl_env_hops_keycmp(const void *key, cfs_hlist_node_t *hn)
+static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn)
{
struct cl_env *cle = cl_env_hops_obj(hn);
return (key == cle->ce_owner);
}
-static void cl_env_hops_noop(cfs_hash_t *hs, cfs_hlist_node_t *hn)
+static void cl_env_hops_noop(cfs_hash_t *hs, struct hlist_node *hn)
{
- struct cl_env *cle = cfs_hlist_entry(hn, struct cl_env, ce_node);
+ struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);
LASSERT(cle->ce_magic == &cl_env_init0);
}
if (cle != NULL) {
int rc;
- CFS_INIT_LIST_HEAD(&cle->ce_linkage);
+ INIT_LIST_HEAD(&cle->ce_linkage);
cle->ce_magic = &cl_env_init0;
env = &cle->ce_lu;
rc = lu_env_init(env, LCT_CL_THREAD|ctx_tags);
ENTRY;
spin_lock(&cl_envs_guard);
- LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
+ LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
if (cl_envs_cached_nr > 0) {
int rc;
cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
- cfs_list_del_init(&cle->ce_linkage);
+ list_del_init(&cle->ce_linkage);
cl_envs_cached_nr--;
spin_unlock(&cl_envs_guard);
ENTRY;
spin_lock(&cl_envs_guard);
- for (; !cfs_list_empty(&cl_envs) && nr > 0; --nr) {
+ for (; !list_empty(&cl_envs) && nr > 0; --nr) {
cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
- cfs_list_del_init(&cle->ce_linkage);
+ list_del_init(&cle->ce_linkage);
LASSERT(cl_envs_cached_nr > 0);
cl_envs_cached_nr--;
spin_unlock(&cl_envs_guard);
cl_env_fini(cle);
spin_lock(&cl_envs_guard);
}
- LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
+ LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
spin_unlock(&cl_envs_guard);
RETURN(nr);
}
(env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
(env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
spin_lock(&cl_envs_guard);
- cfs_list_add(&cle->ce_linkage, &cl_envs);
+ list_add(&cle->ce_linkage, &cl_envs);
cl_envs_cached_nr++;
spin_unlock(&cl_envs_guard);
} else
cle = &cl_env_percpu[i];
env = &cle->ce_lu;
- CFS_INIT_LIST_HEAD(&cle->ce_linkage);
+ INIT_LIST_HEAD(&cle->ce_linkage);
cle->ce_magic = &cl_env_init0;
rc = lu_env_init(env, LCT_CL_THREAD | tags);
if (rc == 0) {
*/
int cl_global_init(void)
{
- int result;
+ int result;
- result = cl_env_store_init();
- if (result)
- return result;
+ INIT_LIST_HEAD(&cl_envs);
+
+ result = cl_env_store_init();
+ if (result)
+ return result;
result = lu_kmem_init(cl_object_caches);
if (result)
const struct cl_page_slice *slice;
ENTRY;
- cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+ list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
RETURN(slice);
}
struct cl_object *obj = page->cp_obj;
int pagesize = cl_object_header(obj)->coh_page_bufsize;
- PASSERT(env, page, cfs_list_empty(&page->cp_batch));
+ PASSERT(env, page, list_empty(&page->cp_batch));
PASSERT(env, page, page->cp_owner == NULL);
PASSERT(env, page, page->cp_req == NULL);
PASSERT(env, page, page->cp_state == CPS_FREEING);
ENTRY;
- while (!cfs_list_empty(&page->cp_layers)) {
+ while (!list_empty(&page->cp_layers)) {
struct cl_page_slice *slice;
- slice = cfs_list_entry(page->cp_layers.next,
- struct cl_page_slice, cpl_linkage);
- cfs_list_del_init(page->cp_layers.next);
+ slice = list_entry(page->cp_layers.next,
+ struct cl_page_slice, cpl_linkage);
+ list_del_init(page->cp_layers.next);
if (unlikely(slice->cpl_ops->cpo_fini != NULL))
slice->cpl_ops->cpo_fini(env, slice);
}
page->cp_vmpage = vmpage;
cl_page_state_set_trust(page, CPS_CACHED);
page->cp_type = type;
- CFS_INIT_LIST_HEAD(&page->cp_layers);
- CFS_INIT_LIST_HEAD(&page->cp_batch);
- CFS_INIT_LIST_HEAD(&page->cp_flight);
+ INIT_LIST_HEAD(&page->cp_layers);
+ INIT_LIST_HEAD(&page->cp_batch);
+ INIT_LIST_HEAD(&page->cp_flight);
lu_ref_init(&page->cp_reference);
head = o->co_lu.lo_header;
- cfs_list_for_each_entry(o, &head->loh_layers,
- co_lu.lo_linkage) {
+ list_for_each_entry(o, &head->loh_layers,
+ co_lu.lo_linkage) {
if (o->co_ops->coo_page_init != NULL) {
result = o->co_ops->coo_page_init(env, o, page,
ind);
LASSERT(atomic_read(&page->cp_ref) == 0);
PASSERT(env, page, page->cp_owner == NULL);
- PASSERT(env, page, cfs_list_empty(&page->cp_batch));
+ PASSERT(env, page, list_empty(&page->cp_batch));
/*
* Page is no longer reachable by other threads. Tear
* it down.
#define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)
-#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...) \
-({ \
- const struct lu_env *__env = (_env); \
- struct cl_page *__page = (_page); \
- const struct cl_page_slice *__scan; \
- int __result; \
- ptrdiff_t __op = (_op); \
- int (*__method)_proto; \
+#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...) \
+({ \
+ const struct lu_env *__env = (_env); \
+ struct cl_page *__page = (_page); \
+ const struct cl_page_slice *__scan; \
+ int __result; \
+ ptrdiff_t __op = (_op); \
+ int (*__method)_proto; \
\
- __result = 0; \
- cfs_list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + __op); \
- if (__method != NULL) { \
+ __result = 0; \
+ list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \
+ __method = *(void **)((char *)__scan->cpl_ops + __op); \
+ if (__method != NULL) { \
__result = (*__method)(__env, __scan, ## __VA_ARGS__); \
- if (__result != 0) \
- break; \
- } \
- } \
- if (__result > 0) \
- __result = 0; \
- __result; \
+ if (__result != 0) \
+ break; \
+ } \
+ } \
+ if (__result > 0) \
+ __result = 0; \
+ __result; \
})
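
CL_PAGE_INVOKE dispatches by byte offset: CL_PAGE_OP() turns a method name into an offsetof() into cl_page_operations, and each layer's slot is fetched with *(void **)((char *)ops + op), so one macro serves every operation. A self-contained userspace sketch of the trick, where demo_ops and its helpers are hypothetical stand-ins:

/* Userspace sketch of offset-based method dispatch; demo_ops and the
 * helpers are hypothetical. Compile with: cc dispatch.c */
#include <stddef.h>
#include <stdio.h>

struct demo_ops {
	int (*op_read)(int);
	int (*op_write)(int);
};

#define DEMO_OP(name) offsetof(struct demo_ops, name)

static int do_read(int x)  { return x + 1; }
static int do_write(int x) { return x * 2; }

static int invoke(struct demo_ops *ops, ptrdiff_t op, int arg)
{
	/* fetch the function pointer stored 'op' bytes into the table */
	int (*method)(int) = *(int (**)(int))((char *)ops + op);

	return method != NULL ? method(arg) : 0; /* NULL slot = opt out */
}

int main(void)
{
	struct demo_ops ops = { .op_read = do_read, .op_write = do_write };

	printf("%d %d\n", invoke(&ops, DEMO_OP(op_read), 41),
	       invoke(&ops, DEMO_OP(op_write), 21));
	return 0;
}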
#define CL_PAGE_INVOKE_REVERSE(_env, _page, _op, _proto, ...) \
\
__result = 0; \
list_for_each_entry_reverse(__scan, &__page->cp_layers, \
- cpl_linkage) { \
+ cpl_linkage) { \
__method = *(void **)((char *)__scan->cpl_ops + __op); \
if (__method != NULL) { \
__result = (*__method)(__env, __scan, ## __VA_ARGS__); \
ptrdiff_t __op = (_op); \
void (*__method)_proto; \
\
- cfs_list_for_each_entry(__scan, &__page->cp_layers, \
- cpl_linkage) { \
+ list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \
__method = *(void **)((char *)__scan->cpl_ops + __op); \
if (__method != NULL) \
(*__method)(__env, __scan, ## __VA_ARGS__); \
void (*__method)_proto; \
\
/* get to the bottom page. */ \
- cfs_list_for_each_entry_reverse(__scan, &__page->cp_layers, \
- cpl_linkage) { \
+ list_for_each_entry_reverse(__scan, &__page->cp_layers, \
+ cpl_linkage) { \
__method = *(void **)((char *)__scan->cpl_ops + __op); \
if (__method != NULL) \
(*__method)(__env, __scan, ## __VA_ARGS__); \
struct obd_device *obd_devs[MAX_OBD_DEVICES];
EXPORT_SYMBOL(obd_devs);
-cfs_list_t obd_types;
+struct list_head obd_types;
DEFINE_RWLOCK(obd_dev_lock);
__u64 obd_max_pages = 0;
#define obd_init_checks() do {} while(0)
#endif
-extern spinlock_t obd_types_lock;
extern int class_procfs_init(void);
extern int class_procfs_clean(void);
int lustre_register_fs(void);
for (i = CAPA_SITE_CLIENT; i < CAPA_SITE_MAX; i++)
- CFS_INIT_LIST_HEAD(&capa_list[i]);
+ INIT_LIST_HEAD(&capa_list[i]);
#endif
LCONSOLE_INFO("Lustre: Build Version: "BUILD_VERSION"\n");
if (err)
return err;
- CFS_INIT_LIST_HEAD(&obd_types);
+ INIT_LIST_HEAD(&obd_types);
err = misc_register(&obd_psdev);
if (err) {
*/
void dt_txn_callback_add(struct dt_device *dev, struct dt_txn_callback *cb)
{
- cfs_list_add(&cb->dtc_linkage, &dev->dd_txn_callbacks);
+ list_add(&cb->dtc_linkage, &dev->dd_txn_callbacks);
}
EXPORT_SYMBOL(dt_txn_callback_add);
void dt_txn_callback_del(struct dt_device *dev, struct dt_txn_callback *cb)
{
- cfs_list_del_init(&cb->dtc_linkage);
+ list_del_init(&cb->dtc_linkage);
}
EXPORT_SYMBOL(dt_txn_callback_del);
if (th->th_local)
return 0;
- cfs_list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
+ list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
if (cb->dtc_txn_start == NULL ||
!(cb->dtc_tag & env->le_ctx.lc_tags))
continue;
if (txn->th_local)
return 0;
- cfs_list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
+ list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
if (cb->dtc_txn_stop == NULL ||
!(cb->dtc_tag & env->le_ctx.lc_tags))
continue;
void dt_txn_hook_commit(struct thandle *txn)
{
- struct dt_txn_callback *cb;
+ struct dt_txn_callback *cb;
- if (txn->th_local)
- return;
+ if (txn->th_local)
+ return;
- cfs_list_for_each_entry(cb, &txn->th_dev->dd_txn_callbacks,
- dtc_linkage) {
- if (cb->dtc_txn_commit)
- cb->dtc_txn_commit(txn, cb->dtc_cookie);
- }
+ list_for_each_entry(cb, &txn->th_dev->dd_txn_callbacks,
+ dtc_linkage) {
+ if (cb->dtc_txn_commit)
+ cb->dtc_txn_commit(txn, cb->dtc_cookie);
+ }
}
EXPORT_SYMBOL(dt_txn_hook_commit);
int dt_device_init(struct dt_device *dev, struct lu_device_type *t)
{
-
- CFS_INIT_LIST_HEAD(&dev->dd_txn_callbacks);
- return lu_device_init(&dev->dd_lu_dev, t);
+ INIT_LIST_HEAD(&dev->dd_txn_callbacks);
+ return lu_device_init(&dev->dd_lu_dev, t);
}
EXPORT_SYMBOL(dt_device_init);
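
For context, a hypothetical user of the callback list initialized here; the my_txn_* helpers are assumed, not part of the patch:

/* hypothetical registration sequence, for illustration only */
static struct dt_txn_callback mycb = {
	.dtc_txn_start  = my_txn_start,		/* assumed helpers */
	.dtc_txn_stop   = my_txn_stop,
	.dtc_txn_commit = my_txn_commit,
	.dtc_tag        = LCT_MD_THREAD,
};

dt_txn_callback_add(dev, &mycb);	/* list_add onto dd_txn_callbacks */
/* ... dt_txn_hook_start/stop/commit fire the hooks ... */
dt_txn_callback_del(dev, &mycb);	/* list_del_init; safe to re-add */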
#include <obd_class.h>
#include <lprocfs_status.h>
-extern cfs_list_t obd_types;
spinlock_t obd_types_lock;
struct kmem_cache *obd_device_cachep;
EXPORT_SYMBOL(obdo_cachep);
struct kmem_cache *import_cachep;
-cfs_list_t obd_zombie_imports;
-cfs_list_t obd_zombie_exports;
+struct list_head obd_zombie_imports;
+struct list_head obd_zombie_exports;
spinlock_t obd_zombie_impexp_lock;
static void obd_zombie_impexp_notify(void);
static void obd_zombie_export_add(struct obd_export *exp);
struct obd_type *class_search_type(const char *name)
{
- cfs_list_t *tmp;
+ struct list_head *tmp;
struct obd_type *type;
spin_lock(&obd_types_lock);
- cfs_list_for_each(tmp, &obd_types) {
- type = cfs_list_entry(tmp, struct obd_type, typ_chain);
+ list_for_each(tmp, &obd_types) {
+ type = list_entry(tmp, struct obd_type, typ_chain);
if (strcmp(type->typ_name, name) == 0) {
spin_unlock(&obd_types_lock);
return type;
}
spin_lock(&obd_types_lock);
- cfs_list_add(&type->typ_chain, &obd_types);
+ list_add(&type->typ_chain, &obd_types);
spin_unlock(&obd_types_lock);
RETURN (0);
lu_device_type_fini(type->typ_lu);
spin_lock(&obd_types_lock);
- cfs_list_del(&type->typ_chain);
+ list_del(&type->typ_chain);
spin_unlock(&obd_types_lock);
OBD_FREE(type->typ_name, strlen(name) + 1);
if (type->typ_dt_ops != NULL)
if (exp->exp_connection)
ptlrpc_put_connection_superhack(exp->exp_connection);
- LASSERT(cfs_list_empty(&exp->exp_outstanding_replies));
- LASSERT(cfs_list_empty(&exp->exp_uncommitted_replies));
- LASSERT(cfs_list_empty(&exp->exp_req_replay_queue));
- LASSERT(cfs_list_empty(&exp->exp_hp_rpcs));
+ LASSERT(list_empty(&exp->exp_outstanding_replies));
+ LASSERT(list_empty(&exp->exp_uncommitted_replies));
+ LASSERT(list_empty(&exp->exp_req_replay_queue));
+ LASSERT(list_empty(&exp->exp_hp_rpcs));
obd_destroy_export(exp);
class_decref(obd, "export", exp);
atomic_read(&exp->exp_refcount) - 1);
if (atomic_dec_and_test(&exp->exp_refcount)) {
- LASSERT(!cfs_list_empty(&exp->exp_obd_chain));
+ LASSERT(!list_empty(&exp->exp_obd_chain));
CDEBUG(D_IOCTL, "final put %p/%s\n",
exp, exp->exp_client_uuid.uuid);
atomic_set(&export->exp_cb_count, 0);
atomic_set(&export->exp_locks_count, 0);
#if LUSTRE_TRACKS_LOCK_EXP_REFS
- CFS_INIT_LIST_HEAD(&export->exp_locks_list);
+ INIT_LIST_HEAD(&export->exp_locks_list);
spin_lock_init(&export->exp_locks_list_guard);
#endif
atomic_set(&export->exp_replay_count, 0);
export->exp_obd = obd;
- CFS_INIT_LIST_HEAD(&export->exp_outstanding_replies);
+ INIT_LIST_HEAD(&export->exp_outstanding_replies);
spin_lock_init(&export->exp_uncommitted_replies_lock);
- CFS_INIT_LIST_HEAD(&export->exp_uncommitted_replies);
- CFS_INIT_LIST_HEAD(&export->exp_req_replay_queue);
- CFS_INIT_LIST_HEAD(&export->exp_handle.h_link);
- CFS_INIT_LIST_HEAD(&export->exp_hp_rpcs);
- CFS_INIT_LIST_HEAD(&export->exp_reg_rpcs);
+ INIT_LIST_HEAD(&export->exp_uncommitted_replies);
+ INIT_LIST_HEAD(&export->exp_req_replay_queue);
+ INIT_LIST_HEAD(&export->exp_handle.h_link);
+ INIT_LIST_HEAD(&export->exp_hp_rpcs);
+ INIT_LIST_HEAD(&export->exp_reg_rpcs);
class_handle_hash(&export->exp_handle, &export_handle_ops);
export->exp_last_request_time = cfs_time_current_sec();
spin_lock_init(&export->exp_lock);
spin_lock_init(&export->exp_rpc_lock);
- CFS_INIT_HLIST_NODE(&export->exp_uuid_hash);
- CFS_INIT_HLIST_NODE(&export->exp_nid_hash);
+ INIT_HLIST_NODE(&export->exp_uuid_hash);
+ INIT_HLIST_NODE(&export->exp_nid_hash);
spin_lock_init(&export->exp_bl_list_lock);
- CFS_INIT_LIST_HEAD(&export->exp_bl_list);
+ INIT_LIST_HEAD(&export->exp_bl_list);
export->exp_sp_peer = LUSTRE_SP_ANY;
export->exp_flvr.sf_rpc = SPTLRPC_FLVR_INVALID;
}
class_incref(obd, "export", export);
- cfs_list_add(&export->exp_obd_chain, &export->exp_obd->obd_exports);
- cfs_list_add_tail(&export->exp_obd_chain_timed,
- &export->exp_obd->obd_exports_timed);
+ list_add(&export->exp_obd_chain, &export->exp_obd->obd_exports);
+ list_add_tail(&export->exp_obd_chain_timed,
+ &export->exp_obd->obd_exports_timed);
export->exp_obd->obd_num_exports++;
spin_unlock(&obd->obd_dev_lock);
cfs_hash_putref(hash);
if (hash)
cfs_hash_putref(hash);
class_handle_unhash(&export->exp_handle);
- LASSERT(cfs_hlist_unhashed(&export->exp_uuid_hash));
+ LASSERT(hlist_unhashed(&export->exp_uuid_hash));
obd_destroy_export(export);
OBD_FREE_PTR(export);
return ERR_PTR(rc);
spin_lock(&exp->exp_obd->obd_dev_lock);
/* delete an uuid-export hashitem from hashtables */
- if (!cfs_hlist_unhashed(&exp->exp_uuid_hash))
+ if (!hlist_unhashed(&exp->exp_uuid_hash))
cfs_hash_del(exp->exp_obd->obd_uuid_hash,
&exp->exp_client_uuid,
&exp->exp_uuid_hash);
- cfs_list_move(&exp->exp_obd_chain, &exp->exp_obd->obd_unlinked_exports);
- cfs_list_del_init(&exp->exp_obd_chain_timed);
+ list_move(&exp->exp_obd_chain, &exp->exp_obd->obd_unlinked_exports);
+ list_del_init(&exp->exp_obd_chain_timed);
exp->exp_obd->obd_num_exports--;
spin_unlock(&exp->exp_obd->obd_dev_lock);
class_export_put(exp);
ptlrpc_put_connection_superhack(imp->imp_connection);
- while (!cfs_list_empty(&imp->imp_conn_list)) {
- struct obd_import_conn *imp_conn;
+ while (!list_empty(&imp->imp_conn_list)) {
+ struct obd_import_conn *imp_conn;
- imp_conn = cfs_list_entry(imp->imp_conn_list.next,
- struct obd_import_conn, oic_item);
- cfs_list_del_init(&imp_conn->oic_item);
+ imp_conn = list_entry(imp->imp_conn_list.next,
+ struct obd_import_conn, oic_item);
+ list_del_init(&imp_conn->oic_item);
ptlrpc_put_connection_superhack(imp_conn->oic_conn);
OBD_FREE(imp_conn, sizeof(*imp_conn));
}
void class_import_put(struct obd_import *imp)
{
- ENTRY;
+ ENTRY;
- LASSERT(cfs_list_empty(&imp->imp_zombie_chain));
+ LASSERT(list_empty(&imp->imp_zombie_chain));
LASSERT_ATOMIC_GT_LT(&imp->imp_refcount, 0, LI_POISON);
CDEBUG(D_INFO, "import %p refcount=%d obd=%s\n", imp,
if (imp == NULL)
return NULL;
- CFS_INIT_LIST_HEAD(&imp->imp_pinger_chain);
- CFS_INIT_LIST_HEAD(&imp->imp_zombie_chain);
- CFS_INIT_LIST_HEAD(&imp->imp_replay_list);
- CFS_INIT_LIST_HEAD(&imp->imp_sending_list);
- CFS_INIT_LIST_HEAD(&imp->imp_delayed_list);
- CFS_INIT_LIST_HEAD(&imp->imp_committed_list);
+ INIT_LIST_HEAD(&imp->imp_pinger_chain);
+ INIT_LIST_HEAD(&imp->imp_zombie_chain);
+ INIT_LIST_HEAD(&imp->imp_replay_list);
+ INIT_LIST_HEAD(&imp->imp_sending_list);
+ INIT_LIST_HEAD(&imp->imp_delayed_list);
+ INIT_LIST_HEAD(&imp->imp_committed_list);
imp->imp_replay_cursor = &imp->imp_committed_list;
spin_lock_init(&imp->imp_lock);
imp->imp_last_success_conn = 0;
atomic_set(&imp->imp_inflight, 0);
atomic_set(&imp->imp_replay_inflight, 0);
atomic_set(&imp->imp_inval_count, 0);
- CFS_INIT_LIST_HEAD(&imp->imp_conn_list);
- CFS_INIT_LIST_HEAD(&imp->imp_handle.h_link);
+ INIT_LIST_HEAD(&imp->imp_conn_list);
+ INIT_LIST_HEAD(&imp->imp_handle.h_link);
class_handle_hash(&imp->imp_handle, &import_handle_ops);
init_imp_at(&imp->imp_at);
exp, lock, lock->l_exp_refs_target);
}
if ((lock->l_exp_refs_nr ++) == 0) {
- cfs_list_add(&lock->l_exp_refs_link, &exp->exp_locks_list);
+ list_add(&lock->l_exp_refs_link, &exp->exp_locks_list);
lock->l_exp_refs_target = exp;
}
CDEBUG(D_INFO, "lock = %p, export = %p, refs = %u\n",
lock, lock->l_exp_refs_target, exp);
}
if (-- lock->l_exp_refs_nr == 0) {
- cfs_list_del_init(&lock->l_exp_refs_link);
+ list_del_init(&lock->l_exp_refs_link);
lock->l_exp_refs_target = NULL;
}
CDEBUG(D_INFO, "lock = %p, export = %p, refs = %u\n",
* all end up in here, and if any of them race we shouldn't
* call extra class_export_puts(). */
if (already_disconnected) {
- LASSERT(cfs_hlist_unhashed(&export->exp_nid_hash));
+ LASSERT(hlist_unhashed(&export->exp_nid_hash));
GOTO(no_disconn, already_disconnected);
}
CDEBUG(D_IOCTL, "disconnect: cookie "LPX64"\n",
export->exp_handle.h_cookie);
- if (!cfs_hlist_unhashed(&export->exp_nid_hash))
+ if (!hlist_unhashed(&export->exp_nid_hash))
cfs_hash_del(export->exp_obd->obd_nid_hash,
&export->exp_connection->c_peer.nid,
&export->exp_nid_hash);
}
EXPORT_SYMBOL(class_connected_export);
-static void class_disconnect_export_list(cfs_list_t *list,
+static void class_disconnect_export_list(struct list_head *list,
enum obd_option flags)
{
int rc;
/* It's possible that an export may disconnect itself, but
* nothing else will be added to this list. */
- while (!cfs_list_empty(list)) {
- exp = cfs_list_entry(list->next, struct obd_export,
- exp_obd_chain);
- /* need for safe call CDEBUG after obd_disconnect */
- class_export_get(exp);
+ while (!list_empty(list)) {
+ exp = list_entry(list->next, struct obd_export,
+ exp_obd_chain);
+ /* need for safe call CDEBUG after obd_disconnect */
+ class_export_get(exp);
spin_lock(&exp->exp_lock);
exp->exp_flags = flags;
exp);
/* Need to delete this now so we don't end up pointing
* to work_list later when this export is cleaned up. */
- cfs_list_del_init(&exp->exp_obd_chain);
+ list_del_init(&exp->exp_obd_chain);
class_export_put(exp);
continue;
}
void class_disconnect_exports(struct obd_device *obd)
{
- cfs_list_t work_list;
+ struct list_head work_list;
ENTRY;
/* Move all of the exports from obd_exports to a work list, en masse. */
- CFS_INIT_LIST_HEAD(&work_list);
+ INIT_LIST_HEAD(&work_list);
spin_lock(&obd->obd_dev_lock);
- cfs_list_splice_init(&obd->obd_exports, &work_list);
- cfs_list_splice_init(&obd->obd_delayed_exports, &work_list);
+ list_splice_init(&obd->obd_exports, &work_list);
+ list_splice_init(&obd->obd_delayed_exports, &work_list);
spin_unlock(&obd->obd_dev_lock);
- if (!cfs_list_empty(&work_list)) {
+ if (!list_empty(&work_list)) {
CDEBUG(D_HA, "OBD device %d (%p) has exports, "
"disconnecting them\n", obd->obd_minor, obd);
class_disconnect_export_list(&work_list,
void class_disconnect_stale_exports(struct obd_device *obd,
int (*test_export)(struct obd_export *))
{
- cfs_list_t work_list;
+ struct list_head work_list;
struct obd_export *exp, *n;
int evicted = 0;
ENTRY;
- CFS_INIT_LIST_HEAD(&work_list);
+ INIT_LIST_HEAD(&work_list);
spin_lock(&obd->obd_dev_lock);
- cfs_list_for_each_entry_safe(exp, n, &obd->obd_exports,
- exp_obd_chain) {
+ list_for_each_entry_safe(exp, n, &obd->obd_exports,
+ exp_obd_chain) {
/* don't count self-export as client */
if (obd_uuid_equals(&exp->exp_client_uuid,
&exp->exp_obd->obd_uuid))
exp->exp_failed = 1;
spin_unlock(&exp->exp_lock);
- cfs_list_move(&exp->exp_obd_chain, &work_list);
+ list_move(&exp->exp_obd_chain, &work_list);
evicted++;
CDEBUG(D_HA, "%s: disconnect stale client %s@%s\n",
obd->obd_name, exp->exp_client_uuid.uuid,
int nreplies = 0;
spin_lock(&exp->exp_lock);
- cfs_list_for_each_entry(rs, &exp->exp_outstanding_replies,
- rs_exp_list) {
+ list_for_each_entry(rs, &exp->exp_outstanding_replies,
+ rs_exp_list) {
if (nreplies == 0)
first_reply = rs;
nreplies++;
struct obd_export *exp;
spin_lock(&obd->obd_dev_lock);
- cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain)
+ list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain)
print_export_data(exp, "ACTIVE", locks);
- cfs_list_for_each_entry(exp, &obd->obd_unlinked_exports, exp_obd_chain)
+ list_for_each_entry(exp, &obd->obd_unlinked_exports, exp_obd_chain)
print_export_data(exp, "UNLINKED", locks);
- cfs_list_for_each_entry(exp, &obd->obd_delayed_exports, exp_obd_chain)
+ list_for_each_entry(exp, &obd->obd_delayed_exports, exp_obd_chain)
print_export_data(exp, "DELAYED", locks);
spin_unlock(&obd->obd_dev_lock);
spin_lock(&obd_zombie_impexp_lock);
- cfs_list_for_each_entry(exp, &obd_zombie_exports, exp_obd_chain)
+ list_for_each_entry(exp, &obd_zombie_exports, exp_obd_chain)
print_export_data(exp, "ZOMBIE", locks);
spin_unlock(&obd_zombie_impexp_lock);
}
void obd_exports_barrier(struct obd_device *obd)
{
int waited = 2;
- LASSERT(cfs_list_empty(&obd->obd_exports));
+ LASSERT(list_empty(&obd->obd_exports));
spin_lock(&obd->obd_dev_lock);
- while (!cfs_list_empty(&obd->obd_unlinked_exports)) {
+ while (!list_empty(&obd->obd_unlinked_exports)) {
spin_unlock(&obd->obd_dev_lock);
schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
cfs_time_seconds(waited));
do {
spin_lock(&obd_zombie_impexp_lock);
- import = NULL;
- if (!cfs_list_empty(&obd_zombie_imports)) {
- import = cfs_list_entry(obd_zombie_imports.next,
- struct obd_import,
- imp_zombie_chain);
- cfs_list_del_init(&import->imp_zombie_chain);
- }
+ import = NULL;
+ if (!list_empty(&obd_zombie_imports)) {
+ import = list_entry(obd_zombie_imports.next,
+ struct obd_import,
+ imp_zombie_chain);
+ list_del_init(&import->imp_zombie_chain);
+ }
- export = NULL;
- if (!cfs_list_empty(&obd_zombie_exports)) {
- export = cfs_list_entry(obd_zombie_exports.next,
- struct obd_export,
- exp_obd_chain);
- cfs_list_del_init(&export->exp_obd_chain);
- }
+ export = NULL;
+ if (!list_empty(&obd_zombie_exports)) {
+ export = list_entry(obd_zombie_exports.next,
+ struct obd_export,
+ exp_obd_chain);
+ list_del_init(&export->exp_obd_chain);
+ }
spin_unlock(&obd_zombie_impexp_lock);
*/
static void obd_zombie_export_add(struct obd_export *exp) {
spin_lock(&exp->exp_obd->obd_dev_lock);
- LASSERT(!cfs_list_empty(&exp->exp_obd_chain));
- cfs_list_del_init(&exp->exp_obd_chain);
+ LASSERT(!list_empty(&exp->exp_obd_chain));
+ list_del_init(&exp->exp_obd_chain);
spin_unlock(&exp->exp_obd->obd_dev_lock);
spin_lock(&obd_zombie_impexp_lock);
zombies_count++;
- cfs_list_add(&exp->exp_obd_chain, &obd_zombie_exports);
+ list_add(&exp->exp_obd_chain, &obd_zombie_exports);
spin_unlock(&obd_zombie_impexp_lock);
obd_zombie_impexp_notify();
LASSERT(imp->imp_sec == NULL);
LASSERT(imp->imp_rq_pool == NULL);
spin_lock(&obd_zombie_impexp_lock);
- LASSERT(cfs_list_empty(&imp->imp_zombie_chain));
+ LASSERT(list_empty(&imp->imp_zombie_chain));
zombies_count++;
- cfs_list_add(&imp->imp_zombie_chain, &obd_zombie_imports);
+ list_add(&imp->imp_zombie_chain, &obd_zombie_imports);
spin_unlock(&obd_zombie_impexp_lock);
obd_zombie_impexp_notify();
struct task_struct *task;
#endif
- CFS_INIT_LIST_HEAD(&obd_zombie_imports);
- CFS_INIT_LIST_HEAD(&obd_zombie_exports);
+	INIT_LIST_HEAD(&obd_zombie_imports);
+	INIT_LIST_HEAD(&obd_zombie_exports);
spin_lock_init(&obd_zombie_impexp_lock);
init_completion(&obd_zombie_start);
init_completion(&obd_zombie_stop);
EXPORT_SYMBOL(lustre_in_group_p);
struct lustre_idmap_entry {
- cfs_list_t lie_rmt_uid_hash; /* hashed as lie_rmt_uid; */
- cfs_list_t lie_lcl_uid_hash; /* hashed as lie_lcl_uid; */
- cfs_list_t lie_rmt_gid_hash; /* hashed as lie_rmt_gid; */
- cfs_list_t lie_lcl_gid_hash; /* hashed as lie_lcl_gid; */
+ struct list_head lie_rmt_uid_hash; /* hashed as lie_rmt_uid; */
+ struct list_head lie_lcl_uid_hash; /* hashed as lie_lcl_uid; */
+ struct list_head lie_rmt_gid_hash; /* hashed as lie_rmt_gid; */
+ struct list_head lie_lcl_gid_hash; /* hashed as lie_lcl_gid; */
uid_t lie_rmt_uid; /* remote uid */
uid_t lie_lcl_uid; /* local uid */
gid_t lie_rmt_gid; /* remote gid */
if (e == NULL)
return NULL;
- CFS_INIT_LIST_HEAD(&e->lie_rmt_uid_hash);
- CFS_INIT_LIST_HEAD(&e->lie_lcl_uid_hash);
- CFS_INIT_LIST_HEAD(&e->lie_rmt_gid_hash);
- CFS_INIT_LIST_HEAD(&e->lie_lcl_gid_hash);
+ INIT_LIST_HEAD(&e->lie_rmt_uid_hash);
+ INIT_LIST_HEAD(&e->lie_lcl_uid_hash);
+ INIT_LIST_HEAD(&e->lie_rmt_gid_hash);
+ INIT_LIST_HEAD(&e->lie_lcl_gid_hash);
e->lie_rmt_uid = rmt_uid;
e->lie_lcl_uid = lcl_uid;
e->lie_rmt_gid = rmt_gid;
static void idmap_entry_free(struct lustre_idmap_entry *e)
{
- if (!cfs_list_empty(&e->lie_rmt_uid_hash))
- cfs_list_del(&e->lie_rmt_uid_hash);
- if (!cfs_list_empty(&e->lie_lcl_uid_hash))
- cfs_list_del(&e->lie_lcl_uid_hash);
- if (!cfs_list_empty(&e->lie_rmt_gid_hash))
- cfs_list_del(&e->lie_rmt_gid_hash);
- if (!cfs_list_empty(&e->lie_lcl_gid_hash))
- cfs_list_del(&e->lie_lcl_gid_hash);
- OBD_FREE_PTR(e);
+	if (!list_empty(&e->lie_rmt_uid_hash))
+		list_del(&e->lie_rmt_uid_hash);
+	if (!list_empty(&e->lie_lcl_uid_hash))
+		list_del(&e->lie_lcl_uid_hash);
+	if (!list_empty(&e->lie_rmt_gid_hash))
+		list_del(&e->lie_rmt_gid_hash);
+	if (!list_empty(&e->lie_lcl_gid_hash))
+		list_del(&e->lie_lcl_gid_hash);
+	OBD_FREE_PTR(e);
}
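
The list_empty() guards in idmap_entry_free() matter because the same free path handles both entries that were hashed and entries that lost the insertion race in lustre_idmap_add() and were never linked anywhere. A minimal userspace sketch of that dual-path free (names are illustrative):

#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h->prev = h;
}

static int list_empty(const struct list_head *h)
{
	return h->next == h;
}

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	e->next = e->prev = NULL;	/* the kernel poisons instead */
}

struct entry {
	struct list_head e_hash;	/* one of the four hash linkages */
};

static struct entry *entry_alloc(void)
{
	struct entry *e = malloc(sizeof(*e));

	if (e != NULL)
		INIT_LIST_HEAD(&e->e_hash);	/* self-linked: not hashed */
	return e;
}

static void entry_free(struct entry *e)
{
	/* Unlink only if the entry ever made it onto a hash chain; a
	 * freshly allocated entry is still self-linked and needs no
	 * unlink. */
	if (!list_empty(&e->e_hash))
		list_del(&e->e_hash);
	free(e);
}

int main(void)
{
	struct entry *never_hashed = entry_alloc();

	if (never_hashed != NULL)
		entry_free(never_hashed);	/* safe: guard skips unlink */
	return 0;
}
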
/*
uid_t rmt_uid, uid_t lcl_uid,
gid_t rmt_gid, gid_t lcl_gid)
{
- cfs_list_t *head;
+ struct list_head *head;
struct lustre_idmap_entry *e;
head = &t->lit_idmaps[RMT_UIDMAP_IDX][lustre_idmap_hashfunc(rmt_uid)];
- cfs_list_for_each_entry(e, head, lie_rmt_uid_hash)
+ list_for_each_entry(e, head, lie_rmt_uid_hash)
if (e->lie_rmt_uid == rmt_uid) {
if (e->lie_lcl_uid == lcl_uid) {
if (e->lie_rmt_gid == rmt_gid &&
}
head = &t->lit_idmaps[RMT_GIDMAP_IDX][lustre_idmap_hashfunc(rmt_gid)];
- cfs_list_for_each_entry(e, head, lie_rmt_gid_hash)
+ list_for_each_entry(e, head, lie_rmt_gid_hash)
if (e->lie_rmt_gid == rmt_gid) {
if (e->lie_lcl_gid == lcl_gid) {
if (unlikely(e->lie_rmt_uid == rmt_uid &&
return NULL;
}
-static __u32 idmap_lookup_uid(cfs_list_t *hash, int reverse,
+static __u32 idmap_lookup_uid(struct list_head *hash, int reverse,
__u32 uid)
{
- cfs_list_t *head = &hash[lustre_idmap_hashfunc(uid)];
- struct lustre_idmap_entry *e;
+ struct list_head *head = &hash[lustre_idmap_hashfunc(uid)];
+ struct lustre_idmap_entry *e;
- if (!reverse) {
- cfs_list_for_each_entry(e, head, lie_rmt_uid_hash)
- if (e->lie_rmt_uid == uid)
- return e->lie_lcl_uid;
- } else {
- cfs_list_for_each_entry(e, head, lie_lcl_uid_hash)
- if (e->lie_lcl_uid == uid)
- return e->lie_rmt_uid;
- }
+ if (!reverse) {
+ list_for_each_entry(e, head, lie_rmt_uid_hash)
+ if (e->lie_rmt_uid == uid)
+ return e->lie_lcl_uid;
+ } else {
+ list_for_each_entry(e, head, lie_lcl_uid_hash)
+ if (e->lie_lcl_uid == uid)
+ return e->lie_rmt_uid;
+ }
- return CFS_IDMAP_NOTFOUND;
+ return CFS_IDMAP_NOTFOUND;
}
-static __u32 idmap_lookup_gid(cfs_list_t *hash, int reverse, __u32 gid)
+static __u32 idmap_lookup_gid(struct list_head *hash, int reverse, __u32 gid)
{
- cfs_list_t *head = &hash[lustre_idmap_hashfunc(gid)];
+ struct list_head *head = &hash[lustre_idmap_hashfunc(gid)];
struct lustre_idmap_entry *e;
if (!reverse) {
- cfs_list_for_each_entry(e, head, lie_rmt_gid_hash)
+ list_for_each_entry(e, head, lie_rmt_gid_hash)
if (e->lie_rmt_gid == gid)
return e->lie_lcl_gid;
} else {
- cfs_list_for_each_entry(e, head, lie_lcl_gid_hash)
+ list_for_each_entry(e, head, lie_lcl_gid_hash)
if (e->lie_lcl_gid == gid)
return e->lie_rmt_gid;
}
return -ENOMEM;
spin_lock(&t->lit_lock);
- e1 = idmap_search_entry(t, ruid, luid, rgid, lgid);
- if (e1 == NULL) {
- cfs_list_add_tail(&e0->lie_rmt_uid_hash,
- &t->lit_idmaps[RMT_UIDMAP_IDX]
- [lustre_idmap_hashfunc(ruid)]);
- cfs_list_add_tail(&e0->lie_lcl_uid_hash,
- &t->lit_idmaps[LCL_UIDMAP_IDX]
- [lustre_idmap_hashfunc(luid)]);
- cfs_list_add_tail(&e0->lie_rmt_gid_hash,
- &t->lit_idmaps[RMT_GIDMAP_IDX]
- [lustre_idmap_hashfunc(rgid)]);
- cfs_list_add_tail(&e0->lie_lcl_gid_hash,
- &t->lit_idmaps[LCL_GIDMAP_IDX]
- [lustre_idmap_hashfunc(lgid)]);
- }
+ e1 = idmap_search_entry(t, ruid, luid, rgid, lgid);
+ if (e1 == NULL) {
+ list_add_tail(&e0->lie_rmt_uid_hash,
+ &t->lit_idmaps[RMT_UIDMAP_IDX]
+ [lustre_idmap_hashfunc(ruid)]);
+ list_add_tail(&e0->lie_lcl_uid_hash,
+ &t->lit_idmaps[LCL_UIDMAP_IDX]
+ [lustre_idmap_hashfunc(luid)]);
+ list_add_tail(&e0->lie_rmt_gid_hash,
+ &t->lit_idmaps[RMT_GIDMAP_IDX]
+ [lustre_idmap_hashfunc(rgid)]);
+ list_add_tail(&e0->lie_lcl_gid_hash,
+ &t->lit_idmaps[LCL_GIDMAP_IDX]
+ [lustre_idmap_hashfunc(lgid)]);
+ }
spin_unlock(&t->lit_lock);
- if (e1 != NULL) {
- idmap_entry_free(e0);
- if (IS_ERR(e1))
- return PTR_ERR(e1);
- }
- } else if (IS_ERR(e0)) {
- return PTR_ERR(e0);
- }
+ if (e1 != NULL) {
+ idmap_entry_free(e0);
+ if (IS_ERR(e1))
+ return PTR_ERR(e1);
+ }
+ } else if (IS_ERR(e0)) {
+ return PTR_ERR(e0);
+ }
- return 0;
+ return 0;
}
EXPORT_SYMBOL(lustre_idmap_add);
struct lustre_idmap_table *t,
int reverse, uid_t uid)
{
- cfs_list_t *hash;
+ struct list_head *hash;
if (mu && (mu->uc_valid == UCRED_OLD || mu->uc_valid == UCRED_NEW)) {
if (!reverse) {
int lustre_idmap_lookup_gid(struct lu_ucred *mu, struct lustre_idmap_table *t,
int reverse, gid_t gid)
{
- cfs_list_t *hash;
+ struct list_head *hash;
if (mu && (mu->uc_valid == UCRED_OLD || mu->uc_valid == UCRED_NEW)) {
if (!reverse) {
spin_lock_init(&t->lit_lock);
for (i = 0; i < ARRAY_SIZE(t->lit_idmaps); i++)
for (j = 0; j < ARRAY_SIZE(t->lit_idmaps[i]); j++)
- CFS_INIT_LIST_HEAD(&t->lit_idmaps[i][j]);
+ INIT_LIST_HEAD(&t->lit_idmaps[i][j]);
return t;
}
void lustre_idmap_fini(struct lustre_idmap_table *t)
{
- cfs_list_t *list;
+ struct list_head *list;
struct lustre_idmap_entry *e;
int i;
LASSERT(t);
list = t->lit_idmaps[RMT_UIDMAP_IDX];
spin_lock(&t->lit_lock);
for (i = 0; i < CFS_IDMAP_HASHSIZE; i++)
- while (!cfs_list_empty(&list[i])) {
- e = cfs_list_entry(list[i].next,
- struct lustre_idmap_entry,
- lie_rmt_uid_hash);
+ while (!list_empty(&list[i])) {
+ e = list_entry(list[i].next,
+ struct lustre_idmap_entry,
+ lie_rmt_uid_hash);
idmap_entry_free(e);
}
spin_unlock(&t->lit_lock);
init_rwsem(&loghandle->lgh_lock);
spin_lock_init(&loghandle->lgh_hdr_lock);
- CFS_INIT_LIST_HEAD(&loghandle->u.phd.phd_entry);
+ INIT_LIST_HEAD(&loghandle->u.phd.phd_entry);
atomic_set(&loghandle->lgh_refcount, 1);
return loghandle;
goto out;
if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_PLAIN)
- LASSERT(cfs_list_empty(&loghandle->u.phd.phd_entry));
+ LASSERT(list_empty(&loghandle->u.phd.phd_entry));
else if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_CAT)
- LASSERT(cfs_list_empty(&loghandle->u.chd.chd_head));
+ LASSERT(list_empty(&loghandle->u.chd.chd_head));
LASSERT(sizeof(*(loghandle->lgh_hdr)) == LLOG_CHUNK_SIZE);
OBD_FREE(loghandle->lgh_hdr, LLOG_CHUNK_SIZE);
out:
}
}
if (flags & LLOG_F_IS_CAT) {
- LASSERT(cfs_list_empty(&handle->u.chd.chd_head));
- CFS_INIT_LIST_HEAD(&handle->u.chd.chd_head);
+ LASSERT(list_empty(&handle->u.chd.chd_head));
+ INIT_LIST_HEAD(&handle->u.chd.chd_head);
llh->llh_size = sizeof(struct llog_logid_rec);
} else if (!(flags & LLOG_F_IS_PLAIN)) {
CERROR("%s: unknown flags: %#x (expected %#x or %#x)\n",
RETURN(-EBADF);
down_write(&cathandle->lgh_lock);
- cfs_list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
- u.phd.phd_entry) {
+ list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
+ u.phd.phd_entry) {
struct llog_logid *cgl = &loghandle->lgh_id;
if (ostid_id(&cgl->lgl_oi) == ostid_id(&logid->lgl_oi) &&
}
down_write(&cathandle->lgh_lock);
- cfs_list_add(&loghandle->u.phd.phd_entry, &cathandle->u.chd.chd_head);
+ list_add(&loghandle->u.phd.phd_entry, &cathandle->u.chd.chd_head);
up_write(&cathandle->lgh_lock);
loghandle->u.phd.phd_cat_handle = cathandle;
ENTRY;
- cfs_list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head,
- u.phd.phd_entry) {
+ list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head,
+ u.phd.phd_entry) {
struct llog_log_hdr *llh = loghandle->lgh_hdr;
int index;
/* unlink open-not-created llogs */
- cfs_list_del_init(&loghandle->u.phd.phd_entry);
+ list_del_init(&loghandle->u.phd.phd_entry);
llh = loghandle->lgh_hdr;
if (loghandle->lgh_obj != NULL && llh != NULL &&
(llh->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
NULL, NULL, LLOG_OPEN_NEW);
if (rc == 0) {
cathandle->u.chd.chd_current_log = loghandle;
- cfs_list_add_tail(&loghandle->u.phd.phd_entry,
- &cathandle->u.chd.chd_head);
+ list_add_tail(&loghandle->u.phd.phd_entry,
+ &cathandle->u.chd.chd_head);
}
}
up_write(&cathandle->lgh_lock);
NULL, NULL, LLOG_OPEN_NEW);
if (rc == 0) {
cathandle->u.chd.chd_next_log = loghandle;
- cfs_list_add_tail(&loghandle->u.phd.phd_entry,
- &cathandle->u.chd.chd_head);
+ list_add_tail(&loghandle->u.phd.phd_entry,
+ &cathandle->u.chd.chd_head);
}
}
up_write(&cathandle->lgh_lock);
down_write(&cathandle->lgh_lock);
if (cathandle->u.chd.chd_current_log == loghandle)
cathandle->u.chd.chd_current_log = NULL;
- cfs_list_del_init(&loghandle->u.phd.phd_entry);
+ list_del_init(&loghandle->u.phd.phd_entry);
up_write(&cathandle->lgh_lock);
LASSERT(index == loghandle->u.phd.phd_cookie.lgc_index);
/* llog was opened and keep in a list, close it now */
#include "local_storage.h"
/* all initialized local storages on this node are linked on this */
-static CFS_LIST_HEAD(ls_list_head);
+static LIST_HEAD(ls_list_head);
static DEFINE_MUTEX(ls_list_mutex);
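
The conversion swaps the libcfs initializers for the three standard kernel idioms: CFS_LIST_HEAD() becomes LIST_HEAD(), CFS_LIST_HEAD_INIT becomes LIST_HEAD_INIT, and CFS_INIT_LIST_HEAD() becomes INIT_LIST_HEAD(). A minimal userspace sketch of what each form does; the macro bodies below match the usual kernel definitions, everything else is illustrative:

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name)	{ &(name), &(name) }
#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

static int list_empty(const struct list_head *h)
{
	return h->next == h;
}

/* 1. Definition plus compile-time initialization in one shot: */
static LIST_HEAD(ls_demo_list);

/* 2. Static initializer for a head embedded in a larger object: */
struct tracker {
	struct list_head t_list;
};
static struct tracker t = { .t_list = LIST_HEAD_INIT(t.t_list) };

/* 3. Runtime initialization, for heap or on-stack heads: */
static void tracker_setup(struct tracker *tr)
{
	INIT_LIST_HEAD(&tr->t_list);
}

int main(void)
{
	struct tracker on_stack;

	tracker_setup(&on_stack);
	return (list_empty(&ls_demo_list) && list_empty(&t.t_list) &&
		list_empty(&on_stack.t_list)) ? 0 : 1;
}
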
static int ls_object_init(const struct lu_env *env, struct lu_object *o,
{
struct ls_device *ls, *ret = NULL;
- cfs_list_for_each_entry(ls, &ls_list_head, ls_linkage) {
+ list_for_each_entry(ls, &ls_list_head, ls_linkage) {
if (ls->ls_osd == dev) {
atomic_inc(&ls->ls_refcount);
ret = ls;
GOTO(out_ls, ls = ERR_PTR(-ENOMEM));
atomic_set(&ls->ls_refcount, 1);
- CFS_INIT_LIST_HEAD(&ls->ls_los_list);
+ INIT_LIST_HEAD(&ls->ls_los_list);
mutex_init(&ls->ls_los_mutex);
ls->ls_osd = dev;
ls->ls_top_dev.dd_lu_dev.ld_site = dev->dd_lu_dev.ld_site;
/* finally add ls to the list */
- cfs_list_add(&ls->ls_linkage, &ls_list_head);
+ list_add(&ls->ls_linkage, &ls_list_head);
out_ls:
mutex_unlock(&ls_list_mutex);
RETURN(ls);
mutex_lock(&ls_list_mutex);
if (atomic_read(&ls->ls_refcount) == 0) {
- LASSERT(cfs_list_empty(&ls->ls_los_list));
- cfs_list_del(&ls->ls_linkage);
+ LASSERT(list_empty(&ls->ls_los_list));
+ list_del(&ls->ls_linkage);
lu_site_purge(env, ls->ls_top_dev.dd_lu_dev.ld_site, ~0);
lu_device_fini(&ls->ls_top_dev.dd_lu_dev);
OBD_FREE_PTR(ls);
{
struct local_oid_storage *los, *ret = NULL;
- cfs_list_for_each_entry(los, &ls->ls_los_list, los_list) {
+ list_for_each_entry(los, &ls->ls_los_list, los_list) {
if (los->los_seq == seq) {
atomic_inc(&los->los_refcount);
ret = los;
mutex_init(&(*los)->los_id_lock);
(*los)->los_dev = &ls->ls_top_dev;
atomic_inc(&ls->ls_refcount);
- cfs_list_add(&(*los)->los_list, &ls->ls_los_list);
+ list_add(&(*los)->los_list, &ls->ls_los_list);
/* Use {seq, 0, 0} to create the LAST_ID file for every
* sequence. OIDs start at LUSTRE_FID_INIT_OID.
}
out_los:
if (rc != 0) {
- cfs_list_del(&(*los)->los_list);
+ list_del(&(*los)->los_list);
atomic_dec(&ls->ls_refcount);
OBD_FREE_PTR(*los);
*los = NULL;
if (los->los_obj)
lu_object_put_nocache(env, &los->los_obj->do_lu);
- cfs_list_del(&los->los_list);
+ list_del(&los->los_list);
OBD_FREE_PTR(los);
mutex_unlock(&ls->ls_los_mutex);
ls_device_put(env, ls);
struct ls_device {
struct dt_device ls_top_dev;
/* all initialized ls_devices on this node linked by this */
- cfs_list_t ls_linkage;
+ struct list_head ls_linkage;
/* how many handle's reference this local storage */
atomic_t ls_refcount;
	/* underlying OSD device */
struct dt_device *ls_osd;
/* list of all local OID storages */
- cfs_list_t ls_los_list;
+ struct list_head ls_los_list;
struct mutex ls_los_mutex;
};
*/
struct job_stat {
- cfs_hlist_node_t js_hash;
- cfs_list_t js_list;
- atomic_t js_refcount;
- char js_jobid[JOBSTATS_JOBID_SIZE];
- time_t js_timestamp; /* seconds */
- struct lprocfs_stats *js_stats;
- struct obd_job_stats *js_jobstats;
+ struct hlist_node js_hash;
+ struct list_head js_list;
+ atomic_t js_refcount;
+ char js_jobid[JOBSTATS_JOBID_SIZE];
+ time_t js_timestamp; /* seconds */
+ struct lprocfs_stats *js_stats;
+ struct obd_job_stats *js_jobstats;
};
static unsigned job_stat_hash(cfs_hash_t *hs, const void *key, unsigned mask)
return cfs_hash_djb2_hash(key, strlen(key), mask);
}
-static void *job_stat_key(cfs_hlist_node_t *hnode)
+static void *job_stat_key(struct hlist_node *hnode)
{
struct job_stat *job;
- job = cfs_hlist_entry(hnode, struct job_stat, js_hash);
+ job = hlist_entry(hnode, struct job_stat, js_hash);
return job->js_jobid;
}
-static int job_stat_keycmp(const void *key, cfs_hlist_node_t *hnode)
+static int job_stat_keycmp(const void *key, struct hlist_node *hnode)
{
struct job_stat *job;
- job = cfs_hlist_entry(hnode, struct job_stat, js_hash);
+ job = hlist_entry(hnode, struct job_stat, js_hash);
return (strlen(job->js_jobid) == strlen(key)) &&
!strncmp(job->js_jobid, key, strlen(key));
}
-static void *job_stat_object(cfs_hlist_node_t *hnode)
+static void *job_stat_object(struct hlist_node *hnode)
{
- return cfs_hlist_entry(hnode, struct job_stat, js_hash);
+ return hlist_entry(hnode, struct job_stat, js_hash);
}
-static void job_stat_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void job_stat_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
struct job_stat *job;
- job = cfs_hlist_entry(hnode, struct job_stat, js_hash);
+ job = hlist_entry(hnode, struct job_stat, js_hash);
atomic_inc(&job->js_refcount);
}
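
Every *_entry() rename in this hunk is the same macro underneath: hlist_entry() and list_entry() are container_of(), mapping a pointer to an embedded linkage field back to its enclosing structure. A small self-contained illustration (struct job_stat_demo is hypothetical):

#include <stddef.h>
#include <assert.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hlist_node { struct hlist_node *next, **pprev; };

#define hlist_entry(ptr, type, member) container_of(ptr, type, member)

struct job_stat_demo {
	int		  js_id;
	struct hlist_node js_hash;	/* linkage lives inside the object */
};

int main(void)
{
	struct job_stat_demo job = { .js_id = 42 };
	struct hlist_node *hnode = &job.js_hash;

	/* Recover the enclosing object from its hash linkage, exactly
	 * what job_stat_key() and friends do above. */
	assert(hlist_entry(hnode, struct job_stat_demo, js_hash) == &job);
	return 0;
}
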
LASSERT(job->js_jobstats);
write_lock(&job->js_jobstats->ojs_lock);
- cfs_list_del_init(&job->js_list);
+ list_del_init(&job->js_list);
write_unlock(&job->js_jobstats->ojs_lock);
lprocfs_free_stats(&job->js_stats);
job_free(job);
}
-static void job_stat_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void job_stat_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
{
struct job_stat *job;
- job = cfs_hlist_entry(hnode, struct job_stat, js_hash);
+ job = hlist_entry(hnode, struct job_stat, js_hash);
job_putref(job);
}
-static void job_stat_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void job_stat_exit(cfs_hash_t *hs, struct hlist_node *hnode)
{
CERROR("should not have any items\n");
}
};
static int job_iter_callback(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *data)
+ struct hlist_node *hnode, void *data)
{
time_t oldest = *((time_t *)data);
struct job_stat *job;
- job = cfs_hlist_entry(hnode, struct job_stat, js_hash);
+ job = hlist_entry(hnode, struct job_stat, js_hash);
if (!oldest || job->js_timestamp < oldest)
cfs_hash_bd_del_locked(hs, bd, hnode);
memcpy(job->js_jobid, jobid, JOBSTATS_JOBID_SIZE);
job->js_timestamp = cfs_time_current_sec();
job->js_jobstats = jobs;
- CFS_INIT_HLIST_NODE(&job->js_hash);
- CFS_INIT_LIST_HEAD(&job->js_list);
+ INIT_HLIST_NODE(&job->js_hash);
+ INIT_LIST_HEAD(&job->js_list);
atomic_set(&job->js_refcount, 1);
return job;
if (job2 != job) {
job_putref(job);
job = job2;
- /* We cannot LASSERT(!cfs_list_empty(&job->js_list)) here,
+ /* We cannot LASSERT(!list_empty(&job->js_list)) here,
* since we just lost the race for inserting "job" into the
* ojs_list, and some other thread is doing it _right_now_.
* Instead, be content the other thread is doing this, since
* "job2" was initialized in job_alloc() already. LU-2163 */
} else {
- LASSERT(cfs_list_empty(&job->js_list));
+ LASSERT(list_empty(&job->js_list));
write_lock(&stats->ojs_lock);
- cfs_list_add_tail(&job->js_list, &stats->ojs_list);
+ list_add_tail(&job->js_list, &stats->ojs_list);
write_unlock(&stats->ojs_lock);
}
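
The LU-2163 comment above hinges on two separate critical sections: the hash table's atomic find-or-add decides the winner, but the winner adds itself to ojs_list later, under ojs_lock, so the loser may see a job that is not on the list yet. The skeleton below is a deliberately simplified, single-lock userspace sketch of the underlying allocate-then-recheck pattern (all names hypothetical); it cannot exhibit that particular window precisely because it does both steps under one mutex:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct node {
	struct node *next;
	char	     key[32];
};

static struct node *table;
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static struct node *lookup_locked(const char *key)
{
	struct node *n;

	for (n = table; n != NULL; n = n->next)
		if (strcmp(n->key, key) == 0)
			return n;
	return NULL;
}

/* Allocate optimistically outside the lock, re-check under the lock,
 * and discard the duplicate if another thread inserted first. */
static struct node *find_or_add(const char *key)
{
	struct node *fresh, *found;

	fresh = calloc(1, sizeof(*fresh));
	if (fresh == NULL)
		return NULL;
	strncpy(fresh->key, key, sizeof(fresh->key) - 1);

	pthread_mutex_lock(&table_lock);
	found = lookup_locked(key);
	if (found == NULL) {
		fresh->next = table;	/* we won: publish our copy */
		table = fresh;
		found = fresh;
		fresh = NULL;
	}
	pthread_mutex_unlock(&table_lock);

	free(fresh);			/* no-op if we won the race */
	return found;
}

int main(void)
{
	struct node *a = find_or_add("job-a");
	struct node *b = find_or_add("job-a");
	int ok = (a != NULL && a == b);

	free(a);
	return ok ? 0 : 1;
}
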
cfs_hash_for_each_safe(stats->ojs_hash, job_iter_callback, &oldest);
cfs_hash_putref(stats->ojs_hash);
stats->ojs_hash = NULL;
- LASSERT(cfs_list_empty(&stats->ojs_list));
+ LASSERT(list_empty(&stats->ojs_list));
}
EXPORT_SYMBOL(lprocfs_job_stats_fini);
if (off == 0)
return SEQ_START_TOKEN;
off--;
- cfs_list_for_each_entry(job, &stats->ojs_list, js_list) {
+ list_for_each_entry(job, &stats->ojs_list, js_list) {
if (!off--)
return job;
}
{
struct obd_job_stats *stats = p->private;
struct job_stat *job;
- cfs_list_t *next;
+ struct list_head *next;
++*pos;
if (v == SEQ_START_TOKEN) {
}
return next == &stats->ojs_list ? NULL :
- cfs_list_entry(next, struct job_stat, js_list);
+ list_entry(next, struct job_stat, js_list);
}
/*
if (stats->ojs_hash == NULL)
RETURN(-ENOMEM);
- CFS_INIT_LIST_HEAD(&stats->ojs_list);
+ INIT_LIST_HEAD(&stats->ojs_list);
rwlock_init(&stats->ojs_lock);
stats->ojs_cntr_num = cntr_num;
stats->ojs_cntr_init_fn = init_fn;
	/* no locking needed because all clients have died */
while (!list_empty(&obd->obd_nid_stats)) {
stat = list_entry(obd->obd_nid_stats.next,
- struct nid_stat, nid_list);
+ struct nid_stat, nid_list);
list_del_init(&stat->nid_list);
cfs_hash_del(hash, &stat->nid, &stat->nid_hash);
lprocfs_free_client_stats(stat);
lprocfs_nid_stats_clear_seq_write(struct file *file, const char *buffer,
size_t count, loff_t *off)
{
- struct list_head free_list = LIST_HEAD_INIT(free_list);
struct seq_file *m = file->private_data;
struct obd_device *obd = m->private;
struct nid_stat *client_stat;
+	LIST_HEAD(free_list);
cfs_hash_cond_del(obd->obd_nid_stats_hash,
lprocfs_nid_stats_clear_write_cb, &free_list);
int lprocfs_nid_stats_clear_write(struct file *file, const char *buffer,
unsigned long count, void *data)
{
- struct list_head free_list = LIST_HEAD_INIT(free_list);
struct obd_device *obd = (struct obd_device *)data;
struct nid_stat *client_stat;
+	LIST_HEAD(free_list);
cfs_hash_cond_del(obd->obd_nid_stats_hash,
lprocfs_nid_stats_clear_write_cb, &free_list);
while (!list_empty(&free_list)) {
client_stat = list_entry(free_list.next, struct nid_stat,
- nid_list);
+ nid_list);
list_del_init(&client_stat->nid_list);
lprocfs_free_client_stats(client_stat);
}
#include <lu_ref.h>
#include <libcfs/list.h>
-extern spinlock_t obd_types_lock;
-
enum {
LU_CACHE_PERCENT_MAX = 50,
LU_CACHE_PERCENT_DEFAULT = 20
if (fid_is_zero(fid)) {
LASSERT(top->loh_hash.next == NULL
&& top->loh_hash.pprev == NULL);
- LASSERT(cfs_list_empty(&top->loh_lru));
+ LASSERT(list_empty(&top->loh_lru));
if (!atomic_dec_and_test(&top->loh_ref))
return;
- cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
+ list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
if (o->lo_ops->loo_object_release != NULL)
o->lo_ops->loo_object_release(env, o);
}
* When last reference is released, iterate over object
* layers, and notify them that object is no longer busy.
*/
- cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
+ list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
if (o->lo_ops->loo_object_release != NULL)
o->lo_ops->loo_object_release(env, o);
}
if (!lu_object_is_dying(top)) {
- LASSERT(cfs_list_empty(&top->loh_lru));
- cfs_list_add_tail(&top->loh_lru, &bkt->lsb_lru);
+ LASSERT(list_empty(&top->loh_lru));
+ list_add_tail(&top->loh_lru, &bkt->lsb_lru);
cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
return;
}
cfs_hash_bd_t bd;
cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
- cfs_list_del_init(&top->loh_lru);
+ list_del_init(&top->loh_lru);
cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
cfs_hash_bd_unlock(obj_hash, &bd, 1);
}
{
struct lu_object *scan;
struct lu_object *top;
- cfs_list_t *layers;
+ struct list_head *layers;
unsigned int init_mask = 0;
unsigned int init_flag;
int clean;
*/
clean = 1;
init_flag = 1;
- cfs_list_for_each_entry(scan, layers, lo_linkage) {
+ list_for_each_entry(scan, layers, lo_linkage) {
if (init_mask & init_flag)
goto next;
clean = 0;
}
} while (!clean);
- cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
+ list_for_each_entry_reverse(scan, layers, lo_linkage) {
if (scan->lo_ops->loo_object_start != NULL) {
result = scan->lo_ops->loo_object_start(env, scan);
if (result != 0) {
*/
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
- struct lu_site_bkt_data *bkt;
- struct lu_site *site;
- struct lu_object *scan;
- cfs_list_t *layers;
- cfs_list_t splice;
+ struct lu_site_bkt_data *bkt;
+ struct lu_site *site;
+ struct lu_object *scan;
+ struct list_head *layers;
+ struct list_head splice;
site = o->lo_dev->ld_site;
layers = &o->lo_header->loh_layers;
/*
* First call ->loo_object_delete() method to release all resources.
*/
- cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
+ list_for_each_entry_reverse(scan, layers, lo_linkage) {
if (scan->lo_ops->loo_object_delete != NULL)
scan->lo_ops->loo_object_delete(env, scan);
}
* necessary, because lu_object_header is freed together with the
* top-level slice.
*/
- CFS_INIT_LIST_HEAD(&splice);
- cfs_list_splice_init(layers, &splice);
- while (!cfs_list_empty(&splice)) {
+ INIT_LIST_HEAD(&splice);
+ list_splice_init(layers, &splice);
+ while (!list_empty(&splice)) {
/*
* Free layers in bottom-to-top order, so that object header
* lives as long as possible and ->loo_object_free() methods
* can look at its contents.
*/
o = container_of0(splice.prev, struct lu_object, lo_linkage);
- cfs_list_del_init(&o->lo_linkage);
+ list_del_init(&o->lo_linkage);
LASSERT(o->lo_ops->loo_object_free != NULL);
o->lo_ops->loo_object_free(env, o);
}
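
The splice-and-drain loop above is what makes the bottom-to-top order work: entries are consumed from splice.prev, so the bottom layer goes first and the top-level slice, which embeds the lu_object_header that lower layers' ->loo_object_free() methods may still inspect, goes last. A minimal userspace sketch of list_splice_init() and the bottom-up drain (names illustrative):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h->prev = h;
}

static int list_empty(const struct list_head *h)
{
	return h->next == h;
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	INIT_LIST_HEAD(e);
}

/* Move every entry of @src onto the front of @dst, leaving @src empty. */
static void list_splice_init(struct list_head *src, struct list_head *dst)
{
	if (!list_empty(src)) {
		src->next->prev = dst;
		src->prev->next = dst->next;
		dst->next->prev = src->prev;
		dst->next = src->next;
		INIT_LIST_HEAD(src);
	}
}

struct layer {
	struct list_head l_linkage;
	const char	*l_name;
};

/* Drain from splice.prev: bottom layer first, top layer (and anything
 * embedded in it, like the object header) last. */
static void free_bottom_up(struct list_head *layers)
{
	struct list_head splice;

	INIT_LIST_HEAD(&splice);
	list_splice_init(layers, &splice);
	while (!list_empty(&splice)) {
		struct layer *l = list_entry(splice.prev, struct layer,
					     l_linkage);

		list_del_init(&l->l_linkage);
		printf("freeing layer %s\n", l->l_name);
	}
}

int main(void)
{
	struct layer top = { .l_name = "top" };
	struct layer bottom = { .l_name = "bottom" };
	struct list_head layers;

	INIT_LIST_HEAD(&layers);
	list_add_tail(&top.l_linkage, &layers);	/* kept top-to-bottom */
	list_add_tail(&bottom.l_linkage, &layers);
	free_bottom_up(&layers);		/* prints bottom, then top */
	return 0;
}
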
struct lu_site_bkt_data *bkt;
cfs_hash_bd_t bd;
cfs_hash_bd_t bd2;
- cfs_list_t dispose;
+ struct list_head dispose;
int did_sth;
int start;
int count;
if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
RETURN(0);
- CFS_INIT_LIST_HEAD(&dispose);
+ INIT_LIST_HEAD(&dispose);
/*
* Under LRU list lock, scan LRU list and move unreferenced objects to
* the dispose list, removing them from LRU and hash table.
cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
- cfs_list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
+ list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
LASSERT(atomic_read(&h->loh_ref) == 0);
cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
cfs_hash_bd_del_locked(s->ls_obj_hash,
&bd2, &h->loh_hash);
- cfs_list_move(&h->loh_lru, &dispose);
+ list_move(&h->loh_lru, &dispose);
if (did_sth == 0)
did_sth = 1;
* Free everything on the dispose list. This is safe against
* races due to the reasons described in lu_object_put().
*/
- while (!cfs_list_empty(&dispose)) {
- h = container_of0(dispose.next,
- struct lu_object_header, loh_lru);
- cfs_list_del_init(&h->loh_lru);
- lu_object_free(env, lu_object_top(h));
- lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
- }
+ while (!list_empty(&dispose)) {
+ h = container_of0(dispose.next,
+ struct lu_object_header, loh_lru);
+ list_del_init(&h->loh_lru);
+ lu_object_free(env, lu_object_top(h));
+ lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
+ }
if (nr == 0)
break;
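
The LRU scan above moves entries off the bucket list while walking it, which is only safe with list_for_each_entry_safe(): list_move() rewrites the cursor's links, so a plain iterator would wander into the dispose list. A compact userspace sketch (illustrative names; the macro body matches the usual kernel definition, using GNU __typeof__):

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Cache the next node before the body runs, so the body may unlink or
 * move @pos without derailing the walk. */
#define list_for_each_entry_safe(pos, n, head, member)			  \
	for (pos = list_entry((head)->next, __typeof__(*pos), member),	  \
	     n = list_entry(pos->member.next, __typeof__(*pos), member); \
	     &pos->member != (head);					  \
	     pos = n,							  \
	     n = list_entry(n->member.next, __typeof__(*pos), member))

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h->prev = h;
}

static void list_add(struct list_head *e, struct list_head *h)
{
	e->next = h->next;
	e->prev = h;
	h->next->prev = e;
	h->next = e;
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_move(struct list_head *e, struct list_head *h)
{
	list_del(e);
	list_add(e, h);
}

struct object {
	struct list_head o_lru;
	int		 o_refs;
};

/* Move every unreferenced object to @dispose. A plain iterator would
 * follow o->o_lru.next after the move and wander into @dispose. */
static void purge_lru(struct list_head *lru, struct list_head *dispose)
{
	struct object *o, *next;

	list_for_each_entry_safe(o, next, lru, o_lru)
		if (o->o_refs == 0)
			list_move(&o->o_lru, dispose);
}

int main(void)
{
	struct object a = { .o_refs = 0 }, b = { .o_refs = 1 };
	struct list_head lru, dispose;

	INIT_LIST_HEAD(&lru);
	INIT_LIST_HEAD(&dispose);
	list_add_tail(&a.o_lru, &lru);
	list_add_tail(&b.o_lru, &lru);
	purge_lru(&lru, &dispose);
	/* a is on dispose, busy b stays on the LRU */
	return (lru.next == &b.o_lru && dispose.next == &a.o_lru) ? 0 : 1;
}
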
lu_printer_t printer,
const struct lu_object_header *hdr)
{
- (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
+ (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
- PFID(&hdr->loh_fid),
- cfs_hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
- cfs_list_empty((cfs_list_t *)&hdr->loh_lru) ? \
- "" : " lru",
- hdr->loh_attr & LOHA_EXISTS ? " exist":"");
+ PFID(&hdr->loh_fid),
+ hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
+ list_empty((struct list_head *)&hdr->loh_lru) ? \
+ "" : " lru",
+ hdr->loh_attr & LOHA_EXISTS ? " exist" : "");
}
EXPORT_SYMBOL(lu_object_header_print);
lu_object_header_print(env, cookie, printer, top);
(*printer)(env, cookie, "{\n");
- cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
+ list_for_each_entry(o, &top->loh_layers, lo_linkage) {
/*
* print `.' \a depth times followed by type name and address
*/
struct lu_object_header *top;
top = o->lo_header;
- cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
+ list_for_each_entry(o, &top->loh_layers, lo_linkage) {
if (o->lo_ops->loo_object_invariant != NULL &&
!o->lo_ops->loo_object_invariant(o))
return 0;
wait_queue_t *waiter,
__u64 *version)
{
- struct lu_site_bkt_data *bkt;
- struct lu_object_header *h;
- cfs_hlist_node_t *hnode;
- __u64 ver = cfs_hash_bd_version_get(bd);
+ struct lu_site_bkt_data *bkt;
+ struct lu_object_header *h;
+ struct hlist_node *hnode;
+ __u64 ver = cfs_hash_bd_version_get(bd);
if (*version == ver)
return ERR_PTR(-ENOENT);
if (likely(!lu_object_is_dying(h))) {
cfs_hash_get(s->ls_obj_hash, hnode);
lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
- cfs_list_del_init(&h->loh_lru);
+ list_del_init(&h->loh_lru);
return lu_object_top(h);
}
cfs_hash_bd_t *bd,
const struct lu_fid *f)
{
- cfs_hlist_node_t *hnode;
+ struct hlist_node *hnode;
struct lu_object_header *h;
	/* cfs_hash_bd_peek_locked is a somewhat "internal" function
cfs_hash_get(s->ls_obj_hash, hnode);
lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
- cfs_list_del_init(&h->loh_lru);
+ list_del_init(&h->loh_lru);
return lu_object_top(h);
}
/**
* Global list of all device types.
*/
-static CFS_LIST_HEAD(lu_device_types);
+static LIST_HEAD(lu_device_types);
int lu_device_type_init(struct lu_device_type *ldt)
{
/**
* Global list of all sites on this node
*/
-static CFS_LIST_HEAD(lu_sites);
+static LIST_HEAD(lu_sites);
static DEFINE_MUTEX(lu_sites_guard);
/**
static int
lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *data)
+ struct hlist_node *hnode, void *data)
{
- struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
- struct lu_object_header *h;
+ struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
+ struct lu_object_header *h;
- h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
- if (!cfs_list_empty(&h->loh_layers)) {
- const struct lu_object *o;
+ h = hlist_entry(hnode, struct lu_object_header, loh_hash);
+ if (!list_empty(&h->loh_layers)) {
+ const struct lu_object *o;
- o = lu_object_top(h);
- lu_object_print(arg->lsp_env, arg->lsp_cookie,
- arg->lsp_printer, o);
- } else {
- lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
- arg->lsp_printer, h);
- }
- return 0;
+ o = lu_object_top(h);
+ lu_object_print(arg->lsp_env, arg->lsp_cookie,
+ arg->lsp_printer, o);
+ } else {
+ lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
+ arg->lsp_printer, h);
+ }
+ return 0;
}
/**
return hash & mask;
}
-static void *lu_obj_hop_object(cfs_hlist_node_t *hnode)
+static void *lu_obj_hop_object(struct hlist_node *hnode)
{
- return cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
+ return hlist_entry(hnode, struct lu_object_header, loh_hash);
}
-static void *lu_obj_hop_key(cfs_hlist_node_t *hnode)
+static void *lu_obj_hop_key(struct hlist_node *hnode)
{
- struct lu_object_header *h;
+ struct lu_object_header *h;
- h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
- return &h->loh_fid;
+ h = hlist_entry(hnode, struct lu_object_header, loh_hash);
+ return &h->loh_fid;
}
-static int lu_obj_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
+static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode)
{
- struct lu_object_header *h;
+ struct lu_object_header *h;
- h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
- return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
+ h = hlist_entry(hnode, struct lu_object_header, loh_hash);
+ return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
}
-static void lu_obj_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct lu_object_header *h;
+ struct lu_object_header *h;
- h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
+ h = hlist_entry(hnode, struct lu_object_header, loh_hash);
if (atomic_add_return(1, &h->loh_ref) == 1) {
- struct lu_site_bkt_data *bkt;
- cfs_hash_bd_t bd;
+ struct lu_site_bkt_data *bkt;
+ cfs_hash_bd_t bd;
- cfs_hash_bd_get(hs, &h->loh_fid, &bd);
- bkt = cfs_hash_bd_extra_get(hs, &bd);
- bkt->lsb_busy++;
- }
+ cfs_hash_bd_get(hs, &h->loh_fid, &bd);
+ bkt = cfs_hash_bd_extra_get(hs, &bd);
+ bkt->lsb_busy++;
+ }
}
-static void lu_obj_hop_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void lu_obj_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
{
	LBUG(); /* we should never be called here */
}
void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
{
spin_lock(&s->ls_ld_lock);
- if (cfs_list_empty(&d->ld_linkage))
- cfs_list_add(&d->ld_linkage, &s->ls_ld_linkage);
+ if (list_empty(&d->ld_linkage))
+ list_add(&d->ld_linkage, &s->ls_ld_linkage);
spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_add_linkage);
void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d)
{
spin_lock(&s->ls_ld_lock);
- cfs_list_del_init(&d->ld_linkage);
+ list_del_init(&d->ld_linkage);
spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_del_linkage);
int i;
ENTRY;
memset(s, 0, sizeof *s);
mutex_init(&s->ls_purge_mutex);
bits = lu_htable_order(top);
cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
- CFS_INIT_LIST_HEAD(&bkt->lsb_lru);
+ INIT_LIST_HEAD(&bkt->lsb_lru);
init_waitqueue_head(&bkt->lsb_marche_funebre);
}
lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
0, "lru_purged", "lru_purged");
- CFS_INIT_LIST_HEAD(&s->ls_linkage);
+ INIT_LIST_HEAD(&s->ls_linkage);
s->ls_top_dev = top;
top->ld_site = s;
lu_device_get(top);
lu_ref_add(&top->ld_reference, "site-top", s);
- CFS_INIT_LIST_HEAD(&s->ls_ld_linkage);
+ INIT_LIST_HEAD(&s->ls_ld_linkage);
spin_lock_init(&s->ls_ld_lock);
lu_dev_add_linkage(s, top);
void lu_site_fini(struct lu_site *s)
{
mutex_lock(&lu_sites_guard);
- cfs_list_del_init(&s->ls_linkage);
+ list_del_init(&s->ls_linkage);
mutex_unlock(&lu_sites_guard);
if (s->ls_obj_hash != NULL) {
mutex_lock(&lu_sites_guard);
result = lu_context_refill(&lu_shrink_env.le_ctx);
if (result == 0)
- cfs_list_add(&s->ls_linkage, &lu_sites);
+ list_add(&s->ls_linkage, &lu_sites);
mutex_unlock(&lu_sites_guard);
return result;
}
o->lo_dev = d;
lu_device_get(d);
lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o);
- CFS_INIT_LIST_HEAD(&o->lo_linkage);
+ INIT_LIST_HEAD(&o->lo_linkage);
return 0;
}
{
struct lu_device *dev = o->lo_dev;
- LASSERT(cfs_list_empty(&o->lo_linkage));
+ LASSERT(list_empty(&o->lo_linkage));
if (dev != NULL) {
lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref,
*/
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
{
- cfs_list_move(&o->lo_linkage, &h->loh_layers);
+ list_move(&o->lo_linkage, &h->loh_layers);
}
EXPORT_SYMBOL(lu_object_add_top);
*/
void lu_object_add(struct lu_object *before, struct lu_object *o)
{
- cfs_list_move(&o->lo_linkage, &before->lo_linkage);
+ list_move(&o->lo_linkage, &before->lo_linkage);
}
EXPORT_SYMBOL(lu_object_add);
{
memset(h, 0, sizeof *h);
atomic_set(&h->loh_ref, 1);
- CFS_INIT_HLIST_NODE(&h->loh_hash);
- CFS_INIT_LIST_HEAD(&h->loh_lru);
- CFS_INIT_LIST_HEAD(&h->loh_layers);
+ INIT_HLIST_NODE(&h->loh_hash);
+ INIT_LIST_HEAD(&h->loh_lru);
+ INIT_LIST_HEAD(&h->loh_layers);
lu_ref_init(&h->loh_reference);
return 0;
}
*/
void lu_object_header_fini(struct lu_object_header *h)
{
- LASSERT(cfs_list_empty(&h->loh_layers));
- LASSERT(cfs_list_empty(&h->loh_lru));
- LASSERT(cfs_hlist_unhashed(&h->loh_hash));
+ LASSERT(list_empty(&h->loh_layers));
+ LASSERT(list_empty(&h->loh_lru));
+ LASSERT(hlist_unhashed(&h->loh_hash));
lu_ref_fini(&h->loh_reference);
}
EXPORT_SYMBOL(lu_object_header_fini);
struct lu_object *lu_object_locate(struct lu_object_header *h,
const struct lu_device_type *dtype)
{
- struct lu_object *o;
+ struct lu_object *o;
- cfs_list_for_each_entry(o, &h->loh_layers, lo_linkage) {
- if (o->lo_dev->ld_type == dtype)
- return o;
- }
- return NULL;
+ list_for_each_entry(o, &h->loh_layers, lo_linkage) {
+ if (o->lo_dev->ld_type == dtype)
+ return o;
+ }
+ return NULL;
}
EXPORT_SYMBOL(lu_object_locate);
-
-
/**
* Finalize and free devices in the device stack.
*
/**
* List of remembered contexts. XXX document me.
*/
-static CFS_LIST_HEAD(lu_context_remembered);
+static LIST_HEAD(lu_context_remembered);
/**
* Destroy \a key in all remembered contexts. This is used to destroy key
* XXX memory barrier has to go here.
*/
spin_lock(&lu_keys_guard);
- cfs_list_for_each_entry(ctx, &lu_context_remembered,
- lc_remember)
+ list_for_each_entry(ctx, &lu_context_remembered,
+ lc_remember)
key_fini(ctx, key->lct_index);
spin_unlock(&lu_keys_guard);
++key_set_version;
ctx->lc_tags = tags;
if (tags & LCT_REMEMBER) {
spin_lock(&lu_keys_guard);
- cfs_list_add(&ctx->lc_remember, &lu_context_remembered);
+ list_add(&ctx->lc_remember, &lu_context_remembered);
spin_unlock(&lu_keys_guard);
} else {
- CFS_INIT_LIST_HEAD(&ctx->lc_remember);
+ INIT_LIST_HEAD(&ctx->lc_remember);
}
rc = keys_init(ctx);
ctx->lc_state = LCS_FINALIZED;
if ((ctx->lc_tags & LCT_REMEMBER) == 0) {
- LASSERT(cfs_list_empty(&ctx->lc_remember));
+ LASSERT(list_empty(&ctx->lc_remember));
keys_fini(ctx);
} else { /* could race with key degister */
spin_lock(&lu_keys_guard);
keys_fini(ctx);
- cfs_list_del_init(&ctx->lc_remember);
+ list_del_init(&ctx->lc_remember);
spin_unlock(&lu_keys_guard);
}
}
cfs_hash_for_each_bucket(hs, &bd, i) {
struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
- cfs_hlist_head_t *hhead;
+ struct hlist_head *hhead;
cfs_hash_bd_lock(hs, &bd, 1);
stats->lss_busy += bkt->lsb_busy;
}
cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
- if (!cfs_hlist_empty(hhead))
+ if (!hlist_empty(hhead))
stats->lss_populated++;
}
cfs_hash_bd_unlock(hs, &bd, 1);
CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);
result = lu_ref_global_init();
if (result != 0)
return result;
*
* Protected by lu_ref_refs_guard.
*/
-static CFS_LIST_HEAD(lu_ref_refs);
+static LIST_HEAD(lu_ref_refs);
static spinlock_t lu_ref_refs_guard;
static struct lu_ref lu_ref_marker = {
.lf_guard = __SPIN_LOCK_UNLOCKED(lu_ref_marker.lf_guard),
- .lf_list = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_list),
- .lf_linkage = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_linkage)
+ .lf_list = LIST_HEAD_INIT(lu_ref_marker.lf_list),
+ .lf_linkage = LIST_HEAD_INIT(lu_ref_marker.lf_linkage)
};
void lu_ref_print(const struct lu_ref *ref)
CERROR("lu_ref: %p %d %d %s:%d\n",
ref, ref->lf_refs, ref->lf_failed, ref->lf_func, ref->lf_line);
- cfs_list_for_each_entry(link, &ref->lf_list, ll_linkage) {
+ list_for_each_entry(link, &ref->lf_list, ll_linkage) {
CERROR(" link: %s %p\n", link->ll_scope, link->ll_source);
}
}
struct lu_ref *ref;
spin_lock(&lu_ref_refs_guard);
- cfs_list_for_each_entry(ref, &lu_ref_refs, lf_linkage) {
+ list_for_each_entry(ref, &lu_ref_refs, lf_linkage) {
if (lu_ref_is_marker(ref))
continue;
ref->lf_func = func;
ref->lf_line = line;
spin_lock_init(&ref->lf_guard);
- CFS_INIT_LIST_HEAD(&ref->lf_list);
+ INIT_LIST_HEAD(&ref->lf_list);
spin_lock(&lu_ref_refs_guard);
- cfs_list_add(&ref->lf_linkage, &lu_ref_refs);
+ list_add(&ref->lf_linkage, &lu_ref_refs);
spin_unlock(&lu_ref_refs_guard);
}
EXPORT_SYMBOL(lu_ref_init_loc);
void lu_ref_fini(struct lu_ref *ref)
{
- REFASSERT(ref, cfs_list_empty(&ref->lf_list));
+ REFASSERT(ref, list_empty(&ref->lf_list));
REFASSERT(ref, ref->lf_refs == 0);
spin_lock(&lu_ref_refs_guard);
- cfs_list_del_init(&ref->lf_linkage);
+ list_del_init(&ref->lf_linkage);
spin_unlock(&lu_ref_refs_guard);
}
EXPORT_SYMBOL(lu_ref_fini);
link->ll_scope = scope;
link->ll_source = source;
spin_lock(&ref->lf_guard);
- cfs_list_add_tail(&link->ll_linkage, &ref->lf_list);
+ list_add_tail(&link->ll_linkage, &ref->lf_list);
ref->lf_refs++;
spin_unlock(&ref->lf_guard);
}
link->ll_scope = scope;
link->ll_source = source;
spin_lock(&ref->lf_guard);
- cfs_list_add_tail(&link->ll_linkage, &ref->lf_list);
+ list_add_tail(&link->ll_linkage, &ref->lf_list);
ref->lf_refs++;
spin_unlock(&ref->lf_guard);
}
unsigned iterations;
iterations = 0;
- cfs_list_for_each_entry(link, &ref->lf_list, ll_linkage) {
+ list_for_each_entry(link, &ref->lf_list, ll_linkage) {
++iterations;
if (lu_ref_link_eq(link, scope, source)) {
if (iterations > lu_ref_chain_max_length) {
spin_lock(&ref->lf_guard);
link = lu_ref_find(ref, scope, source);
if (link != NULL) {
- cfs_list_del(&link->ll_linkage);
+ list_del(&link->ll_linkage);
ref->lf_refs--;
spin_unlock(&ref->lf_guard);
OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
spin_lock(&ref->lf_guard);
REFASSERT(ref, link->ll_ref == ref);
REFASSERT(ref, lu_ref_link_eq(link, scope, source));
- cfs_list_del(&link->ll_linkage);
+ list_del(&link->ll_linkage);
ref->lf_refs--;
spin_unlock(&ref->lf_guard);
}
struct lu_ref *ref = seq->private;
spin_lock(&lu_ref_refs_guard);
- if (cfs_list_empty(&ref->lf_linkage))
+ if (list_empty(&ref->lf_linkage))
ref = NULL;
spin_unlock(&lu_ref_refs_guard);
struct lu_ref *next;
LASSERT(seq->private == p);
- LASSERT(!cfs_list_empty(&ref->lf_linkage));
+ LASSERT(!list_empty(&ref->lf_linkage));
spin_lock(&lu_ref_refs_guard);
- next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
+ next = list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
if (&next->lf_linkage == &lu_ref_refs) {
p = NULL;
} else {
(*pos)++;
- cfs_list_move(&ref->lf_linkage, &next->lf_linkage);
+ list_move(&ref->lf_linkage, &next->lf_linkage);
}
spin_unlock(&lu_ref_refs_guard);
return p;
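
The seq iterator above keeps its position across lock drops by parking a marker entry on the list itself and sliding it past each element as it is returned, so it never has to re-walk from the head. The real code also recognizes and skips other readers' markers. A minimal userspace sketch of the technique (names illustrative):

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add(struct list_head *e, struct list_head *h)
{
	e->next = h->next;
	e->prev = h;
	h->next->prev = e;
	h->next = e;
}

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_move(struct list_head *e, struct list_head *h)
{
	list_del(e);
	list_add(e, h);
}

struct ref {
	struct list_head r_linkage;
};

static struct list_head refs = { &refs, &refs };

/* Return the entry after @marker and slide the marker past it. Because
 * the marker sits on the list itself, the cursor stays valid even if
 * the lock protecting @refs is dropped between calls. */
static struct ref *ref_next(struct ref *marker)
{
	struct ref *next = list_entry(marker->r_linkage.next,
				      struct ref, r_linkage);

	if (&next->r_linkage == &refs)
		return NULL;		/* marker reached the tail */
	list_move(&marker->r_linkage, &next->r_linkage);
	return next;
}

int main(void)
{
	struct ref r1, r2, marker;
	int seen = 0;

	list_add(&r2.r_linkage, &refs);		/* refs: r2 */
	list_add(&r1.r_linkage, &refs);		/* refs: r1, r2 */
	list_add(&marker.r_linkage, &refs);	/* marker ahead of both */

	while (ref_next(&marker) != NULL)
		seen++;
	return seen == 2 ? 0 : 1;
}
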
struct lu_ref *next;
spin_lock(&lu_ref_refs_guard);
- next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
+ next = list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
if ((&next->lf_linkage == &lu_ref_refs) || lu_ref_is_marker(next)) {
spin_unlock(&lu_ref_refs_guard);
return 0;
struct lu_ref_link *link;
int i = 0;
- cfs_list_for_each_entry(link, &next->lf_list, ll_linkage)
+ list_for_each_entry(link, &next->lf_list, ll_linkage)
seq_printf(seq, " #%d link: %s %p\n",
i++, link->ll_scope, link->ll_source);
}
result = seq_open(file, &lu_ref_seq_ops);
if (result == 0) {
spin_lock(&lu_ref_refs_guard);
- if (!cfs_list_empty(&marker->lf_linkage))
+ if (!list_empty(&marker->lf_linkage))
result = -EAGAIN;
else
- cfs_list_add(&marker->lf_linkage, &lu_ref_refs);
+ list_add(&marker->lf_linkage, &lu_ref_refs);
spin_unlock(&lu_ref_refs_guard);
if (result == 0) {
struct lu_ref *ref = ((struct seq_file *)file->private_data)->private;
spin_lock(&lu_ref_refs_guard);
- cfs_list_del_init(&ref->lf_linkage);
+ list_del_init(&ref->lf_linkage);
spin_unlock(&lu_ref_refs_guard);
return seq_release(inode, file);
CDEBUG(D_CONSOLE,
"lu_ref tracking is enabled. Performance isn't.\n");
spin_lock_init(&lu_ref_refs_guard);
result = lu_kmem_init(lu_ref_caches);
#include <lustre_lib.h>
#ifndef __KERNEL__
-# define list_add_rcu cfs_list_add
-# define list_del_rcu cfs_list_del
-# define list_for_each_rcu cfs_list_for_each
-# define list_for_each_safe_rcu cfs_list_for_each_safe
-# define list_for_each_entry_rcu cfs_list_for_each_entry
+# define list_add_rcu list_add
+# define list_del_rcu list_del
+# define list_for_each_rcu list_for_each
+# define list_for_each_safe_rcu list_for_each_safe
+# define list_for_each_entry_rcu list_for_each_entry
# define rcu_read_lock() spin_lock(&bucket->lock)
# define rcu_read_unlock() spin_unlock(&bucket->lock)
#endif /* !__KERNEL__ */
static spinlock_t handle_base_lock;
static struct handle_bucket {
- spinlock_t lock;
- cfs_list_t head;
+ spinlock_t lock;
+ struct list_head head;
} *handle_hash;
#define HANDLE_HASH_SIZE (1 << 16)
ENTRY;
LASSERT(h != NULL);
- LASSERT(cfs_list_empty(&h->h_link));
+ LASSERT(list_empty(&h->h_link));
/*
 * This is a fast but simplistic cookie generation algorithm; it will
static void class_handle_unhash_nolock(struct portals_handle *h)
{
- if (cfs_list_empty(&h->h_link)) {
+ if (list_empty(&h->h_link)) {
CERROR("removing an already-removed handle ("LPX64")\n",
h->h_cookie);
return;
spin_lock_init(&handle_base_lock);
for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
bucket--) {
- CFS_INIT_LIST_HEAD(&bucket->head);
+ INIT_LIST_HEAD(&bucket->head);
spin_lock_init(&bucket->lock);
}
#define NIDS_MAX 32
struct uuid_nid_data {
- cfs_list_t un_list;
- struct obd_uuid un_uuid;
- int un_nid_count;
- lnet_nid_t un_nids[NIDS_MAX];
+ struct list_head un_list;
+ struct obd_uuid un_uuid;
+ int un_nid_count;
+ lnet_nid_t un_nids[NIDS_MAX];
};
/* FIXME: This should probably become more elegant than a global linked list */
-static cfs_list_t g_uuid_list;
+static struct list_head g_uuid_list;
static spinlock_t g_uuid_lock;
void class_init_uuidlist(void)
{
- CFS_INIT_LIST_HEAD(&g_uuid_list);
+ INIT_LIST_HEAD(&g_uuid_list);
spin_lock_init(&g_uuid_lock);
}
obd_str2uuid(&tmp, uuid);
spin_lock(&g_uuid_lock);
- cfs_list_for_each_entry(data, &g_uuid_list, un_list) {
+ list_for_each_entry(data, &g_uuid_list, un_list) {
if (obd_uuid_equals(&data->un_uuid, &tmp)) {
if (index >= data->un_nid_count)
break;
data->un_nid_count = 1;
spin_lock(&g_uuid_lock);
- cfs_list_for_each_entry(entry, &g_uuid_list, un_list) {
+ list_for_each_entry(entry, &g_uuid_list, un_list) {
if (obd_uuid_equals(&entry->un_uuid, &data->un_uuid)) {
int i;
}
}
if (!found)
- cfs_list_add(&data->un_list, &g_uuid_list);
+ list_add(&data->un_list, &g_uuid_list);
spin_unlock(&g_uuid_lock);
if (found) {
/* Delete the nids for one uuid if specified, otherwise delete all */
int class_del_uuid(const char *uuid)
{
- CFS_LIST_HEAD(deathrow);
struct uuid_nid_data *data;
+	LIST_HEAD(deathrow);
spin_lock(&g_uuid_lock);
- if (uuid != NULL) {
- struct obd_uuid tmp;
+ if (uuid != NULL) {
+ struct obd_uuid tmp;
- obd_str2uuid(&tmp, uuid);
- cfs_list_for_each_entry(data, &g_uuid_list, un_list) {
- if (obd_uuid_equals(&data->un_uuid, &tmp)) {
- cfs_list_move(&data->un_list, &deathrow);
- break;
- }
- }
- } else
- cfs_list_splice_init(&g_uuid_list, &deathrow);
+ obd_str2uuid(&tmp, uuid);
+ list_for_each_entry(data, &g_uuid_list, un_list) {
+ if (obd_uuid_equals(&data->un_uuid, &tmp)) {
+ list_move(&data->un_list, &deathrow);
+ break;
+ }
+ }
+ } else
+ list_splice_init(&g_uuid_list, &deathrow);
spin_unlock(&g_uuid_lock);
- if (uuid != NULL && cfs_list_empty(&deathrow)) {
- CDEBUG(D_INFO, "Try to delete a non-existent uuid %s\n", uuid);
- return -EINVAL;
- }
-
- while (!cfs_list_empty(&deathrow)) {
- data = cfs_list_entry(deathrow.next, struct uuid_nid_data,
- un_list);
- cfs_list_del(&data->un_list);
+ if (uuid != NULL && list_empty(&deathrow)) {
+ CDEBUG(D_INFO, "Try to delete a non-existent uuid %s\n", uuid);
+ return -EINVAL;
+ }
- CDEBUG(D_INFO, "del uuid %s %s/%d\n",
- obd_uuid2str(&data->un_uuid),
- libcfs_nid2str(data->un_nids[0]),
- data->un_nid_count);
+ while (!list_empty(&deathrow)) {
+ data = list_entry(deathrow.next, struct uuid_nid_data,
+ un_list);
+ list_del(&data->un_list);
- OBD_FREE(data, sizeof(*data));
- }
+ CDEBUG(D_INFO, "del uuid %s %s/%d\n",
+ obd_uuid2str(&data->un_uuid),
+ libcfs_nid2str(data->un_nids[0]),
+ data->un_nid_count);
- return 0;
+ OBD_FREE(data, sizeof(*data));
+ }
+ return 0;
}
/* check if @nid exists in nid list of @uuid */
obd_uuid2str(uuid), libcfs_nid2str(nid));
spin_lock(&g_uuid_lock);
- cfs_list_for_each_entry(entry, &g_uuid_list, un_list) {
+ list_for_each_entry(entry, &g_uuid_list, un_list) {
int i;
if (!obd_uuid_equals(&entry->un_uuid, uuid))
obd->obd_pool_limit = 0;
obd->obd_pool_slv = 0;
- CFS_INIT_LIST_HEAD(&obd->obd_exports);
- CFS_INIT_LIST_HEAD(&obd->obd_unlinked_exports);
- CFS_INIT_LIST_HEAD(&obd->obd_delayed_exports);
- CFS_INIT_LIST_HEAD(&obd->obd_exports_timed);
- CFS_INIT_LIST_HEAD(&obd->obd_nid_stats);
+ INIT_LIST_HEAD(&obd->obd_exports);
+ INIT_LIST_HEAD(&obd->obd_unlinked_exports);
+ INIT_LIST_HEAD(&obd->obd_delayed_exports);
+ INIT_LIST_HEAD(&obd->obd_exports_timed);
+ INIT_LIST_HEAD(&obd->obd_nid_stats);
spin_lock_init(&obd->obd_nid_lock);
spin_lock_init(&obd->obd_dev_lock);
mutex_init(&obd->obd_dev_mutex);
spin_lock_init(&obd->obd_recovery_task_lock);
init_waitqueue_head(&obd->obd_next_transno_waitq);
init_waitqueue_head(&obd->obd_evict_inprogress_waitq);
- CFS_INIT_LIST_HEAD(&obd->obd_req_replay_queue);
- CFS_INIT_LIST_HEAD(&obd->obd_lock_replay_queue);
- CFS_INIT_LIST_HEAD(&obd->obd_final_req_queue);
- CFS_INIT_LIST_HEAD(&obd->obd_evict_list);
+ INIT_LIST_HEAD(&obd->obd_req_replay_queue);
+ INIT_LIST_HEAD(&obd->obd_lock_replay_queue);
+ INIT_LIST_HEAD(&obd->obd_final_req_queue);
+ INIT_LIST_HEAD(&obd->obd_evict_list);
INIT_LIST_HEAD(&obd->obd_lwp_list);
llog_group_init(&obd->obd_olg);
GOTO(err_hash, err = PTR_ERR(exp));
obd->obd_self_export = exp;
- cfs_list_del_init(&exp->exp_obd_chain_timed);
+ list_del_init(&exp->exp_obd_chain_timed);
class_export_put(exp);
err = obd_setup(obd, lcfg);
RETURN(rc);
}
-CFS_LIST_HEAD(lustre_profile_list);
+LIST_HEAD(lustre_profile_list);
struct lustre_profile *class_get_profile(const char * prof)
{
struct lustre_profile *lprof;
ENTRY;
- cfs_list_for_each_entry(lprof, &lustre_profile_list, lp_list) {
+ list_for_each_entry(lprof, &lustre_profile_list, lp_list) {
if (!strcmp(lprof->lp_profile, prof)) {
RETURN(lprof);
}
OBD_ALLOC(lprof, sizeof(*lprof));
if (lprof == NULL)
RETURN(-ENOMEM);
- CFS_INIT_LIST_HEAD(&lprof->lp_list);
+ INIT_LIST_HEAD(&lprof->lp_list);
LASSERT(proflen == (strlen(prof) + 1));
OBD_ALLOC(lprof->lp_profile, proflen);
memcpy(lprof->lp_md, mdc, mdclen);
}
- cfs_list_add(&lprof->lp_list, &lustre_profile_list);
+ list_add(&lprof->lp_list, &lustre_profile_list);
RETURN(err);
out:
lprof = class_get_profile(prof);
if (lprof) {
- cfs_list_del(&lprof->lp_list);
+ list_del(&lprof->lp_list);
OBD_FREE(lprof->lp_profile, strlen(lprof->lp_profile) + 1);
OBD_FREE(lprof->lp_dt, strlen(lprof->lp_dt) + 1);
if (lprof->lp_md)
struct lustre_profile *lprof, *n;
ENTRY;
- cfs_list_for_each_entry_safe(lprof, n, &lustre_profile_list, lp_list) {
- cfs_list_del(&lprof->lp_list);
+ list_for_each_entry_safe(lprof, n, &lustre_profile_list, lp_list) {
+ list_del(&lprof->lp_list);
OBD_FREE(lprof->lp_profile, strlen(lprof->lp_profile) + 1);
OBD_FREE(lprof->lp_dt, strlen(lprof->lp_dt) + 1);
if (lprof->lp_md)
}
static void *
-uuid_key(cfs_hlist_node_t *hnode)
+uuid_key(struct hlist_node *hnode)
{
- struct obd_export *exp;
+ struct obd_export *exp;
- exp = cfs_hlist_entry(hnode, struct obd_export, exp_uuid_hash);
+ exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
- return &exp->exp_client_uuid;
+ return &exp->exp_client_uuid;
}
/*
* state with this function
*/
static int
-uuid_keycmp(const void *key, cfs_hlist_node_t *hnode)
+uuid_keycmp(const void *key, struct hlist_node *hnode)
{
struct obd_export *exp;
LASSERT(key);
- exp = cfs_hlist_entry(hnode, struct obd_export, exp_uuid_hash);
+ exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
return obd_uuid_equals(key, &exp->exp_client_uuid) &&
!exp->exp_failed;
}
static void *
-uuid_export_object(cfs_hlist_node_t *hnode)
+uuid_export_object(struct hlist_node *hnode)
{
- return cfs_hlist_entry(hnode, struct obd_export, exp_uuid_hash);
+ return hlist_entry(hnode, struct obd_export, exp_uuid_hash);
}
static void
-uuid_export_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+uuid_export_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
struct obd_export *exp;
- exp = cfs_hlist_entry(hnode, struct obd_export, exp_uuid_hash);
+ exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
class_export_get(exp);
}
static void
-uuid_export_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+uuid_export_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
{
struct obd_export *exp;
- exp = cfs_hlist_entry(hnode, struct obd_export, exp_uuid_hash);
+ exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
class_export_put(exp);
}
}
static void *
-nid_key(cfs_hlist_node_t *hnode)
+nid_key(struct hlist_node *hnode)
{
struct obd_export *exp;
- exp = cfs_hlist_entry(hnode, struct obd_export, exp_nid_hash);
+ exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
RETURN(&exp->exp_connection->c_peer.nid);
}
* state with this function
*/
static int
-nid_kepcmp(const void *key, cfs_hlist_node_t *hnode)
+nid_kepcmp(const void *key, struct hlist_node *hnode)
{
struct obd_export *exp;
LASSERT(key);
- exp = cfs_hlist_entry(hnode, struct obd_export, exp_nid_hash);
+ exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
RETURN(exp->exp_connection->c_peer.nid == *(lnet_nid_t *)key &&
!exp->exp_failed);
}
static void *
-nid_export_object(cfs_hlist_node_t *hnode)
+nid_export_object(struct hlist_node *hnode)
{
- return cfs_hlist_entry(hnode, struct obd_export, exp_nid_hash);
+ return hlist_entry(hnode, struct obd_export, exp_nid_hash);
}
static void
-nid_export_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+nid_export_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
struct obd_export *exp;
- exp = cfs_hlist_entry(hnode, struct obd_export, exp_nid_hash);
+ exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
class_export_get(exp);
}
static void
-nid_export_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+nid_export_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
{
struct obd_export *exp;
- exp = cfs_hlist_entry(hnode, struct obd_export, exp_nid_hash);
+ exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
class_export_put(exp);
}
*/
static void *
-nidstats_key(cfs_hlist_node_t *hnode)
+nidstats_key(struct hlist_node *hnode)
{
struct nid_stat *ns;
- ns = cfs_hlist_entry(hnode, struct nid_stat, nid_hash);
+ ns = hlist_entry(hnode, struct nid_stat, nid_hash);
return &ns->nid;
}
static int
-nidstats_keycmp(const void *key, cfs_hlist_node_t *hnode)
+nidstats_keycmp(const void *key, struct hlist_node *hnode)
{
return *(lnet_nid_t *)nidstats_key(hnode) == *(lnet_nid_t *)key;
}
static void *
-nidstats_object(cfs_hlist_node_t *hnode)
+nidstats_object(struct hlist_node *hnode)
{
- return cfs_hlist_entry(hnode, struct nid_stat, nid_hash);
+ return hlist_entry(hnode, struct nid_stat, nid_hash);
}
static void
-nidstats_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+nidstats_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
struct nid_stat *ns;
- ns = cfs_hlist_entry(hnode, struct nid_stat, nid_hash);
+ ns = hlist_entry(hnode, struct nid_stat, nid_hash);
nidstat_getref(ns);
}
static void
-nidstats_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+nidstats_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
{
struct nid_stat *ns;
- ns = cfs_hlist_entry(hnode, struct nid_stat, nid_hash);
+ ns = hlist_entry(hnode, struct nid_stat, nid_hash);
nidstat_putref(ns);
}
/*********** mount lookup *********/
DEFINE_MUTEX(lustre_mount_info_lock);
-static CFS_LIST_HEAD(server_mount_info_list);
+static LIST_HEAD(server_mount_info_list);
static struct lustre_mount_info *server_find_mount(const char *name)
{
- cfs_list_t *tmp;
+ struct list_head *tmp;
struct lustre_mount_info *lmi;
ENTRY;
- cfs_list_for_each(tmp, &server_mount_info_list) {
- lmi = cfs_list_entry(tmp, struct lustre_mount_info,
- lmi_list_chain);
+ list_for_each(tmp, &server_mount_info_list) {
+ lmi = list_entry(tmp, struct lustre_mount_info,
+ lmi_list_chain);
if (strcmp(name, lmi->lmi_name) == 0)
RETURN(lmi);
}
}
lmi->lmi_name = name_cp;
lmi->lmi_sb = sb;
- cfs_list_add(&lmi->lmi_list_chain, &server_mount_info_list);
+ list_add(&lmi->lmi_list_chain, &server_mount_info_list);
mutex_unlock(&lustre_mount_info_lock);
CDEBUG(D_MOUNT, "deregister mount %p from %s\n", lmi->lmi_sb, name);
OBD_FREE(lmi->lmi_name, strlen(lmi->lmi_name) + 1);
- cfs_list_del(&lmi->lmi_list_chain);
+ list_del(&lmi->lmi_list_chain);
OBD_FREE(lmi, sizeof(*lmi));
mutex_unlock(&lustre_mount_info_lock);
}
EXPORT_SYMBOL(tgt_name2lwp_name);
-static CFS_LIST_HEAD(lwp_register_list);
+static LIST_HEAD(lwp_register_list);
DEFINE_MUTEX(lwp_register_list_lock);
int lustre_register_lwp_item(const char *lwpname, struct obd_export **exp,
lri->lri_exp = exp;
lri->lri_cb_func = cb_func;
lri->lri_cb_data = cb_data;
- CFS_INIT_LIST_HEAD(&lri->lri_list);
- cfs_list_add(&lri->lri_list, &lwp_register_list);
+ INIT_LIST_HEAD(&lri->lri_list);
+ list_add(&lri->lri_list, &lwp_register_list);
if (*exp != NULL && cb_func != NULL)
cb_func(cb_data);
struct lwp_register_item *lri, *tmp;
mutex_lock(&lwp_register_list_lock);
- cfs_list_for_each_entry_safe(lri, tmp, &lwp_register_list, lri_list) {
+ list_for_each_entry_safe(lri, tmp, &lwp_register_list, lri_list) {
if (exp == lri->lri_exp) {
if (*exp)
class_export_put(*exp);
- cfs_list_del(&lri->lri_list);
+ list_del(&lri->lri_list);
OBD_FREE_PTR(lri);
break;
}
LASSERT(exp != NULL);
mutex_lock(&lwp_register_list_lock);
- cfs_list_for_each_entry_safe(lri, tmp, &lwp_register_list, lri_list) {
+ list_for_each_entry_safe(lri, tmp, &lwp_register_list, lri_list) {
if (strcmp(exp->exp_obd->obd_name, lri->lri_name))
continue;
if (*lri->lri_exp != NULL)