}
EXPORT_SYMBOL(cl_io_fini);
-static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, struct cl_object *obj)
+static int __cl_io_init(const struct lu_env *env, struct cl_io *io,
+ enum cl_io_type iot, struct cl_object *obj)
{
struct cl_object *scan;
int result;
{
LASSERT(obj != cl_object_top(obj));
- return cl_io_init0(env, io, iot, obj);
+ return __cl_io_init(env, io, iot, obj);
}
EXPORT_SYMBOL(cl_io_sub_init);
/* clear I/O restart from previous instance */
io->ci_need_restart = 0;
- return cl_io_init0(env, io, iot, obj);
+ return __cl_io_init(env, io, iot, obj);
}
EXPORT_SYMBOL(cl_io_init);
list_del_init(&page->cp_batch);
--plist->pl_nr;
/*
- * cl_page_disown0 rather than usual cl_page_disown() is used,
+ * __cl_page_disown rather than usual cl_page_disown() is used,
* because pages are possibly in CPS_FREEING state already due
* to the call to cl_page_list_discard().
*/
/*
- * XXX cl_page_disown0() will fail if page is not locked.
+ * XXX __cl_page_disown() will fail if page is not locked.
*/
- cl_page_disown0(env, page);
+ __cl_page_disown(env, page);
lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue",
plist);
cl_page_put(env, page);
#include <cl_object.h>
#include "cl_internal.h"
-static void cl_lock_trace0(int level, const struct lu_env *env,
- const char *prefix, const struct cl_lock *lock,
- const char *func, const int line)
+static void __cl_lock_trace(int level, const struct lu_env *env,
+ const char *prefix, const struct cl_lock *lock,
+ const char *func, const int line)
{
struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
CDEBUG(level, "%s: %p (%p/%d) at %s():%d\n",
prefix, lock, env, h->coh_nesting, func, line);
}
#define cl_lock_trace(level, env, prefix, lock) \
-	cl_lock_trace0(level, env, prefix, lock, __FUNCTION__, __LINE__)
+	__cl_lock_trace(level, env, prefix, lock, __func__, __LINE__)
/**
* Adds lock slice to the compound lock.
#include <cl_object.h>
#include "cl_internal.h"
-static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);
+static void __cl_page_delete(const struct lu_env *env, struct cl_page *pg);
static DEFINE_MUTEX(cl_page_kmem_mutex);
#ifdef LIBCFS_DEBUG
result = o->co_ops->coo_page_init(env, o,
cl_page, ind);
if (result != 0) {
- cl_page_delete0(env, cl_page);
+ __cl_page_delete(env, cl_page);
cl_page_free(env, cl_page, NULL);
cl_page = ERR_PTR(result);
break;
return cl_page_in_use_noref(pg);
}
-static void cl_page_state_set0(const struct lu_env *env,
- struct cl_page *cl_page,
- enum cl_page_state state)
+static void __cl_page_state_set(const struct lu_env *env,
+ struct cl_page *cl_page,
+ enum cl_page_state state)
{
enum cl_page_state old;
static void cl_page_state_set(const struct lu_env *env,
struct cl_page *page, enum cl_page_state state)
{
- cl_page_state_set0(env, page, state);
+ __cl_page_state_set(env, page, state);
}
/**
EXIT;
}
-void cl_page_disown0(const struct lu_env *env, struct cl_page *cp)
+void __cl_page_disown(const struct lu_env *env, struct cl_page *cp)
{
struct page *vmpage;
enum cl_page_state state;
* \see cl_page_own_try()
* \see cl_page_own
*/
-static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
- struct cl_page *cl_page, int nonblock)
+static int __cl_page_own(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *cl_page, int nonblock)
{
struct page *vmpage = cl_page->cp_vmpage;
int result;
cl_page_owner_set(cl_page);
if (cl_page->cp_state == CPS_FREEING) {
- cl_page_disown0(env, cl_page);
+ __cl_page_disown(env, cl_page);
result = -ENOENT;
goto out;
}
/**
* Own a page, might be blocked.
*
- * \see cl_page_own0()
+ * \see __cl_page_own()
*/
int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
{
- return cl_page_own0(env, io, pg, 0);
+ return __cl_page_own(env, io, pg, 0);
}
EXPORT_SYMBOL(cl_page_own);
/**
* Nonblock version of cl_page_own().
*
- * \see cl_page_own0()
+ * \see __cl_page_own()
*/
int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
struct cl_page *pg)
{
- return cl_page_own0(env, io, pg, 1);
+ return __cl_page_own(env, io, pg, 1);
}
EXPORT_SYMBOL(cl_page_own_try);
PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
pg->cp_state == CPS_FREEING);
- cl_page_disown0(env, pg);
+ __cl_page_disown(env, pg);
}
EXPORT_SYMBOL(cl_page_disown);
/**
* Version of cl_page_delete() that can be called for not fully constructed
- * cl_pages, e.g. in an error handling cl_page_find()->cl_page_delete0()
+ * cl_pages, e.g. in an error handling cl_page_find()->__cl_page_delete()
* path. Doesn't check cl_page invariant.
*/
-static void cl_page_delete0(const struct lu_env *env, struct cl_page *cp)
+static void __cl_page_delete(const struct lu_env *env, struct cl_page *cp)
{
struct page *vmpage;
const struct cl_page_slice *slice;
* Severe all ways to obtain new pointers to @pg.
*/
cl_page_owner_clear(cp);
- cl_page_state_set0(env, cp, CPS_FREEING);
+ __cl_page_state_set(env, cp, CPS_FREEING);
cl_page_slice_for_each_reverse(cp, slice, i) {
if (slice->cpl_ops->cpo_delete != NULL)
{
PINVRNT(env, pg, cl_page_invariant(pg));
ENTRY;
- cl_page_delete0(env, pg);
+ __cl_page_delete(env, pg);
EXIT;
}
EXPORT_SYMBOL(cl_page_delete);