* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
#include <lustre_log.h>
#include "ptlrpc_internal.h"
-static atomic_t llcd_count = ATOMIC_INIT(0);
+static cfs_atomic_t llcd_count = CFS_ATOMIC_INIT(0);
static cfs_mem_cache_t *llcd_cache = NULL;
#ifdef __KERNEL__
LLOG_LCM_FL_EXIT = 1 << 1
};
+/*
+ * Async-args payload stored inside req->rq_async_args: carries the
+ * llog_canceld_ctxt from llcd send time to llcd_interpret(), replacing
+ * the old untyped rq_async_args.pointer_arg[0] usage. Its size is
+ * checked against rq_async_args with CLASSERT() at the fill-in site.
+ */
+struct llcd_async_args {
+        struct llog_canceld_ctxt *la_ctxt;
+};
+
static void llcd_print(struct llog_canceld_ctxt *llcd,
const char *func, int line)
{
llcd->llcd_cookiebytes = 0;
llcd->llcd_size = size;
- spin_lock(&lcm->lcm_lock);
+ cfs_spin_lock(&lcm->lcm_lock);
llcd->llcd_lcm = lcm;
- atomic_inc(&lcm->lcm_count);
- list_add_tail(&llcd->llcd_list, &lcm->lcm_llcds);
- spin_unlock(&lcm->lcm_lock);
- atomic_inc(&llcd_count);
+ cfs_atomic_inc(&lcm->lcm_count);
+ cfs_list_add_tail(&llcd->llcd_list, &lcm->lcm_llcds);
+ cfs_spin_unlock(&lcm->lcm_lock);
+ cfs_atomic_inc(&llcd_count);
CDEBUG(D_RPCTRACE, "Alloc llcd %p on lcm %p (%d)\n",
- llcd, lcm, atomic_read(&lcm->lcm_count));
+ llcd, lcm, cfs_atomic_read(&lcm->lcm_count));
return llcd;
}
int size;
if (lcm) {
- if (atomic_read(&lcm->lcm_count) == 0) {
+ if (cfs_atomic_read(&lcm->lcm_count) == 0) {
CERROR("Invalid llcd free %p\n", llcd);
llcd_print(llcd, __FUNCTION__, __LINE__);
LBUG();
}
- spin_lock(&lcm->lcm_lock);
- LASSERT(!list_empty(&llcd->llcd_list));
- list_del_init(&llcd->llcd_list);
- atomic_dec(&lcm->lcm_count);
- spin_unlock(&lcm->lcm_lock);
+ cfs_spin_lock(&lcm->lcm_lock);
+ LASSERT(!cfs_list_empty(&llcd->llcd_list));
+ cfs_list_del_init(&llcd->llcd_list);
+ cfs_atomic_dec(&lcm->lcm_count);
+ cfs_spin_unlock(&lcm->lcm_lock);
CDEBUG(D_RPCTRACE, "Free llcd %p on lcm %p (%d)\n",
- llcd, lcm, atomic_read(&lcm->lcm_count));
+ llcd, lcm, cfs_atomic_read(&lcm->lcm_count));
}
- LASSERT(atomic_read(&llcd_count) > 0);
- atomic_dec(&llcd_count);
+ LASSERT(cfs_atomic_read(&llcd_count) > 0);
+ cfs_atomic_dec(&llcd_count);
size = offsetof(struct llog_canceld_ctxt, llcd_cookies) +
llcd->llcd_size;
*/
static int
llcd_interpret(const struct lu_env *env,
- struct ptlrpc_request *req, void *noused, int rc)
+ struct ptlrpc_request *req, void *args, int rc)
{
- struct llog_canceld_ctxt *llcd = req->rq_async_args.pointer_arg[0];
+ struct llcd_async_args *la = args;
+ struct llog_canceld_ctxt *llcd = la->la_ctxt;
+
CDEBUG(D_RPCTRACE, "Sent llcd %p (%d) - killing it\n", llcd, rc);
llcd_free(llcd);
return 0;
char *bufs[2] = { NULL, (char *)llcd->llcd_cookies };
struct obd_import *import = NULL;
struct llog_commit_master *lcm;
+ struct llcd_async_args *la;
struct ptlrpc_request *req;
struct llog_ctxt *ctxt;
int rc;
* Check if we're in exit stage. Do not send llcd in
* this case.
*/
- if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
+ if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
GOTO(exit, rc = -ENODEV);
CDEBUG(D_RPCTRACE, "Sending llcd %p\n", llcd);
/* bug 5515 */
req->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;
+
req->rq_interpret_reply = (ptlrpc_interpterer_t)llcd_interpret;
- req->rq_async_args.pointer_arg[0] = llcd;
+
+ CLASSERT(sizeof(*la) <= sizeof(req->rq_async_args));
+ la = ptlrpc_req_async_args(req);
+ la->la_ctxt = llcd;
/* llog cancels will be replayed after reconnect so this will do twice
* first from replay llog, second for resended rpc */
* Let all know that we're stopping. This will also make
* llcd_send() refuse any new llcds.
*/
- set_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags);
+ cfs_set_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags);
/*
* Stop processing thread. No new rpcs will be accepted for
* those forgotten in sync may still be attached to ctxt. Let's
* print them.
*/
- if (atomic_read(&lcm->lcm_count) != 0) {
+ if (cfs_atomic_read(&lcm->lcm_count) != 0) {
struct llog_canceld_ctxt *llcd;
- struct list_head *tmp;
+ cfs_list_t *tmp;
CERROR("Busy llcds found (%d) on lcm %p\n",
- atomic_read(&lcm->lcm_count), lcm);
+ cfs_atomic_read(&lcm->lcm_count), lcm);
- spin_lock(&lcm->lcm_lock);
- list_for_each(tmp, &lcm->lcm_llcds) {
- llcd = list_entry(tmp, struct llog_canceld_ctxt,
- llcd_list);
+ cfs_spin_lock(&lcm->lcm_lock);
+ cfs_list_for_each(tmp, &lcm->lcm_llcds) {
+ llcd = cfs_list_entry(tmp, struct llog_canceld_ctxt,
+ llcd_list);
llcd_print(llcd, __FUNCTION__, __LINE__);
}
- spin_unlock(&lcm->lcm_lock);
+ cfs_spin_unlock(&lcm->lcm_lock);
/*
* No point to go further with busy llcds at this point
snprintf(lcm->lcm_name, sizeof(lcm->lcm_name),
"lcm_%s", name);
- atomic_set(&lcm->lcm_count, 0);
- atomic_set(&lcm->lcm_refcount, 1);
- spin_lock_init(&lcm->lcm_lock);
+ cfs_atomic_set(&lcm->lcm_count, 0);
+ cfs_atomic_set(&lcm->lcm_refcount, 1);
+ cfs_spin_lock_init(&lcm->lcm_lock);
CFS_INIT_LIST_HEAD(&lcm->lcm_llcds);
rc = llog_recov_thread_start(lcm);
if (rc) {
OBD_FREE_PTR(lpca);
RETURN(-ENODEV);
}
- rc = cfs_kernel_thread(llog_cat_process_thread, lpca,
- CLONE_VM | CLONE_FILES);
+ rc = cfs_create_thread(llog_cat_process_thread, lpca, CFS_DAEMON_FLAGS);
if (rc < 0) {
CERROR("Error starting llog_cat_process_thread(): %d\n", rc);
OBD_FREE_PTR(lpca);
/*
* Start recovery in separate thread.
*/
- mutex_down(&ctxt->loc_sem);
+ cfs_mutex_down(&ctxt->loc_sem);
ctxt->loc_gen = *gen;
rc = llog_recov_thread_replay(ctxt, ctxt->llog_proc_cb, logid);
- mutex_up(&ctxt->loc_sem);
+ cfs_mutex_up(&ctxt->loc_sem);
RETURN(rc);
}
LASSERT(ctxt != NULL);
- mutex_down(&ctxt->loc_sem);
+ cfs_mutex_down(&ctxt->loc_sem);
if (!ctxt->loc_lcm) {
CDEBUG(D_RPCTRACE, "No lcm for ctxt %p\n", ctxt);
GOTO(out, rc = -ENODEV);
GOTO(out, rc = -ENODEV);
}
- if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags)) {
+ if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags)) {
CDEBUG(D_RPCTRACE, "Commit thread is stopping for ctxt %p\n",
ctxt);
GOTO(out, rc = -ENODEV);
* Allocation is successful, let's check for stop
* flag again to fall back as soon as possible.
*/
- if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
+ if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
GOTO(out, rc = -ENODEV);
}
* Allocation is successful, let's check for stop
* flag again to fall back as soon as possible.
*/
- if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
+ if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
GOTO(out, rc = -ENODEV);
}
out:
if (rc)
llcd_put(ctxt);
- mutex_up(&ctxt->loc_sem);
+ cfs_mutex_up(&ctxt->loc_sem);
return rc;
}
EXPORT_SYMBOL(llog_obd_repl_cancel);
/*
* Flush any remaining llcd.
*/
- mutex_down(&ctxt->loc_sem);
+ cfs_mutex_down(&ctxt->loc_sem);
if (exp && (ctxt->loc_imp == exp->exp_imp_reverse)) {
/*
* This is ost->mds connection, we can't be sure that mds
*/
CDEBUG(D_RPCTRACE, "Kill cached llcd\n");
llcd_put(ctxt);
- mutex_up(&ctxt->loc_sem);
+ cfs_mutex_up(&ctxt->loc_sem);
} else {
/*
* This is either llog_sync() from generic llog code or sync
* llcds to the target with waiting for completion.
*/
CDEBUG(D_RPCTRACE, "Sync cached llcd\n");
- mutex_up(&ctxt->loc_sem);
+ cfs_mutex_up(&ctxt->loc_sem);
rc = llog_cancel(ctxt, NULL, 0, NULL, OBD_LLOG_FL_SENDNOW);
}
RETURN(rc);
* In 2.6.22 cfs_mem_cache_destroy() will not return error
* for busy resources. Let's check it another way.
*/
- LASSERTF(atomic_read(&llcd_count) == 0,
+ LASSERTF(cfs_atomic_read(&llcd_count) == 0,
"Can't destroy llcd cache! Number of "
- "busy llcds: %d\n", atomic_read(&llcd_count));
+ "busy llcds: %d\n", cfs_atomic_read(&llcd_count));
cfs_mem_cache_destroy(llcd_cache);
llcd_cache = NULL;
}