* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
#include <lustre_log.h>
#include "ptlrpc_internal.h"
-static atomic_t llcd_count = ATOMIC_INIT(0);
+static cfs_atomic_t llcd_count = CFS_ATOMIC_INIT(0);
static cfs_mem_cache_t *llcd_cache = NULL;
#ifdef __KERNEL__
LLOG_LCM_FL_EXIT = 1 << 1
};
+struct llcd_async_args {
+ struct llog_canceld_ctxt *la_ctxt;
+};
+
+static void llcd_print(struct llog_canceld_ctxt *llcd,
+ const char *func, int line)
+{
+ CDEBUG(D_RPCTRACE, "Llcd (%p) at %s:%d:\n", llcd, func, line);
+ CDEBUG(D_RPCTRACE, " size: %d\n", llcd->llcd_size);
+ CDEBUG(D_RPCTRACE, " ctxt: %p\n", llcd->llcd_ctxt);
+ CDEBUG(D_RPCTRACE, " lcm : %p\n", llcd->llcd_lcm);
+ CDEBUG(D_RPCTRACE, " cookiebytes : %d\n", llcd->llcd_cookiebytes);
+}
+
/**
* Allocate new llcd from cache, init it and return to caller.
* Bumps number of objects allocated.
*/
-static struct llog_canceld_ctxt *llcd_alloc(void)
+static struct llog_canceld_ctxt *llcd_alloc(struct llog_commit_master *lcm)
{
struct llog_canceld_ctxt *llcd;
- int llcd_size;
+ int size, overhead;
+
+ LASSERT(lcm != NULL);
/*
- * Payload of lustre_msg V2 is bigger.
+ * We want to send one page of cookies with the rpc header. This
+ * buffer will be assigned to the rpc later, which is why we reserve
+ * space for the rpc header.
*/
- llcd_size = CFS_PAGE_SIZE -
- lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, NULL);
- llcd_size += offsetof(struct llog_canceld_ctxt, llcd_cookies);
- OBD_SLAB_ALLOC(llcd, llcd_cache, CFS_ALLOC_STD, llcd_size);
+ size = CFS_PAGE_SIZE - lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, NULL);
+ overhead = offsetof(struct llog_canceld_ctxt, llcd_cookies);
+ OBD_SLAB_ALLOC(llcd, llcd_cache, CFS_ALLOC_STD, size + overhead);
if (!llcd)
return NULL;
- llcd->llcd_size = llcd_size;
+ CFS_INIT_LIST_HEAD(&llcd->llcd_list);
llcd->llcd_cookiebytes = 0;
- atomic_inc(&llcd_count);
+ llcd->llcd_size = size;
+
+ cfs_spin_lock(&lcm->lcm_lock);
+ llcd->llcd_lcm = lcm;
+ cfs_atomic_inc(&lcm->lcm_count);
+ cfs_list_add_tail(&llcd->llcd_list, &lcm->lcm_llcds);
+ cfs_spin_unlock(&lcm->lcm_lock);
+ cfs_atomic_inc(&llcd_count);
+
+ CDEBUG(D_RPCTRACE, "Alloc llcd %p on lcm %p (%d)\n",
+ llcd, lcm, cfs_atomic_read(&lcm->lcm_count));
+
return llcd;
}
*/
static void llcd_free(struct llog_canceld_ctxt *llcd)
{
- LASSERT(atomic_read(&llcd_count) > 0);
- OBD_SLAB_FREE(llcd, llcd_cache, llcd->llcd_size);
- atomic_dec(&llcd_count);
-}
+ struct llog_commit_master *lcm = llcd->llcd_lcm;
+ int size;
+
+ if (lcm) {
+ if (cfs_atomic_read(&lcm->lcm_count) == 0) {
+ CERROR("Invalid llcd free %p\n", llcd);
+ llcd_print(llcd, __FUNCTION__, __LINE__);
+ LBUG();
+ }
+ cfs_spin_lock(&lcm->lcm_lock);
+ LASSERT(!cfs_list_empty(&llcd->llcd_list));
+ cfs_list_del_init(&llcd->llcd_list);
+ cfs_atomic_dec(&lcm->lcm_count);
+ cfs_spin_unlock(&lcm->lcm_lock);
+
+ CDEBUG(D_RPCTRACE, "Free llcd %p on lcm %p (%d)\n",
+ llcd, lcm, cfs_atomic_read(&lcm->lcm_count));
+ }
-/**
- * Copy passed @cookies to @llcd.
- */
-static void llcd_copy(struct llog_canceld_ctxt *llcd,
- struct llog_cookie *cookies)
-{
- memcpy((char *)llcd->llcd_cookies + llcd->llcd_cookiebytes,
- cookies, sizeof(*cookies));
- llcd->llcd_cookiebytes += sizeof(*cookies);
+ LASSERT(cfs_atomic_read(&llcd_count) > 0);
+ cfs_atomic_dec(&llcd_count);
+
+ size = offsetof(struct llog_canceld_ctxt, llcd_cookies) +
+ llcd->llcd_size;
+ OBD_SLAB_FREE(llcd, llcd_cache, size);
}
/**
* Checks if passed cookie fits into llcd free space buffer. Returns
* 1 if yes and 0 otherwise.
*/
-static int llcd_fit(struct llog_canceld_ctxt *llcd,
- struct llog_cookie *cookies)
+static inline int
+llcd_fit(struct llog_canceld_ctxt *llcd, struct llog_cookie *cookies)
{
- return (llcd->llcd_size -
- llcd->llcd_cookiebytes) >= sizeof(*cookies);
+ return (llcd->llcd_size - llcd->llcd_cookiebytes >= sizeof(*cookies));
}
-static void llcd_print(struct llog_canceld_ctxt *llcd,
- const char *func, int line)
+/**
+ * Copy passed @cookies to @llcd.
+ */
+static inline void
+llcd_copy(struct llog_canceld_ctxt *llcd, struct llog_cookie *cookies)
{
- CDEBUG(D_RPCTRACE, "Llcd (%p) at %s:%d:\n", llcd, func, line);
- CDEBUG(D_RPCTRACE, " size: %d\n", llcd->llcd_size);
- CDEBUG(D_RPCTRACE, " ctxt: %p\n", llcd->llcd_ctxt);
- CDEBUG(D_RPCTRACE, " lcm : %p\n", llcd->llcd_lcm);
- CDEBUG(D_RPCTRACE, " cookiebytes : %d\n", llcd->llcd_cookiebytes);
+ LASSERT(llcd_fit(llcd, cookies));
+ memcpy((char *)llcd->llcd_cookies + llcd->llcd_cookiebytes,
+ cookies, sizeof(*cookies));
+ llcd->llcd_cookiebytes += sizeof(*cookies);
}
/**
*/
static int
llcd_interpret(const struct lu_env *env,
- struct ptlrpc_request *req, void *noused, int rc)
+ struct ptlrpc_request *req, void *args, int rc)
{
- struct llog_canceld_ctxt *llcd = req->rq_async_args.pointer_arg[0];
- CDEBUG(D_RPCTRACE, "Sent llcd %p (%d)\n", llcd, rc);
+ struct llcd_async_args *la = args;
+ struct llog_canceld_ctxt *llcd = la->la_ctxt;
+
+ CDEBUG(D_RPCTRACE, "Sent llcd %p (%d) - killing it\n", llcd, rc);
llcd_free(llcd);
return 0;
}
char *bufs[2] = { NULL, (char *)llcd->llcd_cookies };
struct obd_import *import = NULL;
struct llog_commit_master *lcm;
+ struct llcd_async_args *la;
struct ptlrpc_request *req;
struct llog_ctxt *ctxt;
int rc;
* Check if we're in exit stage. Do not send llcd in
* this case.
*/
- if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
+ if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
GOTO(exit, rc = -ENODEV);
CDEBUG(D_RPCTRACE, "Sending llcd %p\n", llcd);
/* bug 5515 */
req->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;
+
req->rq_interpret_reply = (ptlrpc_interpterer_t)llcd_interpret;
- req->rq_async_args.pointer_arg[0] = llcd;
+
+ CLASSERT(sizeof(*la) <= sizeof(req->rq_async_args));
+ la = ptlrpc_req_async_args(req);
+ la->la_ctxt = llcd;
+
+ /* llog cancels will be replayed after reconnect, so this may happen
+ * twice: first from the replay llog, second for the resent rpc */
+ req->rq_no_delay = req->rq_no_resend = 1;
+
rc = ptlrpc_set_add_new_req(&lcm->lcm_pc, req);
if (rc) {
ptlrpc_request_free(req);
static int
llcd_attach(struct llog_ctxt *ctxt, struct llog_canceld_ctxt *llcd)
{
- struct llog_commit_master *lcm;
-
LASSERT(ctxt != NULL && llcd != NULL);
LASSERT_SEM_LOCKED(&ctxt->loc_sem);
LASSERT(ctxt->loc_llcd == NULL);
- lcm = ctxt->loc_lcm;
- atomic_inc(&lcm->lcm_count);
- CDEBUG(D_RPCTRACE, "Attach llcd %p to ctxt %p (%d)\n",
- llcd, ctxt, atomic_read(&lcm->lcm_count));
llcd->llcd_ctxt = llog_ctxt_get(ctxt);
- llcd->llcd_lcm = ctxt->loc_lcm;
ctxt->loc_llcd = llcd;
+
+ CDEBUG(D_RPCTRACE, "Attach llcd %p to ctxt %p\n",
+ llcd, ctxt);
+
return 0;
}
*/
static struct llog_canceld_ctxt *llcd_detach(struct llog_ctxt *ctxt)
{
- struct llog_commit_master *lcm;
struct llog_canceld_ctxt *llcd;
LASSERT(ctxt != NULL);
if (!llcd)
return NULL;
- lcm = ctxt->loc_lcm;
- if (atomic_read(&lcm->lcm_count) == 0) {
- CERROR("Invalid detach occured %p:%p\n", ctxt, llcd);
- llcd_print(llcd, __FUNCTION__, __LINE__);
- LBUG();
- }
- atomic_dec(&lcm->lcm_count);
- ctxt->loc_llcd = NULL;
-
- CDEBUG(D_RPCTRACE, "Detach llcd %p from ctxt %p (%d)\n",
- llcd, ctxt, atomic_read(&lcm->lcm_count));
+ CDEBUG(D_RPCTRACE, "Detach llcd %p from ctxt %p\n",
+ llcd, ctxt);
+ ctxt->loc_llcd = NULL;
llog_ctxt_put(ctxt);
return llcd;
}
static struct llog_canceld_ctxt *llcd_get(struct llog_ctxt *ctxt)
{
struct llog_canceld_ctxt *llcd;
-
- llcd = llcd_alloc();
+ LASSERT(ctxt);
+ llcd = llcd_alloc(ctxt->loc_lcm);
if (!llcd) {
- CERROR("Couldn't alloc an llcd for ctxt %p\n", ctxt);
+ CERROR("Can't alloc an llcd for ctxt %p\n", ctxt);
return NULL;
}
llcd_attach(ctxt, llcd);
*/
static void llcd_put(struct llog_ctxt *ctxt)
{
- struct llog_commit_master *lcm;
struct llog_canceld_ctxt *llcd;
- lcm = ctxt->loc_lcm;
llcd = llcd_detach(ctxt);
if (llcd)
llcd_free(llcd);
-
- if (atomic_read(&lcm->lcm_count) == 0)
- cfs_waitq_signal(&lcm->lcm_waitq);
}
/**
rc, lcm->lcm_name);
RETURN(rc);
}
- lcm->lcm_set = lcm->lcm_pc.pc_set;
RETURN(rc);
}
EXPORT_SYMBOL(llog_recov_thread_start);
*/
void llog_recov_thread_stop(struct llog_commit_master *lcm, int force)
{
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
ENTRY;
- /**
+ /*
* Let all know that we're stopping. This will also make
* llcd_send() refuse any new llcds.
*/
- set_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags);
+ cfs_set_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags);
- /**
+ /*
* Stop processing thread. No new rpcs will be accepted for
* for processing now.
*/
ptlrpcd_stop(&lcm->lcm_pc, force);
/*
- * Wait for llcd number == 0. Note, this is infinite wait.
- * All other parts should make sure that no lost llcd is left.
+ * By this point no live in-flight llcds should remain. Only
+ * those forgotten in sync may still be attached to ctxt. Let's
+ * print them.
*/
- l_wait_event(lcm->lcm_waitq,
- atomic_read(&lcm->lcm_count) == 0, &lwi);
+ if (cfs_atomic_read(&lcm->lcm_count) != 0) {
+ struct llog_canceld_ctxt *llcd;
+ cfs_list_t *tmp;
+
+ CERROR("Busy llcds found (%d) on lcm %p\n",
+ cfs_atomic_read(&lcm->lcm_count), lcm);
+
+ cfs_spin_lock(&lcm->lcm_lock);
+ cfs_list_for_each(tmp, &lcm->lcm_llcds) {
+ llcd = cfs_list_entry(tmp, struct llog_canceld_ctxt,
+ llcd_list);
+ llcd_print(llcd, __FUNCTION__, __LINE__);
+ }
+ cfs_spin_unlock(&lcm->lcm_lock);
+
+ /*
+ * There is no point in going further with busy llcds at
+ * this point, as this is a clear bug. It might mean we got
+ * a hanging rpc which holds an import ref, and this means
+ * we will not be able to clean up anyway.
+ *
+ * Or we simply failed to kill them when they were not
+ * attached to ctxt. In that case our slab will remind
+ * us about this a bit later.
+ */
+ LBUG();
+ }
EXIT;
}
EXPORT_SYMBOL(llog_recov_thread_stop);
* Try to create threads with unique names.
*/
snprintf(lcm->lcm_name, sizeof(lcm->lcm_name),
- "ll_log_commit_%s", name);
+ "lcm_%s", name);
- strncpy(lcm->lcm_name, name, sizeof(lcm->lcm_name));
- cfs_waitq_init(&lcm->lcm_waitq);
- atomic_set(&lcm->lcm_count, 0);
+ cfs_atomic_set(&lcm->lcm_count, 0);
+ cfs_atomic_set(&lcm->lcm_refcount, 1);
+ cfs_spin_lock_init(&lcm->lcm_lock);
+ CFS_INIT_LIST_HEAD(&lcm->lcm_llcds);
rc = llog_recov_thread_start(lcm);
if (rc) {
CERROR("Can't start commit thread, rc %d\n", rc);
{
ENTRY;
llog_recov_thread_stop(lcm, force);
- OBD_FREE_PTR(lcm);
+ lcm_put(lcm);
EXIT;
}
EXPORT_SYMBOL(llog_recov_thread_fini);
OBD_FREE_PTR(lpca);
RETURN(-ENODEV);
}
- rc = cfs_kernel_thread(llog_cat_process_thread, lpca,
- CLONE_VM | CLONE_FILES);
+ rc = cfs_create_thread(llog_cat_process_thread, lpca, CFS_DAEMON_FLAGS);
if (rc < 0) {
CERROR("Error starting llog_cat_process_thread(): %d\n", rc);
OBD_FREE_PTR(lpca);
/*
* Start recovery in separate thread.
*/
- mutex_down(&ctxt->loc_sem);
+ cfs_mutex_down(&ctxt->loc_sem);
ctxt->loc_gen = *gen;
rc = llog_recov_thread_replay(ctxt, ctxt->llog_proc_cb, logid);
- mutex_up(&ctxt->loc_sem);
+ cfs_mutex_up(&ctxt->loc_sem);
RETURN(rc);
}
LASSERT(ctxt != NULL);
- mutex_down(&ctxt->loc_sem);
+ cfs_mutex_down(&ctxt->loc_sem);
+ if (!ctxt->loc_lcm) {
+ CDEBUG(D_RPCTRACE, "No lcm for ctxt %p\n", ctxt);
+ GOTO(out, rc = -ENODEV);
+ }
lcm = ctxt->loc_lcm;
+ CDEBUG(D_INFO, "cancel on lsm %p\n", lcm);
/*
* Let's check if we have all structures alive. We also check for
GOTO(out, rc = -ENODEV);
}
- if (ctxt->loc_obd->obd_stopping) {
- CDEBUG(D_RPCTRACE, "Obd is stopping for ctxt %p\n", ctxt);
- GOTO(out, rc = -ENODEV);
- }
-
- if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags)) {
+ if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags)) {
CDEBUG(D_RPCTRACE, "Commit thread is stopping for ctxt %p\n",
ctxt);
GOTO(out, rc = -ENODEV);
* Allocation is successful, let's check for stop
* flag again to fall back as soon as possible.
*/
- if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
+ if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
GOTO(out, rc = -ENODEV);
}
* Allocation is successful, let's check for stop
* flag again to fall back as soon as possible.
*/
- if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
+ if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
GOTO(out, rc = -ENODEV);
}
/*
- * Copy cookies to @llcd, no matter old or new allocated one.
+ * Copy cookies to @llcd, no matter old or new allocated
+ * one.
*/
llcd_copy(llcd, cookies);
}
/*
- * Let's check if we need to send copied @cookies asap. If yes - do it.
+ * Let's check if we need to send copied @cookies asap. If yes
+ * then do it.
*/
if (llcd && (flags & OBD_LLOG_FL_SENDNOW)) {
+ CDEBUG(D_RPCTRACE, "Sync llcd %p\n", llcd);
rc = llcd_push(ctxt);
if (rc)
GOTO(out, rc);
out:
if (rc)
llcd_put(ctxt);
- mutex_up(&ctxt->loc_sem);
+ cfs_mutex_up(&ctxt->loc_sem);
return rc;
}
EXPORT_SYMBOL(llog_obd_repl_cancel);
int rc = 0;
ENTRY;
- mutex_down(&ctxt->loc_sem);
+ /*
+ * Flush any remaining llcd.
+ */
+ cfs_mutex_down(&ctxt->loc_sem);
if (exp && (ctxt->loc_imp == exp->exp_imp_reverse)) {
- CDEBUG(D_RPCTRACE, "Reverse import disconnect\n");
/*
- * Check for llcd which might be left attached to @ctxt.
- * Let's kill it.
+ * This is an ost->mds connection; we can't be sure that the
+ * mds can still receive cookies, so let's kill the cached llcd.
*/
+ CDEBUG(D_RPCTRACE, "Kill cached llcd\n");
llcd_put(ctxt);
- mutex_up(&ctxt->loc_sem);
+ cfs_mutex_up(&ctxt->loc_sem);
} else {
- mutex_up(&ctxt->loc_sem);
+ /*
+ * This is either llog_sync() from generic llog code or a sync
+ * on client disconnect. Either way, let's do it and send the
+ * llcds to the target, waiting for completion.
+ */
+ CDEBUG(D_RPCTRACE, "Sync cached llcd\n");
+ cfs_mutex_up(&ctxt->loc_sem);
rc = llog_cancel(ctxt, NULL, 0, NULL, OBD_LLOG_FL_SENDNOW);
}
RETURN(rc);
* In 2.6.22 cfs_mem_cache_destroy() will not return error
* for busy resources. Let's check it another way.
*/
- LASSERTF(atomic_read(&llcd_count) == 0,
+ LASSERTF(cfs_atomic_read(&llcd_count) == 0,
"Can't destroy llcd cache! Number of "
- "busy llcds: %d\n", atomic_read(&llcd_count));
+ "busy llcds: %d\n", cfs_atomic_read(&llcd_count));
cfs_mem_cache_destroy(llcd_cache);
llcd_cache = NULL;
}