LLOG_LCM_FL_EXIT = 1 << 1
};
-static void llcd_print(struct llog_canceld_ctxt *llcd,
- const char *func, int line)
+static void llcd_print(struct llog_canceld_ctxt *llcd,
+ const char *func, int line)
{
CDEBUG(D_RPCTRACE, "Llcd (%p) at %s:%d:\n", llcd, func, line);
CDEBUG(D_RPCTRACE, " size: %d\n", llcd->llcd_size);
atomic_dec(&lcm->lcm_count);
spin_unlock(&lcm->lcm_lock);
- CDEBUG(D_RPCTRACE, "Free llcd %p on lcm %p (%d)\n",
+ CDEBUG(D_RPCTRACE, "Free llcd %p on lcm %p (%d)\n",
llcd, lcm, atomic_read(&lcm->lcm_count));
}
LASSERT(atomic_read(&llcd_count) > 0);
atomic_dec(&llcd_count);
- size = offsetof(struct llog_canceld_ctxt, llcd_cookies) +
+ size = offsetof(struct llog_canceld_ctxt, llcd_cookies) +
llcd->llcd_size;
OBD_SLAB_FREE(llcd, llcd_cache, size);
}
* Checks if passed cookie fits into llcd free space buffer. Returns
* 1 if yes and 0 otherwise.
*/
-static inline int
+static inline int
llcd_fit(struct llog_canceld_ctxt *llcd, struct llog_cookie *cookies)
{
return (llcd->llcd_size - llcd->llcd_cookiebytes >= sizeof(*cookies));
/**
* Copy passed @cookies to @llcd.
*/
-static inline void
+static inline void
llcd_copy(struct llog_canceld_ctxt *llcd, struct llog_cookie *cookies)
{
LASSERT(llcd_fit(llcd, cookies));
- memcpy((char *)llcd->llcd_cookies + llcd->llcd_cookiebytes,
+ memcpy((char *)llcd->llcd_cookies + llcd->llcd_cookiebytes,
cookies, sizeof(*cookies));
llcd->llcd_cookiebytes += sizeof(*cookies);
}
req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;
req->rq_interpret_reply = (ptlrpc_interpterer_t)llcd_interpret;
req->rq_async_args.pointer_arg[0] = llcd;
+
+ /* Llog cancels will be replayed after reconnect, so this may be done
+ * twice: first from the replay llog, then for the resent RPC. */
+ req->rq_no_delay = req->rq_no_resend = 1;
+
rc = ptlrpc_set_add_new_req(&lcm->lcm_pc, req);
if (rc) {
ptlrpc_request_free(req);
if (!llcd)
return NULL;
- CDEBUG(D_RPCTRACE, "Detach llcd %p from ctxt %p\n",
+ CDEBUG(D_RPCTRACE, "Detach llcd %p from ctxt %p\n",
llcd, ctxt);
ctxt->loc_llcd = NULL;
static struct llog_canceld_ctxt *llcd_get(struct llog_ctxt *ctxt)
{
struct llog_canceld_ctxt *llcd;
-
+ LASSERT(ctxt);
llcd = llcd_alloc(ctxt->loc_lcm);
if (!llcd) {
CERROR("Can't alloc an llcd for ctxt %p\n", ctxt);
struct llog_canceld_ctxt *llcd;
struct list_head *tmp;
- CERROR("Busy llcds found (%d) on lcm %p\n",
- atomic_read(&lcm->lcm_count) == 0, lcm);
+ CERROR("Busy llcds found (%d) on lcm %p\n",
+ atomic_read(&lcm->lcm_count), lcm);
spin_lock(&lcm->lcm_lock);
list_for_each(tmp, &lcm->lcm_llcds) {
llcd_print(llcd, __FUNCTION__, __LINE__);
}
spin_unlock(&lcm->lcm_lock);
-
+
/*
* No point to go further with busy llcds at this point
* as this is clear bug. It might mean we got hanging
* Try to create threads with unique names.
*/
snprintf(lcm->lcm_name, sizeof(lcm->lcm_name),
- "ll_log_commit_%s", name);
+ "lcm_%s", name);
- strncpy(lcm->lcm_name, name, sizeof(lcm->lcm_name));
atomic_set(&lcm->lcm_count, 0);
+ atomic_set(&lcm->lcm_refcount, 1);
spin_lock_init(&lcm->lcm_lock);
CFS_INIT_LIST_HEAD(&lcm->lcm_llcds);
rc = llog_recov_thread_start(lcm);
{
ENTRY;
llog_recov_thread_stop(lcm, force);
- OBD_FREE_PTR(lcm);
+ lcm_put(lcm);
EXIT;
}
EXPORT_SYMBOL(llog_recov_thread_fini);
LASSERT(ctxt != NULL);
mutex_down(&ctxt->loc_sem);
+ if (!ctxt->loc_lcm) {
+ CDEBUG(D_RPCTRACE, "No lcm for ctxt %p\n", ctxt);
+ GOTO(out, rc = -ENODEV);
+ }
lcm = ctxt->loc_lcm;
+ CDEBUG(D_INFO, "cancel on lcm %p\n", lcm);
/*
* Let's check if we have all structures alive. We also check for
int rc = 0;
ENTRY;
- /*
- * Flush any remaining llcd.
+ /*
+ * Flush any remaining llcd.
*/
mutex_down(&ctxt->loc_sem);
if (exp && (ctxt->loc_imp == exp->exp_imp_reverse)) {
llcd_put(ctxt);
mutex_up(&ctxt->loc_sem);
} else {
- /*
+ /*
* This is either llog_sync() from generic llog code or sync
* on client disconnect. In either way let's do it and send
- * llcds to the target with waiting for completion.
+ * llcds to the target with waiting for completion.
*/
CDEBUG(D_RPCTRACE, "Sync cached llcd\n");
mutex_up(&ctxt->loc_sem);