X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fptlrpc%2Frecov_thread.c;h=e1d8eead2ab3cdbe012472d0566a2edd8086584e;hb=71d68757c76cb01ba4d9afa09c766fe48dce369c;hp=01918db0e17fc9fc4452de471b6f5ab92acbfdfc;hpb=2d594879dd55133ee0403c3134b4ef2397615897;p=fs%2Flustre-release.git diff --git a/lustre/ptlrpc/recov_thread.c b/lustre/ptlrpc/recov_thread.c index 01918db..e1d8eea 100644 --- a/lustre/ptlrpc/recov_thread.c +++ b/lustre/ptlrpc/recov_thread.c @@ -1,6 +1,4 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: - * +/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -26,8 +24,10 @@ * GPL HEADER END */ /* - * Copyright 2008 Sun Microsystems, Inc. All rights reserved + * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2011, Whamcloud, Inc. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -47,10 +47,6 @@ #define DEBUG_SUBSYSTEM S_LOG -#ifndef EXPORT_SYMTAB -# define EXPORT_SYMTAB -#endif - #ifdef __KERNEL__ # include #else @@ -67,7 +63,7 @@ #include #include "ptlrpc_internal.h" -static atomic_t llcd_count = ATOMIC_INIT(0); +static cfs_atomic_t llcd_count = CFS_ATOMIC_INIT(0); static cfs_mem_cache_t *llcd_cache = NULL; #ifdef __KERNEL__ @@ -76,28 +72,56 @@ enum { LLOG_LCM_FL_EXIT = 1 << 1 }; -/** +struct llcd_async_args { + struct llog_canceld_ctxt *la_ctxt; +}; + +static void llcd_print(struct llog_canceld_ctxt *llcd, + const char *func, int line) +{ + CDEBUG(D_RPCTRACE, "Llcd (%p) at %s:%d:\n", llcd, func, line); + CDEBUG(D_RPCTRACE, " size: %d\n", llcd->llcd_size); + CDEBUG(D_RPCTRACE, " ctxt: %p\n", llcd->llcd_ctxt); + CDEBUG(D_RPCTRACE, " lcm : %p\n", llcd->llcd_lcm); + CDEBUG(D_RPCTRACE, " cookiebytes : %d\n", llcd->llcd_cookiebytes); +} + +/** * Allocate new llcd from cache, init it and return to caller. * Bumps number of objects allocated. */ -static struct llog_canceld_ctxt *llcd_alloc(void) +static struct llog_canceld_ctxt *llcd_alloc(struct llog_commit_master *lcm) { struct llog_canceld_ctxt *llcd; - int llcd_size; + int size, overhead; + + LASSERT(lcm != NULL); - /* - * Payload of lustre_msg V2 is bigger. + /* + * We want to send one page of cookies with rpc header. This buffer + * will be assigned later to the rpc, this is why we preserve the + * space for rpc header. 
*/ - llcd_size = CFS_PAGE_SIZE - - lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, NULL); - llcd_size += offsetof(struct llog_canceld_ctxt, llcd_cookies); - OBD_SLAB_ALLOC(llcd, llcd_cache, CFS_ALLOC_STD, llcd_size); + size = CFS_PAGE_SIZE - lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, NULL); + overhead = offsetof(struct llog_canceld_ctxt, llcd_cookies); + OBD_SLAB_ALLOC_GFP(llcd, llcd_cache, size + overhead, CFS_ALLOC_STD); if (!llcd) return NULL; - llcd->llcd_size = llcd_size; + CFS_INIT_LIST_HEAD(&llcd->llcd_list); llcd->llcd_cookiebytes = 0; - atomic_inc(&llcd_count); + llcd->llcd_size = size; + + cfs_spin_lock(&lcm->lcm_lock); + llcd->llcd_lcm = lcm; + cfs_atomic_inc(&lcm->lcm_count); + cfs_list_add_tail(&llcd->llcd_list, &lcm->lcm_llcds); + cfs_spin_unlock(&lcm->lcm_lock); + cfs_atomic_inc(&llcd_count); + + CDEBUG(D_RPCTRACE, "Alloc llcd %p on lcm %p (%d)\n", + llcd, lcm, cfs_atomic_read(&lcm->lcm_count)); + return llcd; } @@ -106,41 +130,53 @@ static struct llog_canceld_ctxt *llcd_alloc(void) */ static void llcd_free(struct llog_canceld_ctxt *llcd) { - LASSERT(atomic_read(&llcd_count) > 0); - OBD_SLAB_FREE(llcd, llcd_cache, llcd->llcd_size); - atomic_dec(&llcd_count); -} + struct llog_commit_master *lcm = llcd->llcd_lcm; + int size; + + if (lcm) { + if (cfs_atomic_read(&lcm->lcm_count) == 0) { + CERROR("Invalid llcd free %p\n", llcd); + llcd_print(llcd, __FUNCTION__, __LINE__); + LBUG(); + } + cfs_spin_lock(&lcm->lcm_lock); + LASSERT(!cfs_list_empty(&llcd->llcd_list)); + cfs_list_del_init(&llcd->llcd_list); + cfs_atomic_dec(&lcm->lcm_count); + cfs_spin_unlock(&lcm->lcm_lock); + + CDEBUG(D_RPCTRACE, "Free llcd %p on lcm %p (%d)\n", + llcd, lcm, cfs_atomic_read(&lcm->lcm_count)); + } -/** - * Copy passed @cookies to @llcd. - */ -static void llcd_copy(struct llog_canceld_ctxt *llcd, - struct llog_cookie *cookies) -{ - memcpy((char *)llcd->llcd_cookies + llcd->llcd_cookiebytes, - cookies, sizeof(*cookies)); - llcd->llcd_cookiebytes += sizeof(*cookies); + LASSERT(cfs_atomic_read(&llcd_count) > 0); + cfs_atomic_dec(&llcd_count); + + size = offsetof(struct llog_canceld_ctxt, llcd_cookies) + + llcd->llcd_size; + OBD_SLAB_FREE(llcd, llcd_cache, size); } /** * Checks if passed cookie fits into llcd free space buffer. Returns * 1 if yes and 0 otherwise. */ -static int llcd_fit(struct llog_canceld_ctxt *llcd, - struct llog_cookie *cookies) +static inline int +llcd_fit(struct llog_canceld_ctxt *llcd, struct llog_cookie *cookies) { - return (llcd->llcd_size - - llcd->llcd_cookiebytes) >= sizeof(*cookies); + return (llcd->llcd_size - llcd->llcd_cookiebytes >= sizeof(*cookies)); } -static void llcd_print(struct llog_canceld_ctxt *llcd, - const char *func, int line) +/** + * Copy passed @cookies to @llcd. + */ +static inline void +llcd_copy(struct llog_canceld_ctxt *llcd, struct llog_cookie *cookies) { - CDEBUG(D_RPCTRACE, "Llcd (%p) at %s:%d:\n", llcd, func, line); - CDEBUG(D_RPCTRACE, " size: %d\n", llcd->llcd_size); - CDEBUG(D_RPCTRACE, " ctxt: %p\n", llcd->llcd_ctxt); - CDEBUG(D_RPCTRACE, " lcm : %p\n", llcd->llcd_lcm); - CDEBUG(D_RPCTRACE, " cookiebytes : %d\n", llcd->llcd_cookiebytes); + LASSERT(llcd_fit(llcd, cookies)); + memcpy((char *)llcd->llcd_cookies + llcd->llcd_cookiebytes, + cookies, sizeof(*cookies)); + llcd->llcd_cookiebytes += sizeof(*cookies); } /** @@ -148,19 +184,21 @@ static void llcd_print(struct llog_canceld_ctxt *llcd, * sending result. Error is passed in @rc. Note, that this will be called * in cleanup time when all inflight rpcs aborted. 
*/ -static int +static int llcd_interpret(const struct lu_env *env, - struct ptlrpc_request *req, void *noused, int rc) + struct ptlrpc_request *req, void *args, int rc) { - struct llog_canceld_ctxt *llcd = req->rq_async_args.pointer_arg[0]; - CDEBUG(D_RPCTRACE, "Sent llcd %p (%d)\n", llcd, rc); + struct llcd_async_args *la = args; + struct llog_canceld_ctxt *llcd = la->la_ctxt; + + CDEBUG(D_RPCTRACE, "Sent llcd %p (%d) - killing it\n", llcd, rc); llcd_free(llcd); return 0; } - + /** * Send @llcd to remote node. Free llcd uppon completion or error. Sending - * is performed in async style so this function will return asap without + * is performed in async style so this function will return asap without * blocking. */ static int llcd_send(struct llog_canceld_ctxt *llcd) @@ -168,6 +206,7 @@ static int llcd_send(struct llog_canceld_ctxt *llcd) char *bufs[2] = { NULL, (char *)llcd->llcd_cookies }; struct obd_import *import = NULL; struct llog_commit_master *lcm; + struct llcd_async_args *la; struct ptlrpc_request *req; struct llog_ctxt *ctxt; int rc; @@ -175,31 +214,31 @@ static int llcd_send(struct llog_canceld_ctxt *llcd) ctxt = llcd->llcd_ctxt; if (!ctxt) { - CERROR("Invalid llcd with NULL ctxt found (%p)\n", + CERROR("Invalid llcd with NULL ctxt found (%p)\n", llcd); llcd_print(llcd, __FUNCTION__, __LINE__); LBUG(); } - LASSERT_SEM_LOCKED(&ctxt->loc_sem); + LASSERT_MUTEX_LOCKED(&ctxt->loc_mutex); if (llcd->llcd_cookiebytes == 0) GOTO(exit, rc = 0); lcm = llcd->llcd_lcm; - - /* + + /* * Check if we're in exit stage. Do not send llcd in - * this case. + * this case. */ - if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags)) + if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags)) GOTO(exit, rc = -ENODEV); CDEBUG(D_RPCTRACE, "Sending llcd %p\n", llcd); import = llcd->llcd_ctxt->loc_imp; - if (!import || (import == LP_POISON) || + if (!import || (import == LP_POISON) || (import->imp_client == LP_POISON)) { - CERROR("Invalid import %p for llcd %p\n", + CERROR("Invalid import %p for llcd %p\n", import, llcd); GOTO(exit, rc = -ENODEV); } @@ -207,12 +246,12 @@ static int llcd_send(struct llog_canceld_ctxt *llcd) OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_RECOV, 10); /* - * No need to get import here as it is already done in + * No need to get import here as it is already done in * llog_receptor_accept(). 
*/ req = ptlrpc_request_alloc(import, &RQF_LOG_CANCEL); if (req == NULL) { - CERROR("Can't allocate request for sending llcd %p\n", + CERROR("Can't allocate request for sending llcd %p\n", llcd); GOTO(exit, rc = -ENOMEM); } @@ -232,13 +271,18 @@ static int llcd_send(struct llog_canceld_ctxt *llcd) /* bug 5515 */ req->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL; req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL; + req->rq_interpret_reply = (ptlrpc_interpterer_t)llcd_interpret; - req->rq_async_args.pointer_arg[0] = llcd; - rc = ptlrpc_set_add_new_req(&lcm->lcm_pc, req); - if (rc) { - ptlrpc_request_free(req); - GOTO(exit, rc); - } + + CLASSERT(sizeof(*la) <= sizeof(req->rq_async_args)); + la = ptlrpc_req_async_args(req); + la->la_ctxt = llcd; + + /* llog cancels will be replayed after reconnect so this will do twice + * first from replay llog, second for resended rpc */ + req->rq_no_delay = req->rq_no_resend = 1; + + ptlrpc_set_add_new_req(&lcm->lcm_pc, req); RETURN(0); exit: CDEBUG(D_RPCTRACE, "Refused llcd %p\n", llcd); @@ -253,18 +297,15 @@ exit: static int llcd_attach(struct llog_ctxt *ctxt, struct llog_canceld_ctxt *llcd) { - struct llog_commit_master *lcm; - LASSERT(ctxt != NULL && llcd != NULL); - LASSERT_SEM_LOCKED(&ctxt->loc_sem); + LASSERT_MUTEX_LOCKED(&ctxt->loc_mutex); LASSERT(ctxt->loc_llcd == NULL); - lcm = ctxt->loc_lcm; - atomic_inc(&lcm->lcm_count); - CDEBUG(D_RPCTRACE, "Attach llcd %p to ctxt %p (%d)\n", - llcd, ctxt, atomic_read(&lcm->lcm_count)); llcd->llcd_ctxt = llog_ctxt_get(ctxt); - llcd->llcd_lcm = ctxt->loc_lcm; ctxt->loc_llcd = llcd; + + CDEBUG(D_RPCTRACE, "Attach llcd %p to ctxt %p\n", + llcd, ctxt); + return 0; } @@ -274,28 +315,19 @@ llcd_attach(struct llog_ctxt *ctxt, struct llog_canceld_ctxt *llcd) */ static struct llog_canceld_ctxt *llcd_detach(struct llog_ctxt *ctxt) { - struct llog_commit_master *lcm; struct llog_canceld_ctxt *llcd; LASSERT(ctxt != NULL); - LASSERT_SEM_LOCKED(&ctxt->loc_sem); + LASSERT_MUTEX_LOCKED(&ctxt->loc_mutex); llcd = ctxt->loc_llcd; if (!llcd) return NULL; - lcm = ctxt->loc_lcm; - if (atomic_read(&lcm->lcm_count) == 0) { - CERROR("Invalid detach occured %p:%p\n", ctxt, llcd); - llcd_print(llcd, __FUNCTION__, __LINE__); - LBUG(); - } - atomic_dec(&lcm->lcm_count); - ctxt->loc_llcd = NULL; - - CDEBUG(D_RPCTRACE, "Detach llcd %p from ctxt %p (%d)\n", - llcd, ctxt, atomic_read(&lcm->lcm_count)); + CDEBUG(D_RPCTRACE, "Detach llcd %p from ctxt %p\n", + llcd, ctxt); + ctxt->loc_llcd = NULL; llog_ctxt_put(ctxt); return llcd; } @@ -307,10 +339,10 @@ static struct llog_canceld_ctxt *llcd_detach(struct llog_ctxt *ctxt) static struct llog_canceld_ctxt *llcd_get(struct llog_ctxt *ctxt) { struct llog_canceld_ctxt *llcd; - - llcd = llcd_alloc(); + LASSERT(ctxt); + llcd = llcd_alloc(ctxt->loc_lcm); if (!llcd) { - CERROR("Couldn't alloc an llcd for ctxt %p\n", ctxt); + CERROR("Can't alloc an llcd for ctxt %p\n", ctxt); return NULL; } llcd_attach(ctxt, llcd); @@ -322,16 +354,11 @@ static struct llog_canceld_ctxt *llcd_get(struct llog_ctxt *ctxt) */ static void llcd_put(struct llog_ctxt *ctxt) { - struct llog_commit_master *lcm; struct llog_canceld_ctxt *llcd; - lcm = ctxt->loc_lcm; llcd = llcd_detach(ctxt); if (llcd) llcd_free(llcd); - - if (atomic_read(&lcm->lcm_count) == 0) - cfs_waitq_signal(&lcm->lcm_waitq); } /** @@ -344,7 +371,7 @@ static int llcd_push(struct llog_ctxt *ctxt) int rc; /* - * Make sure that this llcd will not be sent again as we detach + * Make sure that this llcd will not be sent again as we detach * it from ctxt. 
*/ llcd = llcd_detach(ctxt); @@ -353,7 +380,7 @@ static int llcd_push(struct llog_ctxt *ctxt) llcd_print(llcd, __FUNCTION__, __LINE__); LBUG(); } - + rc = llcd_send(llcd); if (rc) CERROR("Couldn't send llcd %p (%d)\n", llcd, rc); @@ -370,13 +397,12 @@ int llog_recov_thread_start(struct llog_commit_master *lcm) int rc; ENTRY; - rc = ptlrpcd_start(lcm->lcm_name, &lcm->lcm_pc); + rc = ptlrpcd_start(-1, 1, lcm->lcm_name, &lcm->lcm_pc); if (rc) { - CERROR("Error %d while starting recovery thread %s\n", + CERROR("Error %d while starting recovery thread %s\n", rc, lcm->lcm_name); RETURN(rc); } - lcm->lcm_set = lcm->lcm_pc.pc_set; RETURN(rc); } EXPORT_SYMBOL(llog_recov_thread_start); @@ -386,27 +412,52 @@ EXPORT_SYMBOL(llog_recov_thread_start); */ void llog_recov_thread_stop(struct llog_commit_master *lcm, int force) { - struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL); ENTRY; - /** - * Let all know that we're stopping. This will also make + /* + * Let all know that we're stopping. This will also make * llcd_send() refuse any new llcds. */ - set_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags); + cfs_set_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags); - /** + /* * Stop processing thread. No new rpcs will be accepted for * for processing now. */ ptlrpcd_stop(&lcm->lcm_pc, force); /* - * Wait for llcd number == 0. Note, this is infinite wait. - * All other parts should make sure that no lost llcd is left. + * By this point no alive inflight llcds should be left. Only + * those forgotten in sync may still be attached to ctxt. Let's + * print them. */ - l_wait_event(lcm->lcm_waitq, - atomic_read(&lcm->lcm_count) == 0, &lwi); + if (cfs_atomic_read(&lcm->lcm_count) != 0) { + struct llog_canceld_ctxt *llcd; + cfs_list_t *tmp; + + CERROR("Busy llcds found (%d) on lcm %p\n", + cfs_atomic_read(&lcm->lcm_count), lcm); + + cfs_spin_lock(&lcm->lcm_lock); + cfs_list_for_each(tmp, &lcm->lcm_llcds) { + llcd = cfs_list_entry(tmp, struct llog_canceld_ctxt, + llcd_list); + llcd_print(llcd, __FUNCTION__, __LINE__); + } + cfs_spin_unlock(&lcm->lcm_lock); + + /* + * No point to go further with busy llcds at this point + * as this is clear bug. It might mean we got hanging + * rpc which holds import ref and this means we will not + * be able to cleanup anyways. + * + * Or we just missed to kill them when they were not + * attached to ctxt. In this case our slab will remind + * us about this a bit later. + */ + LBUG(); + } EXIT; } EXPORT_SYMBOL(llog_recov_thread_stop); @@ -427,12 +478,13 @@ struct llog_commit_master *llog_recov_thread_init(char *name) /* * Try to create threads with unique names. 
*/ - snprintf(lcm->lcm_name, sizeof(lcm->lcm_name), - "ll_log_commit_%s", name); + snprintf(lcm->lcm_name, sizeof(lcm->lcm_name), + "lcm_%s", name); - strncpy(lcm->lcm_name, name, sizeof(lcm->lcm_name)); - cfs_waitq_init(&lcm->lcm_waitq); - atomic_set(&lcm->lcm_count, 0); + cfs_atomic_set(&lcm->lcm_count, 0); + cfs_atomic_set(&lcm->lcm_refcount, 1); + cfs_spin_lock_init(&lcm->lcm_lock); + CFS_INIT_LIST_HEAD(&lcm->lcm_llcds); rc = llog_recov_thread_start(lcm); if (rc) { CERROR("Can't start commit thread, rc %d\n", rc); @@ -452,12 +504,12 @@ void llog_recov_thread_fini(struct llog_commit_master *lcm, int force) { ENTRY; llog_recov_thread_stop(lcm, force); - OBD_FREE_PTR(lcm); + lcm_put(lcm); EXIT; } EXPORT_SYMBOL(llog_recov_thread_fini); -static int llog_recov_thread_replay(struct llog_ctxt *ctxt, +static int llog_recov_thread_replay(struct llog_ctxt *ctxt, void *cb, void *arg) { struct obd_device *obd = ctxt->loc_obd; @@ -486,8 +538,7 @@ static int llog_recov_thread_replay(struct llog_ctxt *ctxt, OBD_FREE_PTR(lpca); RETURN(-ENODEV); } - rc = cfs_kernel_thread(llog_cat_process_thread, lpca, - CLONE_VM | CLONE_FILES); + rc = cfs_create_thread(llog_cat_process_thread, lpca, CFS_DAEMON_FLAGS); if (rc < 0) { CERROR("Error starting llog_cat_process_thread(): %d\n", rc); OBD_FREE_PTR(lpca); @@ -507,25 +558,25 @@ int llog_obd_repl_connect(struct llog_ctxt *ctxt, int rc; ENTRY; - /* + /* * Send back cached llcd from llog before recovery if we have any. * This is void is nothing cached is found there. */ llog_sync(ctxt, NULL); - /* - * Start recovery in separate thread. + /* + * Start recovery in separate thread. */ - mutex_down(&ctxt->loc_sem); + cfs_mutex_lock(&ctxt->loc_mutex); ctxt->loc_gen = *gen; rc = llog_recov_thread_replay(ctxt, ctxt->llog_proc_cb, logid); - mutex_up(&ctxt->loc_sem); + cfs_mutex_unlock(&ctxt->loc_mutex); RETURN(rc); } EXPORT_SYMBOL(llog_obd_repl_connect); -/** +/** * Deleted objects have a commit callback that cancels the MDS * log record for the deletion. The commit callback calls this * function. @@ -541,9 +592,14 @@ int llog_obd_repl_cancel(struct llog_ctxt *ctxt, LASSERT(ctxt != NULL); - mutex_down(&ctxt->loc_sem); + cfs_mutex_lock(&ctxt->loc_mutex); + if (!ctxt->loc_lcm) { + CDEBUG(D_RPCTRACE, "No lcm for ctxt %p\n", ctxt); + GOTO(out, rc = -ENODEV); + } lcm = ctxt->loc_lcm; - + CDEBUG(D_INFO, "cancel on lsm %p\n", lcm); + /* * Let's check if we have all structures alive. We also check for * possible shutdown. Do nothing if we're stopping. @@ -553,13 +609,8 @@ int llog_obd_repl_cancel(struct llog_ctxt *ctxt, GOTO(out, rc = -ENODEV); } - if (ctxt->loc_obd->obd_stopping) { - CDEBUG(D_RPCTRACE, "Obd is stopping for ctxt %p\n", ctxt); - GOTO(out, rc = -ENODEV); - } - - if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags)) { - CDEBUG(D_RPCTRACE, "Commit thread is stopping for ctxt %p\n", + if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags)) { + CDEBUG(D_RPCTRACE, "Commit thread is stopping for ctxt %p\n", ctxt); GOTO(out, rc = -ENODEV); } @@ -568,7 +619,7 @@ int llog_obd_repl_cancel(struct llog_ctxt *ctxt, if (count > 0 && cookies != NULL) { /* - * Get new llcd from ctxt if required. + * Get new llcd from ctxt if required. */ if (!llcd) { llcd = llcd_get(ctxt); @@ -578,13 +629,13 @@ int llog_obd_repl_cancel(struct llog_ctxt *ctxt, * Allocation is successful, let's check for stop * flag again to fall back as soon as possible. 
*/ - if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags)) + if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags)) GOTO(out, rc = -ENODEV); } /* - * Llcd does not have enough room for @cookies. Let's push - * it out and allocate new one. + * Llcd does not have enough room for @cookies. Let's push + * it out and allocate new one. */ if (!llcd_fit(llcd, cookies)) { rc = llcd_push(ctxt); @@ -597,20 +648,23 @@ int llog_obd_repl_cancel(struct llog_ctxt *ctxt, * Allocation is successful, let's check for stop * flag again to fall back as soon as possible. */ - if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags)) + if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags)) GOTO(out, rc = -ENODEV); } /* - * Copy cookies to @llcd, no matter old or new allocated one. + * Copy cookies to @llcd, no matter old or new allocated + * one. */ llcd_copy(llcd, cookies); } /* - * Let's check if we need to send copied @cookies asap. If yes - do it. + * Let's check if we need to send copied @cookies asap. If yes + * then do it. */ if (llcd && (flags & OBD_LLOG_FL_SENDNOW)) { + CDEBUG(D_RPCTRACE, "Sync llcd %p\n", llcd); rc = llcd_push(ctxt); if (rc) GOTO(out, rc); @@ -619,7 +673,7 @@ int llog_obd_repl_cancel(struct llog_ctxt *ctxt, out: if (rc) llcd_put(ctxt); - mutex_up(&ctxt->loc_sem); + cfs_mutex_unlock(&ctxt->loc_mutex); return rc; } EXPORT_SYMBOL(llog_obd_repl_cancel); @@ -629,17 +683,26 @@ int llog_obd_repl_sync(struct llog_ctxt *ctxt, struct obd_export *exp) int rc = 0; ENTRY; - mutex_down(&ctxt->loc_sem); + /* + * Flush any remaining llcd. + */ + cfs_mutex_lock(&ctxt->loc_mutex); if (exp && (ctxt->loc_imp == exp->exp_imp_reverse)) { - CDEBUG(D_RPCTRACE, "Reverse import disconnect\n"); /* - * Check for llcd which might be left attached to @ctxt. - * Let's kill it. + * This is ost->mds connection, we can't be sure that mds + * can still receive cookies, let's killed the cached llcd. */ + CDEBUG(D_RPCTRACE, "Kill cached llcd\n"); llcd_put(ctxt); - mutex_up(&ctxt->loc_sem); + cfs_mutex_unlock(&ctxt->loc_mutex); } else { - mutex_up(&ctxt->loc_sem); + /* + * This is either llog_sync() from generic llog code or sync + * on client disconnect. In either way let's do it and send + * llcds to the target with waiting for completion. + */ + CDEBUG(D_RPCTRACE, "Sync cached llcd\n"); + cfs_mutex_unlock(&ctxt->loc_mutex); rc = llog_cancel(ctxt, NULL, 0, NULL, OBD_LLOG_FL_SENDNOW); } RETURN(rc); @@ -663,7 +726,7 @@ int llog_recov_init(void) { int llcd_size; - llcd_size = CFS_PAGE_SIZE - + llcd_size = CFS_PAGE_SIZE - lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, NULL); llcd_size += offsetof(struct llog_canceld_ctxt, llcd_cookies); llcd_cache = cfs_mem_cache_create("llcd_cache", llcd_size, 0, 0); @@ -680,7 +743,7 @@ int llog_recov_init(void) void llog_recov_fini(void) { /* - * Kill llcd cache when thread is stopped and we're sure no + * Kill llcd cache when thread is stopped and we're sure no * llcd in use left. */ if (llcd_cache) { @@ -688,9 +751,9 @@ void llog_recov_fini(void) * In 2.6.22 cfs_mem_cache_destroy() will not return error * for busy resources. Let's check it another way. */ - LASSERTF(atomic_read(&llcd_count) == 0, + LASSERTF(cfs_atomic_read(&llcd_count) == 0, "Can't destroy llcd cache! Number of " - "busy llcds: %d\n", atomic_read(&llcd_count)); + "busy llcds: %d\n", cfs_atomic_read(&llcd_count)); cfs_mem_cache_destroy(llcd_cache); llcd_cache = NULL; }
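
A few notes on the hunks above. In the llcd_alloc()/llcd_free() hunks the meaning of llcd_size changes: it used to cover the whole slab object (structure header plus cookie buffer), so llcd_fit() compared incoming cookies against slightly more space than the cookie buffer itself; after the patch llcd_size is only the cookie buffer (one page minus the reserved lustre_msg header), and the offsetof() overhead is added back explicitly when allocating and freeing. A condensed sketch of the new accounting, using only names that appear in the patch:

        /* llcd_alloc(): reserve one page for "lustre_msg header + cookies",
         * then add the structure header on top for the slab object itself. */
        size     = CFS_PAGE_SIZE - lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, NULL);
        overhead = offsetof(struct llog_canceld_ctxt, llcd_cookies);
        OBD_SLAB_ALLOC_GFP(llcd, llcd_cache, size + overhead, CFS_ALLOC_STD);
        llcd->llcd_size = size;                 /* cookie buffer only */

        /* llcd_fit() can now test against llcd_size directly:
         *      llcd_size - llcd_cookiebytes >= sizeof(struct llog_cookie) */

        /* llcd_free(): rebuild the full object size for the slab free. */
        OBD_SLAB_FREE(llcd, llcd_cache,
                      offsetof(struct llog_canceld_ctxt, llcd_cookies) +
                      llcd->llcd_size);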
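
The attach/detach and llog_recov_thread_stop() hunks move llcd accounting from the ctxt attach path into the allocator: every llcd is linked into lcm_llcds under lcm_lock and counted in lcm_count for its whole lifetime, and shutdown no longer waits on lcm_waitq for the count to drain; after ptlrpcd_stop(), any llcd still registered is printed and treated as a bug (LBUG()). The bookkeeping, stripped of the debug output:

        /* llcd_alloc(): register with the commit master right away. */
        cfs_spin_lock(&lcm->lcm_lock);
        llcd->llcd_lcm = lcm;
        cfs_atomic_inc(&lcm->lcm_count);
        cfs_list_add_tail(&llcd->llcd_list, &lcm->lcm_llcds);
        cfs_spin_unlock(&lcm->lcm_lock);

        /* llcd_free(): unregister under the same lock before the slab free. */
        cfs_spin_lock(&lcm->lcm_lock);
        cfs_list_del_init(&llcd->llcd_list);
        cfs_atomic_dec(&lcm->lcm_count);
        cfs_spin_unlock(&lcm->lcm_lock);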
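
llcd_send() stops smuggling the llcd through rq_async_args.pointer_arg[0] and instead embeds a typed struct llcd_async_args in the request, which llcd_interpret() receives back through its args parameter. A minimal sketch of the pattern, assuming the ptlrpc_req_async_args() and CLASSERT() helpers declared elsewhere in the tree:

        struct llcd_async_args {
                struct llog_canceld_ctxt *la_ctxt;
        };

        /* Completion callback: @args points at the space embedded in the
         * request by ptlrpc_req_async_args() below. */
        static int llcd_interpret(const struct lu_env *env,
                                  struct ptlrpc_request *req, void *args, int rc)
        {
                struct llcd_async_args *la = args;

                llcd_free(la->la_ctxt);
                return 0;
        }

        /* Inside llcd_send(), after the request has been packed: */
        CLASSERT(sizeof(*la) <= sizeof(req->rq_async_args));   /* must fit   */
        la = ptlrpc_req_async_args(req);        /* no separate allocation    */
        la->la_ctxt = llcd;
        req->rq_interpret_reply = (ptlrpc_interpterer_t)llcd_interpret;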
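
The same hunk also marks the cancel request rq_no_delay and rq_no_resend; going by the comment in the patch, the cancels would otherwise go out twice after a reconnect (once regenerated from llog replay and once as a resent rpc), so resending is simply disabled:

        /* llog cancels are regenerated from replay after reconnect,
         * so never delay or resend this particular request. */
        req->rq_no_delay = req->rq_no_resend = 1;
        ptlrpc_set_add_new_req(&lcm->lcm_pc, req);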
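
Finally, llog_obd_repl_cancel() keeps its fill-and-flush structure around llcd_fit(), llcd_copy() and llcd_push(); condensed here, with the EXIT-flag rechecks and error paths left out:

        if (count > 0 && cookies != NULL) {
                if (!llcd)
                        llcd = llcd_get(ctxt);          /* allocate + attach  */

                if (!llcd_fit(llcd, cookies)) {         /* would overflow     */
                        rc = llcd_push(ctxt);           /* detach and send    */
                        if (rc == 0)
                                llcd = llcd_get(ctxt);  /* start a new buffer */
                }
                llcd_copy(llcd, cookies);               /* append the cookie  */
        }

        if (llcd && (flags & OBD_LLOG_FL_SENDNOW))
                rc = llcd_push(ctxt);                   /* flush immediately  */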