1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/ptlrpc/recov_thread.c
38 * OST<->MDS recovery logging thread.
39 * Invariants in implementation:
40 * - we do not share logs among different OST<->MDS connections, so that
41 * if an OST or MDS fails it need only look at log(s) relevant to itself
43 * Author: Andreas Dilger <adilger@clusterfs.com>
44 * Yury Umanets <yury.umanets@sun.com>
45 * Alexey Lyashkov <alexey.lyashkov@sun.com>
48 #define DEBUG_SUBSYSTEM S_LOG
51 # define EXPORT_SYMTAB
55 # include <libcfs/libcfs.h>
57 # include <libcfs/list.h>
58 # include <liblustre.h>
61 #include <obd_class.h>
62 #include <obd_support.h>
63 #include <obd_class.h>
64 #include <lustre_net.h>
65 #include <lnet/types.h>
66 #include <libcfs/list.h>
67 #include <lustre_log.h>
68 #include "ptlrpc_internal.h"
/* Number of llcd objects currently allocated from llcd_cache; used by
 * llog_recov_fini() to assert that no llcd is leaked at module unload. */
static atomic_t llcd_count = ATOMIC_INIT(0);
/* Slab cache for struct llog_canceld_ctxt objects; created in
 * llog_recov_init(), destroyed in llog_recov_fini(). */
static cfs_mem_cache_t *llcd_cache = NULL;
/*
 * Bit numbers for llog_commit_master::lcm_flags (manipulated with
 * set_bit()/test_bit()).
 */
enum {
        LLOG_LCM_FL_START = 1 << 0, /* commit master started */
        LLOG_LCM_FL_EXIT  = 1 << 1  /* stopping; refuse new llcd sends */
};
/**
 * Allocate a new llcd from the cache, initialize it and return it to the
 * caller. Bumps the number of objects allocated (llcd_count).
 *
 * Returns the new llcd or NULL on allocation failure.
 */
static struct llog_canceld_ctxt *llcd_alloc(void)
{
        struct llog_canceld_ctxt *llcd;
        int llcd_size;

        /*
         * Size the cookie payload so that one llcd fills a page when sent:
         * payload of lustre_msg V2 is bigger, so subtract its header size.
         */
        llcd_size = CFS_PAGE_SIZE -
                lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, NULL);
        /* llcd_size covers the llcd header up to llcd_cookies as well,
         * i.e. it is the size of the WHOLE slab object. */
        llcd_size += offsetof(struct llog_canceld_ctxt, llcd_cookies);
        OBD_SLAB_ALLOC(llcd, llcd_cache, CFS_ALLOC_STD, llcd_size);
        if (!llcd)
                return NULL;

        llcd->llcd_size = llcd_size;
        llcd->llcd_cookiebytes = 0;
        atomic_inc(&llcd_count);
        return llcd;
}
/**
 * Return the passed llcd to the cache and drop the allocated-object count.
 * The llcd must not be attached to any ctxt when freed.
 */
static void llcd_free(struct llog_canceld_ctxt *llcd)
{
        OBD_SLAB_FREE(llcd, llcd_cache, llcd->llcd_size);
        atomic_dec(&llcd_count);
}
114 * Copy passed @cookies to @llcd.
116 static void llcd_copy(struct llog_canceld_ctxt *llcd,
117 struct llog_cookie *cookies)
119 memcpy((char *)llcd->llcd_cookies + llcd->llcd_cookiebytes,
120 cookies, sizeof(*cookies));
121 llcd->llcd_cookiebytes += sizeof(*cookies);
125 * Checks if passed cookie fits into llcd free space buffer. Returns
126 * 1 if yes and 0 otherwise.
128 static int llcd_fit(struct llog_canceld_ctxt *llcd,
129 struct llog_cookie *cookies)
131 return (llcd->llcd_size -
132 llcd->llcd_cookiebytes) >= sizeof(*cookies);
/**
 * Dump the state of @llcd to the RPC trace log for debugging. @func and
 * @line identify the call site (callers pass __FUNCTION__/__LINE__).
 */
static void llcd_print(struct llog_canceld_ctxt *llcd,
                       const char *func, int line)
{
        CDEBUG(D_RPCTRACE, "Llcd (%p) at %s:%d:\n", llcd, func, line);
        CDEBUG(D_RPCTRACE, " size: %d\n", llcd->llcd_size);
        CDEBUG(D_RPCTRACE, " ctxt: %p\n", llcd->llcd_ctxt);
        CDEBUG(D_RPCTRACE, " lcm : %p\n", llcd->llcd_lcm);
        CDEBUG(D_RPCTRACE, " cookiebytes : %d\n", llcd->llcd_cookiebytes);
}
/**
 * Llcd completion function. Called upon llcd send finish regardless of
 * the sending result (error passed in @rc). Note that this will also be
 * called at cleanup time when all inflight rpcs are aborted, so the llcd
 * must be freed here in every case.
 */
static int
llcd_interpret(struct ptlrpc_request *req, void *noused, int rc)
{
        struct llog_canceld_ctxt *llcd = req->rq_async_args.pointer_arg[0];
        CDEBUG(D_RPCTRACE, "Sent llcd %p (%d)\n", llcd, rc);
        llcd_free(llcd);
        return 0;
}
/**
 * Send @llcd to the remote node. The llcd is freed upon completion or
 * error (either directly on the refusal path below, or by
 * llcd_interpret() once the rpc finishes). Sending is performed in async
 * style, so this function returns asap without waiting for the reply.
 *
 * Must be called with ctxt->loc_sem held. Returns 0 on success or a
 * negative errno.
 */
static int llcd_send(struct llog_canceld_ctxt *llcd)
{
        char *bufs[2] = { NULL, (char *)llcd->llcd_cookies };
        struct obd_import *import = NULL;
        struct llog_commit_master *lcm;
        struct ptlrpc_request *req;
        struct llog_ctxt *ctxt;
        int rc;
        ENTRY;

        ctxt = llcd->llcd_ctxt;
        if (!ctxt) {
                /* An llcd must always be attached to a ctxt before send. */
                CERROR("Invalid llcd with NULL ctxt found (%p)\n",
                       llcd);
                llcd_print(llcd, __FUNCTION__, __LINE__);
                LBUG();
        }
        LASSERT_SEM_LOCKED(&ctxt->loc_sem);

        /* Nothing to cancel - just free the llcd on the exit path. */
        if (llcd->llcd_cookiebytes == 0)
                GOTO(exit, rc = 0);

        lcm = llcd->llcd_lcm;

        /*
         * Check if we're in exit stage. Do not send llcd in
         * this case.
         */
        if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
                GOTO(exit, rc = -ENODEV);

        CDEBUG(D_RPCTRACE, "Sending llcd %p\n", llcd);

        import = llcd->llcd_ctxt->loc_imp;
        /* Guard against an import that is being torn down (poisoned). */
        if (!import || (import == LP_POISON) ||
            (import->imp_client == LP_POISON)) {
                CERROR("Invalid import %p for llcd %p\n",
                       import, llcd);
                GOTO(exit, rc = -ENODEV);
        }

        /* Fault-injection point for recovery testing. */
        OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_RECOV, 10);

        /*
         * No need to get import here as it is already done in
         * llog_receptor_accept().
         */
        req = ptlrpc_request_alloc(import, &RQF_LOG_CANCEL);
        if (req == NULL) {
                CERROR("Can't allocate request for sending llcd %p\n",
                       llcd);
                GOTO(exit, rc = -ENOMEM);
        }
        /* The cookie buffer is the second request buffer (bufs[1]). */
        req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES,
                             RCL_CLIENT, llcd->llcd_cookiebytes);

        rc = ptlrpc_request_bufs_pack(req, LUSTRE_LOG_VERSION,
                                      OBD_LOG_CANCEL, bufs, NULL);
        if (rc) {
                ptlrpc_request_free(req);
                GOTO(exit, rc);
        }

        ptlrpc_at_set_req_timeout(req);
        ptlrpc_request_set_replen(req);

        /* Cancels travel on the LDLM cancel portals. */
        req->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
        req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;
        /* llcd_interpret() frees the llcd once the rpc completes. */
        req->rq_interpret_reply = llcd_interpret;
        req->rq_async_args.pointer_arg[0] = llcd;
        rc = ptlrpc_set_add_new_req(&lcm->lcm_pc, req);
        if (rc)
                GOTO(exit, rc);
        RETURN(0);
exit:
        /* Refusal path: the rpc was never queued, free the llcd here. */
        CDEBUG(D_RPCTRACE, "Refused llcd %p\n", llcd);
        llcd_free(llcd);
        return rc;
}
/**
 * Attach @llcd to @ctxt. Establish llcd vs. ctxt reverse connection so
 * that they can refer to each other. Takes a reference on @ctxt (via
 * llog_ctxt_get()) and bumps the lcm attached-llcd counter.
 *
 * Must be called with ctxt->loc_sem held and no llcd already attached.
 */
static void
llcd_attach(struct llog_ctxt *ctxt, struct llog_canceld_ctxt *llcd)
{
        struct llog_commit_master *lcm;

        LASSERT(ctxt != NULL && llcd != NULL);
        LASSERT_SEM_LOCKED(&ctxt->loc_sem);
        LASSERT(ctxt->loc_llcd == NULL);

        lcm = ctxt->loc_lcm;
        atomic_inc(&lcm->lcm_count);
        CDEBUG(D_RPCTRACE, "Attach llcd %p to ctxt %p (%d)\n",
               llcd, ctxt, atomic_read(&lcm->lcm_count));
        llcd->llcd_ctxt = llog_ctxt_get(ctxt);
        llcd->llcd_lcm = ctxt->loc_lcm;
        ctxt->loc_llcd = llcd;
}
/**
 * Opposite to llcd_attach(). Detaches llcd from its @ctxt. This makes
 * sure that this llcd will not be found another time we try to cancel.
 * Drops the ctxt reference taken in llcd_attach().
 *
 * Must be called with ctxt->loc_sem held. Returns the detached llcd or
 * NULL if none was attached.
 */
static struct llog_canceld_ctxt *llcd_detach(struct llog_ctxt *ctxt)
{
        struct llog_commit_master *lcm;
        struct llog_canceld_ctxt *llcd;

        LASSERT(ctxt != NULL);
        LASSERT_SEM_LOCKED(&ctxt->loc_sem);

        llcd = ctxt->loc_llcd;
        if (!llcd)
                return NULL;

        lcm = ctxt->loc_lcm;
        if (atomic_read(&lcm->lcm_count) == 0) {
                /* Counter underflow means attach/detach got unbalanced. */
                CERROR("Invalid detach occured %p:%p\n", ctxt, llcd);
                llcd_print(llcd, __FUNCTION__, __LINE__);
                LBUG();
        }
        atomic_dec(&lcm->lcm_count);
        ctxt->loc_llcd = NULL;

        CDEBUG(D_RPCTRACE, "Detach llcd %p from ctxt %p (%d)\n",
               llcd, ctxt, atomic_read(&lcm->lcm_count));

        /* Balance the llog_ctxt_get() done in llcd_attach(). */
        llog_ctxt_put(ctxt);
        return llcd;
}
/**
 * Allocate a new llcd and attach it to @ctxt so that it may be used for
 * gathering cookies and sending. Returns the attached llcd or NULL on
 * allocation failure. Must be called with ctxt->loc_sem held.
 */
static struct llog_canceld_ctxt *llcd_get(struct llog_ctxt *ctxt)
{
        struct llog_canceld_ctxt *llcd;

        llcd = llcd_alloc();
        if (!llcd) {
                CERROR("Couldn't alloc an llcd for ctxt %p\n", ctxt);
                return NULL;
        }
        llcd_attach(ctxt, llcd);
        return llcd;
}
/**
 * Detach llcd from its @ctxt (if any) and free it without sending.
 * Must be called with ctxt->loc_sem held.
 */
static void llcd_put(struct llog_ctxt *ctxt)
{
        struct llog_canceld_ctxt *llcd;

        llcd = llcd_detach(ctxt);
        if (llcd)
                llcd_free(llcd);
}
/**
 * Detach llcd from its @ctxt so that nobody will find it and try to
 * re-use it, then send it to the remote node. Must be called with
 * ctxt->loc_sem held. Returns 0 on success or a negative errno from
 * llcd_send() (which frees the llcd on failure).
 */
static int llcd_push(struct llog_ctxt *ctxt)
{
        struct llog_canceld_ctxt *llcd;
        int rc;

        /*
         * Make sure that this llcd will not be sent again as we detach
         * it from ctxt.
         */
        llcd = llcd_detach(ctxt);
        if (!llcd) {
                /* Callers only push when an llcd is attached. */
                CERROR("Invalid detached llcd found %p\n", llcd);
                llcd_print(llcd, __FUNCTION__, __LINE__);
                LBUG();
        }

        rc = llcd_send(llcd);
        if (rc)
                CERROR("Couldn't send llcd %p (%d)\n", llcd, rc);
        return rc;
}
355 static atomic_t llog_tcount = ATOMIC_INIT(0);
/**
 * Start the recovery thread which actually deals with llcd sending. This
 * is all standard ptlrpcd-based, so there is not much work here beyond
 * delegating to ptlrpcd_start().
 *
 * Returns 0 on success or the negative errno from ptlrpcd_start().
 */
int llog_recov_thread_start(struct llog_commit_master *lcm)
{
        int rc;
        ENTRY;

        rc = ptlrpcd_start(lcm->lcm_name, &lcm->lcm_pc);
        if (rc) {
                CERROR("Error %d while starting recovery thread %s\n",
                       rc, lcm->lcm_name);
                RETURN(rc);
        }
        /* Cache the rpc set of the ptlrpcd controller for later use. */
        lcm->lcm_set = lcm->lcm_pc.pc_set;
        atomic_inc(&llog_tcount);
        RETURN(rc);
}
EXPORT_SYMBOL(llog_recov_thread_start);
/**
 * Stop the recovery thread. Complement to llog_recov_thread_start().
 * @force is passed through to ptlrpcd_stop(); asserts that no llcd is
 * still attached to this @lcm once the thread is gone.
 */
void llog_recov_thread_stop(struct llog_commit_master *lcm, int force)
{
        ENTRY;

        /*
         * Let all know that we're stopping. This will also make
         * llcd_send() refuse any new llcds.
         */
        set_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags);

        /*
         * Stop processing thread. No new rpcs will be accepted for
         * for processing now.
         */
        ptlrpcd_stop(&lcm->lcm_pc, force);

        /*
         * No llcds on this @lcm should left.
         */
        LASSERTF(atomic_read(&lcm->lcm_count) == 0,
                 "Busy llcds found on lcm %p - (%d)\n",
                 lcm, atomic_read(&lcm->lcm_count));
        EXIT;
}
EXPORT_SYMBOL(llog_recov_thread_stop);
410 * Initialize commit master structure and start recovery thread on it.
412 struct llog_commit_master *llog_recov_thread_init(char *name)
414 struct llog_commit_master *lcm;
423 * Try to create threads with unique names and user id.
425 snprintf(lcm->lcm_name, sizeof(lcm->lcm_name),
426 "ll_log_commit_%s_%02d", name,
427 atomic_read(&llog_tcount));
429 strncpy(lcm->lcm_name, name, sizeof(lcm->lcm_name));
430 atomic_set(&lcm->lcm_count, 0);
431 rc = llog_recov_thread_start(lcm);
433 CERROR("Can't start commit thread, rc %d\n", rc);
441 EXPORT_SYMBOL(llog_recov_thread_init);
/**
 * Finalize the commit master and its recovery thread: stop the thread
 * (passing @force through) and free the lcm structure.
 */
void llog_recov_thread_fini(struct llog_commit_master *lcm, int force)
{
        ENTRY;
        llog_recov_thread_stop(lcm, force);
        OBD_FREE_PTR(lcm);
        EXIT;
}
EXPORT_SYMBOL(llog_recov_thread_fini);
/**
 * Kick off catalog processing in a separate kernel thread:
 * llog_cat_process_thread() is started with an lpca carrying @handle
 * (the per-record callback) and @arg. Returns 0 on success or a
 * negative errno.
 */
static int llog_obd_repl_generic(struct llog_ctxt *ctxt,
                                 void *handle, void *arg)
{
        struct obd_device *obd = ctxt->loc_obd;
        struct llog_process_cat_args *lpca;
        int rc;
        ENTRY;

        if (obd->obd_stopping)
                RETURN(-ENODEV);

        /*
         * This will be balanced in llog_cat_process_thread()
         */
        OBD_ALLOC_PTR(lpca);
        if (!lpca)
                RETURN(-ENOMEM);

        lpca->lpca_cb = handle;
        lpca->lpca_arg = arg;

        /*
         * This will be balanced in llog_cat_process_thread()
         */
        lpca->lpca_ctxt = llog_ctxt_get(ctxt);
        if (!lpca->lpca_ctxt) {
                OBD_FREE_PTR(lpca);
                RETURN(-ENODEV);
        }
        rc = cfs_kernel_thread(llog_cat_process_thread, lpca,
                               CLONE_VM | CLONE_FILES);
        if (rc < 0) {
                /* Thread never started: undo both "balanced" refs above. */
                CERROR("Error starting llog_cat_process_thread(): %d\n", rc);
                OBD_FREE_PTR(lpca);
                llog_ctxt_put(ctxt);
        } else {
                CDEBUG(D_HA, "Started llog_cat_process_thread(): %d\n", rc);
                rc = 0;
        }
        RETURN(rc);
}
/**
 * Called on connect: flush any cached llcd back to the peer, attach a
 * fresh llcd to @ctxt and start recovery processing of the llog
 * identified by @logid via ctxt->llog_proc_cb.
 *
 * Returns 0 on success or a negative errno.
 */
int llog_obd_repl_connect(struct llog_ctxt *ctxt, int count,
                          struct llog_logid *logid, struct llog_gen *gen,
                          struct obd_uuid *uuid)
{
        struct llog_canceld_ctxt *llcd;
        int rc;
        ENTRY;

        mutex_down(&ctxt->loc_sem);

        /*
         * Send back cached llcd before recovery from llog if we have any.
         */
        if (ctxt->loc_llcd) {
                CWARN("Llcd %p:%p is not empty\n", ctxt->loc_llcd, ctxt);
                /* llog_sync() takes loc_sem itself - drop it around call. */
                mutex_up(&ctxt->loc_sem);
                llog_sync(ctxt, NULL);
                mutex_down(&ctxt->loc_sem);
        }

        llcd = llcd_get(ctxt);
        if (!llcd) {
                mutex_up(&ctxt->loc_sem);
                RETURN(-ENOMEM);
        }

        ctxt->loc_gen = *gen;

        rc = llog_obd_repl_generic(ctxt, ctxt->llog_proc_cb, logid);
        if (rc != 0) {
                /* Recovery thread did not start - drop the fresh llcd. */
                llcd_put(ctxt);
                CERROR("Error recovery process: %d\n", rc);
        }
        mutex_up(&ctxt->loc_sem);
        RETURN(rc);
}
EXPORT_SYMBOL(llog_obd_repl_connect);
/**
 * Deleted objects have a commit callback that cancels the MDS
 * log record for the deletion. The commit callback calls this function:
 * @cookies are batched into the ctxt's llcd and pushed to the peer when
 * the llcd fills up or OBD_LLOG_FL_SENDNOW is set in @flags.
 *
 * Returns 0 on success or a negative errno (-ENODEV when the import/obd/
 * commit thread is going away, -ENOMEM on llcd allocation failure).
 */
int llog_obd_repl_cancel(struct llog_ctxt *ctxt,
                         struct lov_stripe_md *lsm, int count,
                         struct llog_cookie *cookies, int flags)
{
        struct llog_canceld_ctxt *llcd;
        int rc = 0;
        ENTRY;

        LASSERT(ctxt != NULL);

        mutex_down(&ctxt->loc_sem);

        /*
         * Let's check if we have all structures alive. We also check for
         * possible shutdown. Do nothing if we're stopping.
         */
        if (ctxt->loc_imp == NULL) {
                CDEBUG(D_RPCTRACE, "No import for ctxt %p\n", ctxt);
                GOTO(out, rc = -ENODEV);
        }

        if (ctxt->loc_obd->obd_stopping) {
                CDEBUG(D_RPCTRACE, "Obd is stopping for ctxt %p\n", ctxt);
                GOTO(out, rc = -ENODEV);
        }

        if (test_bit(LLOG_LCM_FL_EXIT, &ctxt->loc_lcm->lcm_flags)) {
                CDEBUG(D_RPCTRACE, "Commit thread is stopping for ctxt %p\n",
                       ctxt);
                GOTO(out, rc = -ENODEV);
        }

        llcd = ctxt->loc_llcd;

        if (count > 0 && cookies != NULL) {
                /*
                 * Get new llcd from ctxt if required.
                 */
                if (!llcd) {
                        llcd = llcd_get(ctxt);
                        if (!llcd)
                                GOTO(out, rc = -ENOMEM);
                }

                /*
                 * Llcd does not have enough room for @cookies. Let's push
                 * it out and allocate new one.
                 */
                if (!llcd_fit(llcd, cookies)) {
                        rc = llcd_push(ctxt);
                        if (rc)
                                GOTO(out, rc);
                        llcd = llcd_get(ctxt);
                        if (!llcd)
                                GOTO(out, rc = -ENOMEM);
                }

                /*
                 * Copy cookies to @llcd, no matter old or new allocated one.
                 */
                llcd_copy(llcd, cookies);
        }

        /*
         * Let's check if we need to send copied @cookies asap. If yes - do it.
         */
        if (llcd && (flags & OBD_LLOG_FL_SENDNOW)) {
                rc = llcd_push(ctxt);
                if (rc)
                        GOTO(out, rc);
        }
        EXIT;
out:
        mutex_up(&ctxt->loc_sem);
        return rc;
}
EXPORT_SYMBOL(llog_obd_repl_cancel);
/**
 * Sync the ctxt's pending cancels. If @exp is the reverse import's
 * export, the peer is disconnecting: drop any cached llcd without
 * sending. Otherwise force an immediate flush via llog_cancel() with
 * OBD_LLOG_FL_SENDNOW. Returns 0 or a negative errno.
 */
int llog_obd_repl_sync(struct llog_ctxt *ctxt, struct obd_export *exp)
{
        int rc = 0;
        ENTRY;

        if (exp && (ctxt->loc_imp == exp->exp_imp_reverse)) {
                CDEBUG(D_RPCTRACE, "Reverse import disconnect\n");
                /*
                 * Check for llcd which might be left attached to @ctxt.
                 * Let's kill it.
                 */
                mutex_down(&ctxt->loc_sem);
                llcd_put(ctxt);
                mutex_up(&ctxt->loc_sem);
        } else {
                rc = llog_cancel(ctxt, NULL, 0, NULL, OBD_LLOG_FL_SENDNOW);
        }
        RETURN(rc);
}
EXPORT_SYMBOL(llog_obd_repl_sync);
640 #else /* !__KERNEL__ */
/* Userspace (liblustre) stub: there is no commit thread, cancels are
 * a no-op. */
int llog_obd_repl_cancel(struct llog_ctxt *ctxt,
                         struct lov_stripe_md *lsm, int count,
                         struct llog_cookie *cookies, int flags)
{
        return 0;
}
/**
 * Module init time function. Initializes the slab cache for llcd objects
 * (sized the same way llcd_alloc() sizes each object). Returns 0 on
 * success or -ENOMEM.
 */
int llog_recov_init(void)
{
        int llcd_size;

        llcd_size = CFS_PAGE_SIZE -
                lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, NULL);
        llcd_size += offsetof(struct llog_canceld_ctxt, llcd_cookies);
        llcd_cache = cfs_mem_cache_create("llcd_cache", llcd_size, 0, 0);
        if (!llcd_cache) {
                CERROR("Error allocating llcd cache\n");
                return -ENOMEM;
        }
        return 0;
}
/**
 * Module fini time function. Releases the slab cache for llcd objects.
 */
void llog_recov_fini(void)
{
        int count;

        /*
         * Kill llcd cache when thread is stopped and we're sure no
         * llcd is in use.
         */
        if (llcd_cache) {
                /*
                 * In 2.6.22 cfs_mem_cache_destroy() will not return error
                 * for busy resources. Let's check it another way.
                 */
                count = atomic_read(&llcd_count);
                LASSERTF(count == 0, "Can't destroy llcd cache! Number of "
                         "busy llcds: %d\n", count);
                cfs_mem_cache_destroy(llcd_cache);
                llcd_cache = NULL;
        }
}