/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/recov_thread.c
 *
 * OST<->MDS recovery logging thread.
 * Invariants in implementation:
 *  - we do not share logs among different OST<->MDS connections, so that
 *    if an OST or MDS fails it need only look at log(s) relevant to itself
 *
 * Author: Andreas Dilger   <adilger@clusterfs.com>
 *         Yury Umanets     <yury.umanets@sun.com>
 *         Alexey Lyashkov  <alexey.lyashkov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_LOG

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
#else
# include <libcfs/list.h>
# include <liblustre.h>
#endif

#include <obd_class.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <lnet/types.h>
#include <libcfs/list.h>
#include <lustre_log.h>
#include "ptlrpc_internal.h"
static atomic_t llcd_count = ATOMIC_INIT(0);
static cfs_mem_cache_t *llcd_cache = NULL;

#ifdef __KERNEL__
enum {
        LLOG_LCM_FL_START = 1 << 0,
        LLOG_LCM_FL_EXIT  = 1 << 1
};
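/*
 * These bits live in lcm->lcm_flags and are driven with the usual bitops;
 * a minimal sketch of the pattern used by llog_recov_thread_stop() and
 * llcd_send() below:
 *
 *     set_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags);
 *     ...
 *     if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
 *             GOTO(exit, rc = -ENODEV);
 */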
static void llcd_print(struct llog_canceld_ctxt *llcd,
                       const char *func, int line)
{
        CDEBUG(D_RPCTRACE, "Llcd (%p) at %s:%d:\n", llcd, func, line);
        CDEBUG(D_RPCTRACE, "  size: %d\n", llcd->llcd_size);
        CDEBUG(D_RPCTRACE, "  ctxt: %p\n", llcd->llcd_ctxt);
        CDEBUG(D_RPCTRACE, "  lcm : %p\n", llcd->llcd_lcm);
        CDEBUG(D_RPCTRACE, "  cookiebytes : %d\n", llcd->llcd_cookiebytes);
}
/**
 * Allocate new llcd from cache, init it and return to caller.
 * Bumps number of objects allocated.
 */
static struct llog_canceld_ctxt *llcd_alloc(struct llog_commit_master *lcm)
{
        struct llog_canceld_ctxt *llcd;
        int size, overhead;

        LASSERT(lcm != NULL);

        /*
         * We want to send one page of cookies with rpc header. This buffer
         * will be assigned later to the rpc, this is why we preserve the
         * space for rpc header.
         */
        size = CFS_PAGE_SIZE - lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, NULL);
        overhead = offsetof(struct llog_canceld_ctxt, llcd_cookies);
        OBD_SLAB_ALLOC(llcd, llcd_cache, CFS_ALLOC_STD, size + overhead);
        if (!llcd)
                return NULL;

        CFS_INIT_LIST_HEAD(&llcd->llcd_list);
        llcd->llcd_cookiebytes = 0;
        llcd->llcd_size = size;

        spin_lock(&lcm->lcm_lock);
        llcd->llcd_lcm = lcm;
        atomic_inc(&lcm->lcm_count);
        list_add_tail(&llcd->llcd_list, &lcm->lcm_llcds);
        spin_unlock(&lcm->lcm_lock);
        atomic_inc(&llcd_count);

        CDEBUG(D_RPCTRACE, "Alloc llcd %p on lcm %p (%d)\n",
               llcd, lcm, atomic_read(&lcm->lcm_count));

        return llcd;
}
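/*
 * Sizing note (a back-of-the-envelope sketch, assuming a 4K CFS_PAGE_SIZE):
 * llcd_size is one page minus the V2 message header, so each llcd gathers
 * roughly (CFS_PAGE_SIZE - lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, NULL)) /
 * sizeof(struct llog_cookie) cancel cookies before it has to be sent.
 */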
/**
 * Returns passed llcd to cache.
 */
static void llcd_free(struct llog_canceld_ctxt *llcd)
{
        struct llog_commit_master *lcm = llcd->llcd_lcm;
        int size;

        if (lcm) {
                if (atomic_read(&lcm->lcm_count) == 0) {
                        CERROR("Invalid llcd free %p\n", llcd);
                        llcd_print(llcd, __FUNCTION__, __LINE__);
                        LBUG();
                }
                spin_lock(&lcm->lcm_lock);
                LASSERT(!list_empty(&llcd->llcd_list));
                list_del_init(&llcd->llcd_list);
                atomic_dec(&lcm->lcm_count);
                spin_unlock(&lcm->lcm_lock);

                CDEBUG(D_RPCTRACE, "Free llcd %p on lcm %p (%d)\n",
                       llcd, lcm, atomic_read(&lcm->lcm_count));
        }

        LASSERT(atomic_read(&llcd_count) > 0);
        atomic_dec(&llcd_count);

        size = offsetof(struct llog_canceld_ctxt, llcd_cookies) +
               llcd->llcd_size;
        OBD_SLAB_FREE(llcd, llcd_cache, size);
}
/**
 * Checks if passed cookie fits into llcd free space buffer. Returns
 * 1 if yes and 0 otherwise.
 */
static int
llcd_fit(struct llog_canceld_ctxt *llcd, struct llog_cookie *cookies)
{
        return (llcd->llcd_size - llcd->llcd_cookiebytes >= sizeof(*cookies));
}
/**
 * Copy passed @cookies to @llcd.
 */
static void
llcd_copy(struct llog_canceld_ctxt *llcd, struct llog_cookie *cookies)
{
        LASSERT(llcd_fit(llcd, cookies));
        memcpy((char *)llcd->llcd_cookies + llcd->llcd_cookiebytes,
               cookies, sizeof(*cookies));
        llcd->llcd_cookiebytes += sizeof(*cookies);
}
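/*
 * Callers are expected to pair the two helpers above: probe with llcd_fit()
 * and push the llcd out when the next cookie no longer fits, as
 * llog_obd_repl_cancel() does below. A minimal sketch of that pattern:
 *
 *     if (!llcd_fit(llcd, cookies)) {
 *             rc = llcd_push(ctxt);      - send the filled llcd
 *             llcd = llcd_get(ctxt);     - start gathering into a new one
 *     }
 *     llcd_copy(llcd, cookies);
 */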
/**
 * Llcd completion function. Called upon llcd send finish regardless of
 * the sending result. The error is passed in @rc. Note that this will
 * also be called at cleanup time when all inflight rpcs are aborted.
 */
static int
llcd_interpret(const struct lu_env *env,
               struct ptlrpc_request *req, void *noused, int rc)
{
        struct llog_canceld_ctxt *llcd = req->rq_async_args.pointer_arg[0];

        CDEBUG(D_RPCTRACE, "Sent llcd %p (%d) - killing it\n", llcd, rc);
        llcd_free(llcd);
        return 0;
}
/**
 * Send @llcd to remote node. Free llcd upon completion or error. Sending
 * is performed in async style so this function will return asap without
 * blocking.
 */
static int llcd_send(struct llog_canceld_ctxt *llcd)
{
        char *bufs[2] = { NULL, (char *)llcd->llcd_cookies };
        struct obd_import *import = NULL;
        struct llog_commit_master *lcm;
        struct ptlrpc_request *req;
        struct llog_ctxt *ctxt;
        int rc;
        ENTRY;

        ctxt = llcd->llcd_ctxt;
        if (!ctxt) {
                CERROR("Invalid llcd with NULL ctxt found (%p)\n",
                       llcd);
                llcd_print(llcd, __FUNCTION__, __LINE__);
                LBUG();
        }
        LASSERT_SEM_LOCKED(&ctxt->loc_sem);

        if (llcd->llcd_cookiebytes == 0)
                GOTO(exit, rc = 0);

        lcm = llcd->llcd_lcm;

        /*
         * Check if we're in exit stage. Do not send llcd in
         * this case.
         */
        if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
                GOTO(exit, rc = -ENODEV);

        CDEBUG(D_RPCTRACE, "Sending llcd %p\n", llcd);

        import = llcd->llcd_ctxt->loc_imp;
        if (!import || (import == LP_POISON) ||
            (import->imp_client == LP_POISON)) {
                CERROR("Invalid import %p for llcd %p\n",
                       import, llcd);
                GOTO(exit, rc = -ENODEV);
        }

        OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_RECOV, 10);

        /*
         * No need to get import here as it is already done in
         * llog_receptor_accept().
         */
        req = ptlrpc_request_alloc(import, &RQF_LOG_CANCEL);
        if (req == NULL) {
                CERROR("Can't allocate request for sending llcd %p\n",
                       llcd);
                GOTO(exit, rc = -ENOMEM);
        }
        req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES,
                             RCL_CLIENT, llcd->llcd_cookiebytes);

        rc = ptlrpc_request_bufs_pack(req, LUSTRE_LOG_VERSION,
                                      OBD_LOG_CANCEL, bufs, NULL);
        if (rc) {
                ptlrpc_request_free(req);
                GOTO(exit, rc);
        }

        ptlrpc_at_set_req_timeout(req);
        ptlrpc_request_set_replen(req);

        req->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
        req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;
        req->rq_interpret_reply = (ptlrpc_interpterer_t)llcd_interpret;
        req->rq_async_args.pointer_arg[0] = llcd;

        /* llog cancels will be replayed after reconnect so this may happen
         * twice: first from the replay llog, second for the resent rpc */
        req->rq_no_delay = req->rq_no_resend = 1;

        rc = ptlrpc_set_add_new_req(&lcm->lcm_pc, req);
        if (rc) {
                ptlrpc_request_free(req);
                GOTO(exit, rc);
        }
        RETURN(0);
exit:
        CDEBUG(D_RPCTRACE, "Refused llcd %p\n", llcd);
        llcd_free(llcd);
        return rc;
}
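/*
 * Note on ownership: on success the rpc takes the llcd with it and
 * llcd_interpret() frees it when sending completes; every failure path
 * above falls through to "exit" where the llcd is freed right away.
 */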
/**
 * Attach @llcd to @ctxt. Establish llcd vs. ctxt reserve connection
 * so that they can refer to each other.
 */
static void
llcd_attach(struct llog_ctxt *ctxt, struct llog_canceld_ctxt *llcd)
{
        LASSERT(ctxt != NULL && llcd != NULL);
        LASSERT_SEM_LOCKED(&ctxt->loc_sem);
        LASSERT(ctxt->loc_llcd == NULL);
        llcd->llcd_ctxt = llog_ctxt_get(ctxt);
        ctxt->loc_llcd = llcd;

        CDEBUG(D_RPCTRACE, "Attach llcd %p to ctxt %p\n",
               llcd, ctxt);
}
/**
 * Opposite to llcd_attach(). Detaches llcd from its @ctxt. This makes
 * sure that this llcd will not be found the next time we try to cancel.
 */
static struct llog_canceld_ctxt *llcd_detach(struct llog_ctxt *ctxt)
{
        struct llog_canceld_ctxt *llcd;

        LASSERT(ctxt != NULL);
        LASSERT_SEM_LOCKED(&ctxt->loc_sem);

        llcd = ctxt->loc_llcd;
        if (!llcd)
                return NULL;

        CDEBUG(D_RPCTRACE, "Detach llcd %p from ctxt %p\n",
               llcd, ctxt);

        ctxt->loc_llcd = NULL;
        llog_ctxt_put(ctxt);
        return llcd;
}
/**
 * Return @llcd cached in @ctxt. Allocate new one if required. Attach it
 * to ctxt so that it may be used for gathering cookies and sending.
 */
static struct llog_canceld_ctxt *llcd_get(struct llog_ctxt *ctxt)
{
        struct llog_canceld_ctxt *llcd;

        llcd = llcd_alloc(ctxt->loc_lcm);
        if (!llcd) {
                CERROR("Can't alloc an llcd for ctxt %p\n", ctxt);
                return NULL;
        }
        llcd_attach(ctxt, llcd);
        return llcd;
}
/**
 * Detach llcd from its @ctxt. Free llcd.
 */
static void llcd_put(struct llog_ctxt *ctxt)
{
        struct llog_canceld_ctxt *llcd;

        llcd = llcd_detach(ctxt);
        if (llcd)
                llcd_free(llcd);
}
/**
 * Detach llcd from its @ctxt so that nobody will find it when trying to
 * re-use it. Send llcd to remote node.
 */
static int llcd_push(struct llog_ctxt *ctxt)
{
        struct llog_canceld_ctxt *llcd;
        int rc;

        /*
         * Make sure that this llcd will not be sent again as we detach
         * it from ctxt.
         */
        llcd = llcd_detach(ctxt);
        if (!llcd) {
                CERROR("Invalid detached llcd found %p\n", llcd);
                llcd_print(llcd, __FUNCTION__, __LINE__);
                LBUG();
        }

        rc = llcd_send(llcd);
        if (rc)
                CERROR("Couldn't send llcd %p (%d)\n", llcd, rc);
        return rc;
}
/**
 * Start recovery thread which actually deals with llcd sending. This
 * is all standard ptlrpc-thread based, so there is not much work to
 * do here.
 */
int llog_recov_thread_start(struct llog_commit_master *lcm)
{
        int rc;
        ENTRY;

        rc = ptlrpcd_start(lcm->lcm_name, &lcm->lcm_pc);
        if (rc) {
                CERROR("Error %d while starting recovery thread %s\n",
                       rc, lcm->lcm_name);
                RETURN(rc);
        }
        RETURN(rc);
}
EXPORT_SYMBOL(llog_recov_thread_start);
/**
 * Stop recovery thread. Complement to llog_recov_thread_start().
 */
void llog_recov_thread_stop(struct llog_commit_master *lcm, int force)
{
        ENTRY;

        /*
         * Let all know that we're stopping. This will also make
         * llcd_send() refuse any new llcds.
         */
        set_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags);

        /*
         * Stop processing thread. No new rpcs will be accepted for
         * processing now.
         */
        ptlrpcd_stop(&lcm->lcm_pc, force);

        /*
         * By this point no alive inflight llcds should be left. Only
         * those forgotten in sync may still be attached to ctxt. Let's
         * print them.
         */
        if (atomic_read(&lcm->lcm_count) != 0) {
                struct llog_canceld_ctxt *llcd;
                struct list_head *tmp;

                CERROR("Busy llcds found (%d) on lcm %p\n",
                       atomic_read(&lcm->lcm_count), lcm);

                spin_lock(&lcm->lcm_lock);
                list_for_each(tmp, &lcm->lcm_llcds) {
                        llcd = list_entry(tmp, struct llog_canceld_ctxt,
                                          llcd_list);
                        llcd_print(llcd, __FUNCTION__, __LINE__);
                }
                spin_unlock(&lcm->lcm_lock);

                /*
                 * No point in going further with busy llcds at this point
                 * as this is a clear bug. It might mean we got a hanging
                 * rpc which holds an import ref and this means we will not
                 * be able to clean up anyway.
                 *
                 * Or we just missed killing them when they were not
                 * attached to ctxt. In this case our slab will remind
                 * us about this a bit later.
                 */
                LBUG();
        }
        EXIT;
}
EXPORT_SYMBOL(llog_recov_thread_stop);
/**
 * Initialize commit master structure and start recovery thread on it.
 */
struct llog_commit_master *llog_recov_thread_init(char *name)
{
        struct llog_commit_master *lcm;
        int rc;
        ENTRY;

        OBD_ALLOC_PTR(lcm);
        if (!lcm)
                RETURN(NULL);

        /*
         * Try to create threads with unique names.
         */
        snprintf(lcm->lcm_name, sizeof(lcm->lcm_name),
                 "lcm_%s", name);

        atomic_set(&lcm->lcm_count, 0);
        atomic_set(&lcm->lcm_refcount, 1);
        spin_lock_init(&lcm->lcm_lock);
        CFS_INIT_LIST_HEAD(&lcm->lcm_llcds);
        rc = llog_recov_thread_start(lcm);
        if (rc) {
                CERROR("Can't start commit thread, rc %d\n", rc);
                GOTO(out, rc);
        }
        RETURN(lcm);
out:
        OBD_FREE_PTR(lcm);
        return NULL;
}
EXPORT_SYMBOL(llog_recov_thread_init);
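/*
 * A typical commit master lifecycle, as a sketch (the "mds" name is only
 * illustrative):
 *
 *     struct llog_commit_master *lcm;
 *
 *     lcm = llog_recov_thread_init("mds");
 *     if (!lcm)
 *             return -ENOMEM;
 *     ...
 *     llog_recov_thread_fini(lcm, force);
 */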
/**
 * Finalize commit master and its recovery thread.
 */
void llog_recov_thread_fini(struct llog_commit_master *lcm, int force)
{
        ENTRY;
        llog_recov_thread_stop(lcm, force);
        lcm_put(lcm);
        EXIT;
}
EXPORT_SYMBOL(llog_recov_thread_fini);
static int llog_recov_thread_replay(struct llog_ctxt *ctxt,
                                    void *cb, void *arg)
{
        struct obd_device *obd = ctxt->loc_obd;
        struct llog_process_cat_args *lpca;
        int rc;
        ENTRY;

        if (obd->obd_stopping)
                RETURN(-ENODEV);

        /*
         * This will be balanced in llog_cat_process_thread()
         */
        OBD_ALLOC_PTR(lpca);
        if (!lpca)
                RETURN(-ENOMEM);

        lpca->lpca_cb = cb;
        lpca->lpca_arg = arg;

        /*
         * This will be balanced in llog_cat_process_thread()
         */
        lpca->lpca_ctxt = llog_ctxt_get(ctxt);
        if (!lpca->lpca_ctxt) {
                OBD_FREE_PTR(lpca);
                RETURN(-ENODEV);
        }
        rc = cfs_kernel_thread(llog_cat_process_thread, lpca,
                               CLONE_VM | CLONE_FILES);
        if (rc < 0) {
                CERROR("Error starting llog_cat_process_thread(): %d\n", rc);
                OBD_FREE_PTR(lpca);
                llog_ctxt_put(ctxt);
        } else {
                CDEBUG(D_HA, "Started llog_cat_process_thread(): %d\n", rc);
                rc = 0;
        }
        RETURN(rc);
}
int llog_obd_repl_connect(struct llog_ctxt *ctxt,
                          struct llog_logid *logid, struct llog_gen *gen,
                          struct obd_uuid *uuid)
{
        int rc;
        ENTRY;

        /*
         * Send back cached llcd from llog before recovery if we have any.
         * This is a no-op if nothing cached is found there.
         */
        llog_sync(ctxt, NULL);

        /*
         * Start recovery in separate thread.
         */
        mutex_down(&ctxt->loc_sem);
        ctxt->loc_gen = *gen;
        rc = llog_recov_thread_replay(ctxt, ctxt->llog_proc_cb, logid);
        mutex_up(&ctxt->loc_sem);

        RETURN(rc);
}
EXPORT_SYMBOL(llog_obd_repl_connect);
/**
 * Deleted objects have a commit callback that cancels the MDS
 * log record for the deletion. The commit callback calls this
 * function.
 */
int llog_obd_repl_cancel(struct llog_ctxt *ctxt,
                         struct lov_stripe_md *lsm, int count,
                         struct llog_cookie *cookies, int flags)
{
        struct llog_commit_master *lcm;
        struct llog_canceld_ctxt *llcd;
        int rc = 0;
        ENTRY;

        LASSERT(ctxt != NULL);

        mutex_down(&ctxt->loc_sem);
        if (!ctxt->loc_lcm) {
                CDEBUG(D_RPCTRACE, "No lcm for ctxt %p\n", ctxt);
                GOTO(out, rc = -ENODEV);
        }
        lcm = ctxt->loc_lcm;
        CDEBUG(D_INFO, "cancel on lcm %p\n", lcm);

        /*
         * Let's check if we have all structures alive. We also check for
         * possible shutdown. Do nothing if we're stopping.
         */
        if (ctxt->loc_imp == NULL) {
                CDEBUG(D_RPCTRACE, "No import for ctxt %p\n", ctxt);
                GOTO(out, rc = -ENODEV);
        }

        if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags)) {
                CDEBUG(D_RPCTRACE, "Commit thread is stopping for ctxt %p\n",
                       ctxt);
                GOTO(out, rc = -ENODEV);
        }

        llcd = ctxt->loc_llcd;

        if (count > 0 && cookies != NULL) {
                /*
                 * Get new llcd from ctxt if required.
                 */
                if (!llcd) {
                        llcd = llcd_get(ctxt);
                        if (!llcd)
                                GOTO(out, rc = -ENOMEM);
                        /*
                         * Allocation is successful, let's check for stop
                         * flag again to fall back as soon as possible.
                         */
                        if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
                                GOTO(out, rc = -ENODEV);
                }

                /*
                 * Llcd does not have enough room for @cookies. Let's push
                 * it out and allocate a new one.
                 */
                if (!llcd_fit(llcd, cookies)) {
                        rc = llcd_push(ctxt);
                        if (rc)
                                GOTO(out, rc);
                        llcd = llcd_get(ctxt);
                        if (!llcd)
                                GOTO(out, rc = -ENOMEM);
                        /*
                         * Allocation is successful, let's check for stop
                         * flag again to fall back as soon as possible.
                         */
                        if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
                                GOTO(out, rc = -ENODEV);
                }

                /*
                 * Copy cookies to @llcd, no matter old or new allocated
                 * one.
                 */
                llcd_copy(llcd, cookies);
        }

        /*
         * Let's check if we need to send copied @cookies asap. If yes,
         * then do it.
         */
        if (llcd && (flags & OBD_LLOG_FL_SENDNOW)) {
                CDEBUG(D_RPCTRACE, "Sync llcd %p\n", llcd);
                rc = llcd_push(ctxt);
                if (rc)
                        GOTO(out, rc);
        }
        EXIT;
out:
        mutex_up(&ctxt->loc_sem);
        return rc;
}
EXPORT_SYMBOL(llog_obd_repl_cancel);
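/*
 * A caller that only wants to flush already gathered cookies can pass no
 * cookies at all and just set the flag, exactly as llog_obd_repl_sync()
 * does below:
 *
 *     rc = llog_cancel(ctxt, NULL, 0, NULL, OBD_LLOG_FL_SENDNOW);
 */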
int llog_obd_repl_sync(struct llog_ctxt *ctxt, struct obd_export *exp)
{
        int rc = 0;
        ENTRY;

        /*
         * Flush any remaining llcd.
         */
        mutex_down(&ctxt->loc_sem);
        if (exp && (ctxt->loc_imp == exp->exp_imp_reverse)) {
                /*
                 * This is ost->mds connection, we can't be sure that mds
                 * can still receive cookies, let's kill the cached llcd.
                 */
                CDEBUG(D_RPCTRACE, "Kill cached llcd\n");
                llcd_put(ctxt);
                mutex_up(&ctxt->loc_sem);
        } else {
                /*
                 * This is either llog_sync() from generic llog code or sync
                 * on client disconnect. Either way let's do it and send
                 * llcds to the target with waiting for completion.
                 */
                CDEBUG(D_RPCTRACE, "Sync cached llcd\n");
                mutex_up(&ctxt->loc_sem);
                rc = llog_cancel(ctxt, NULL, 0, NULL, OBD_LLOG_FL_SENDNOW);
        }
        RETURN(rc);
}
EXPORT_SYMBOL(llog_obd_repl_sync);
#else /* !__KERNEL__ */

int llog_obd_repl_cancel(struct llog_ctxt *ctxt,
                         struct lov_stripe_md *lsm, int count,
                         struct llog_cookie *cookies, int flags)
{
        return 0;
}
#endif /* __KERNEL__ */
/**
 * Module init time function. Initializes slab for llcd objects.
 */
int llog_recov_init(void)
{
        int llcd_size;

        llcd_size = CFS_PAGE_SIZE -
                lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, NULL);
        llcd_size += offsetof(struct llog_canceld_ctxt, llcd_cookies);
        llcd_cache = cfs_mem_cache_create("llcd_cache", llcd_size, 0, 0);
        if (!llcd_cache) {
                CERROR("Error allocating llcd cache\n");
                return -ENOMEM;
        }
        return 0;
}
/**
 * Module fini time function. Releases slab for llcd objects.
 */
void llog_recov_fini(void)
{
        /*
         * Kill llcd cache when thread is stopped and we're sure no
         * llcd in use is left.
         */
        if (llcd_cache) {
                /*
                 * In 2.6.22 cfs_mem_cache_destroy() will not return an
                 * error for busy resources. Let's check it another way.
                 */
                LASSERTF(atomic_read(&llcd_count) == 0,
                         "Can't destroy llcd cache! Number of "
                         "busy llcds: %d\n", atomic_read(&llcd_count));
                cfs_mem_cache_destroy(llcd_cache);
        }
}