1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2003 Cluster File Systems, Inc.
5 * Author: Andreas Dilger <adilger@clusterfs.com>
7 * This file is part of the Lustre file system, http://www.lustre.org
8 * Lustre is a trademark of Cluster File Systems, Inc.
10 * You may have signed or agreed to another license before downloading
11 * this software. If so, you are bound by the terms and conditions
12 * of that agreement, and the following does not apply to you. See the
13 * LICENSE file included with this distribution for more information.
15 * If you did not agree to a different license, then this copy of Lustre
16 * is open source software; you can redistribute it and/or modify it
17 * under the terms of version 2 of the GNU General Public License as
18 * published by the Free Software Foundation.
20 * In either case, Lustre is distributed in the hope that it will be
21 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
22 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * license text for more details.
25 * OST<->MDS recovery logging thread.
27 * Invariants in implementation:
28 * - we do not share logs among different OST<->MDS connections, so that
29 * if an OST or MDS fails it need only look at log(s) relevant to itself
32 #define DEBUG_SUBSYSTEM S_LOG
35 # define EXPORT_SYMTAB
39 # include <libcfs/libcfs.h>
41 # include <libcfs/list.h>
42 # include <liblustre.h>
45 #include <obd_class.h>
46 #include <lustre_commit_confd.h>
47 #include <obd_support.h>
48 #include <obd_class.h>
49 #include <lustre_net.h>
50 #include <lnet/types.h>
51 #include <libcfs/list.h>
52 #include <lustre_log.h>
53 #include "ptlrpc_internal.h"
57 /* Allocate new commit structs in case we do not have enough.
58 * Make the llcd size small enough that it fits into a single page when we
59 * are sending/receiving it. */
/*
 * Allocate one llog_canceld_ctxt ("llcd") and add it to lcm's free list.
 * The cookie payload is sized so header + payload fit a 4096-byte page,
 * minus the lustre_msg V2 envelope overhead.
 *
 * NOTE(review): the embedded source-line numbering is discontinuous here
 * (60, 62, 65, 66, 68, ...); the llcd_size declaration, the OBD_ALLOC
 * call opening, the allocation-failure check, and the return statement
 * are not visible in this view.
 */
60 static int llcd_alloc(struct llog_commit_master *lcm)
62 struct llog_canceld_ctxt *llcd;
65 /* payload of lustre_msg V2 is bigger */
66 llcd_size = 4096 - lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, NULL);
/* (continuation of the allocation call) size = payload + struct header
 * up to the flexible llcd_cookies area */
68 llcd_size + offsetof(struct llog_canceld_ctxt, llcd_cookies));
72 llcd->llcd_size = llcd_size;
/* Publish the new llcd on the free list; counter tracks free entries. */
75 spin_lock(&lcm->lcm_llcd_lock);
76 list_add(&llcd->llcd_list, &lcm->lcm_llcd_free);
77 atomic_inc(&lcm->lcm_llcd_numfree);
78 spin_unlock(&lcm->lcm_llcd_lock);
83 /* Get a free cookie struct from the list */
/*
 * Pop an llcd off lcm->lcm_llcd_free under lcm_llcd_lock.  If the free
 * list is empty the lock is dropped, llcd_alloc() replenishes it, and
 * (per the b=7407 comment below) the list is re-checked after the lock
 * is retaken, since another thread may have grabbed the new entry while
 * the lock was dropped.
 *
 * NOTE(review): the retry/re-lock lines and the error-return path after
 * CERROR are not visible in this view (numbering gap 93..100).
 */
84 static struct llog_canceld_ctxt *llcd_grab(struct llog_commit_master *lcm)
86 struct llog_canceld_ctxt *llcd;
89 spin_lock(&lcm->lcm_llcd_lock);
90 if (list_empty(&lcm->lcm_llcd_free)) {
91 spin_unlock(&lcm->lcm_llcd_lock);
92 if (llcd_alloc(lcm) < 0) {
93 CERROR("unable to allocate log commit data!\n");
96 /* check new llcd wasn't grabbed while lock dropped, b=7407 */
100 llcd = list_entry(lcm->lcm_llcd_free.next, typeof(*llcd), llcd_list);
101 list_del(&llcd->llcd_list);
102 atomic_dec(&lcm->lcm_llcd_numfree);
103 spin_unlock(&lcm->lcm_llcd_lock);
/* Fresh llcd starts with an empty cookie buffer. */
105 llcd->llcd_cookiebytes = 0;
/*
 * Release an llcd: drop its reference on the attached llog context, then
 * either free it outright (when the free list already holds at least
 * lcm_llcd_maxfree entries) or recycle it back onto lcm_llcd_free.
 *
 * NOTE(review): the else-branch brace pairing between the OBD_FREE path
 * and the recycle path is not visible here (numbering gap 118..120).
 */
110 static void llcd_put(struct llog_canceld_ctxt *llcd)
112 struct llog_commit_master *lcm = llcd->llcd_lcm;
114 llog_ctxt_put(llcd->llcd_ctxt);
115 if (atomic_read(&lcm->lcm_llcd_numfree) >= lcm->lcm_llcd_maxfree) {
/* Above the high-water mark: free the full allocation (header +
 * cookie payload), matching the size computed in llcd_alloc(). */
116 int llcd_size = llcd->llcd_size +
117 offsetof(struct llog_canceld_ctxt, llcd_cookies);
118 OBD_FREE(llcd, llcd_size);
/* Otherwise return it to the free list for reuse. */
120 spin_lock(&lcm->lcm_llcd_lock);
121 list_add(&llcd->llcd_list, &lcm->lcm_llcd_free);
122 atomic_inc(&lcm->lcm_llcd_numfree);
123 spin_unlock(&lcm->lcm_llcd_lock);
127 /* Send some cookies to the appropriate target */
/*
 * Queue an llcd on the LCM pending list and wake one commit thread to
 * process it.  If the LCM is shutting down (LLOG_LCM_FL_EXIT) the llcd
 * is not queued; only the waitqueue is signalled — presumably so an
 * exiting thread notices, but the skipped-queue cleanup path is not
 * visible here (TODO confirm against full source).
 */
128 static void llcd_send(struct llog_canceld_ctxt *llcd)
130 if (!(llcd->llcd_lcm->lcm_flags & LLOG_LCM_FL_EXIT)) {
131 spin_lock(&llcd->llcd_lcm->lcm_llcd_lock);
132 list_add_tail(&llcd->llcd_list,
133 &llcd->llcd_lcm->lcm_llcd_pending);
134 spin_unlock(&llcd->llcd_lcm->lcm_llcd_lock);
/* Wake exactly one waiting commit daemon. */
136 cfs_waitq_signal_nr(&llcd->llcd_lcm->lcm_waitq, 1);
140 * Grab llcd and assign it to passed @ctxt. Also set up backward link
141 * and get ref on @ctxt.
/*
 * Caller must hold ctxt->loc_sem (asserted below).  Grabs a free llcd
 * from the context's LCM, links it to @ctxt (taking a context ref via
 * llog_ctxt_get), and stores it in ctxt->loc_llcd.
 *
 * NOTE(review): the NULL-check after llcd_grab() and the return
 * statement are not visible in this view (numbering gap 148..152).
 */
143 static struct llog_canceld_ctxt *ctxt_llcd_grab(struct llog_ctxt *ctxt)
145 struct llog_canceld_ctxt *llcd;
147 LASSERT_SEM_LOCKED(&ctxt->loc_sem);
148 llcd = llcd_grab(ctxt->loc_lcm);
/* Cross-link: llcd -> ctxt (with ref) and ctxt -> llcd. */
152 llcd->llcd_ctxt = llog_ctxt_get(ctxt);
153 ctxt->loc_llcd = llcd;
155 CDEBUG(D_RPCTRACE,"grab llcd %p:%p\n", ctxt->loc_llcd, ctxt);
160 * Put llcd in passed @ctxt. Set ->loc_llcd to NULL.
/*
 * Undo ctxt_llcd_grab(): under loc_sem, release any attached llcd and
 * clear loc_llcd, then drop the context's import reference and clear
 * loc_imp.  Safe to call when loc_llcd is already NULL.
 *
 * NOTE(review): a brace/guard line between clearing loc_llcd and the
 * class_import_put() call is not visible here (numbering gap 168..171);
 * loc_imp may be guarded against NULL there — confirm in full source.
 */
162 static void ctxt_llcd_put(struct llog_ctxt *ctxt)
164 mutex_down(&ctxt->loc_sem);
165 if (ctxt->loc_llcd != NULL) {
166 CDEBUG(D_RPCTRACE,"put llcd %p:%p\n", ctxt->loc_llcd, ctxt);
167 llcd_put(ctxt->loc_llcd);
168 ctxt->loc_llcd = NULL;
171 class_import_put(ctxt->loc_imp);
172 ctxt->loc_imp = NULL;
174 mutex_up(&ctxt->loc_sem);
177 /* deleted objects have a commit callback that cancels the MDS
178 * log record for the deletion. The commit callback calls this
/*
 * Accumulate llog cancel cookies into the context's current llcd and,
 * when the llcd is (nearly) full or OBD_LLOG_FL_SENDNOW is set, detach
 * it from the context for sending.  All state is manipulated under
 * ctxt->loc_sem.
 *
 * @ctxt    llog context whose cancel cookies are batched
 * @lsm     unused in the visible lines
 * @count   number of cookies supplied (>0 with non-NULL @cookies to add)
 * @cookies cookie(s) to append; only sizeof(*cookies) bytes are copied
 *          in the visible code, i.e. one cookie per call
 * @flags   OBD_LLOG_FL_SENDNOW forces the llcd out immediately
 *
 * NOTE(review): numbering gaps hide the GOTO labels, the llcd_send()
 * call after detaching loc_llcd, and the RETURN — confirm in full source.
 */
181 int llog_obd_repl_cancel(struct llog_ctxt *ctxt,
182 struct lov_stripe_md *lsm, int count,
183 struct llog_cookie *cookies, int flags)
185 struct llog_canceld_ctxt *llcd;
191 mutex_down(&ctxt->loc_sem);
192 llcd = ctxt->loc_llcd;
/* No import means nobody to send cancels to. */
194 if (ctxt->loc_imp == NULL) {
195 CDEBUG(D_RPCTRACE, "no import for ctxt %p\n", ctxt);
199 if (count > 0 && cookies != NULL) {
/* No llcd currently attached (presumably llcd == NULL checked in a
 * hidden line): grab a fresh one; failure drops the cookie. */
201 llcd = ctxt_llcd_grab(ctxt);
203 CERROR("couldn't get an llcd - dropped "LPX64
205 cookies->lgc_lgl.lgl_oid,
206 cookies->lgc_lgl.lgl_ogen,
208 GOTO(out, rc = -ENOMEM);
/* Append one cookie to the llcd's payload buffer. */
212 memcpy((char *)llcd->llcd_cookies + llcd->llcd_cookiebytes,
213 cookies, sizeof(*cookies));
214 llcd->llcd_cookiebytes += sizeof(*cookies);
/* Nothing to flush unless we have an llcd and SENDNOW is requested. */
216 if (llcd == NULL || !(flags & OBD_LLOG_FL_SENDNOW))
/* Flush when the remaining space can't hold another cookie, or on
 * explicit SENDNOW; detach from ctxt so the send path owns it. */
220 if ((llcd->llcd_size - llcd->llcd_cookiebytes) < sizeof(*cookies) ||
221 (flags & OBD_LLOG_FL_SENDNOW)) {
222 CDEBUG(D_RPCTRACE, "send llcd %p:%p\n", llcd, llcd->llcd_ctxt);
223 ctxt->loc_llcd = NULL;
227 mutex_up(&ctxt->loc_sem);
230 EXPORT_SYMBOL(llog_obd_repl_cancel);
/*
 * Synchronize a replication llog context.  Two paths (per the original
 * comments below): on a reverse-import disconnect, the llcd will never
 * reach the sending list, so its reference is handled locally;
 * otherwise a forced cancel (OBD_LLOG_FL_SENDNOW) pushes loc_llcd onto
 * the sending list, where the recovery thread manages its refcount.
 *
 * NOTE(review): the local-put path body and the else/return lines are
 * hidden by numbering gaps (242..245, 251..253).
 */
232 int llog_obd_repl_sync(struct llog_ctxt *ctxt, struct obd_export *exp)
237 if (exp && (ctxt->loc_imp == exp->exp_imp_reverse)) {
238 CDEBUG(D_RPCTRACE,"reverse import disconnect\n");
240 * We put llcd because it is not going to sending list and
241 * thus, its refc will not be handled. We will handle it here.
246 * Sending cancel. This means that ctxt->loc_llcd wil be
247 * put on sending list in llog_obd_repl_cancel() and in
248 * this case recovery thread will take care of it refc.
250 rc = llog_cancel(ctxt, NULL, 0, NULL, OBD_LLOG_FL_SENDNOW);
254 EXPORT_SYMBOL(llog_obd_repl_sync);
/*
 * Error path helper for log_commit_thread(): log the failure and move
 * the daemon's entire local llcd list onto the LCM resend list so the
 * cookies are retried later rather than lost.
 *
 * NOTE(review): the third parameter (rc) and part of the body are
 * hidden by numbering gaps (258..259, 261).
 */
256 static inline void stop_log_commit(struct llog_commit_master *lcm,
257 struct llog_commit_daemon *lcd,
260 CERROR("error preparing commit: rc %d\n", rc);
262 spin_lock(&lcm->lcm_llcd_lock);
263 list_splice_init(&lcd->lcd_llcd_list, &lcm->lcm_llcd_resend);
264 spin_unlock(&lcm->lcm_llcd_lock);
/*
 * Body of a commit daemon thread.  Lifecycle:
 *   1. allocate a per-thread llog_commit_daemon, name + daemonize;
 *   2. loop: replenish the free-llcd pool, park on the idle list and
 *      wait for pending work (or an EXIT flag);
 *   3. claim all pending llcds belonging to one import (so parallel
 *      threads don't block on the same OST), build an OBD_LOG_CANCEL
 *      RPC per llcd, and send it; failures push work to the resend list
 *      via stop_log_commit();
 *   4. on forced exit, drain and drop all remaining cookie lists;
 *   5. free the daemon struct, decrement thread count, signal waiters.
 *
 * NOTE(review): the embedded numbering is discontinuous throughout this
 * function — loop headers, several braces, GOTO/continue/break lines,
 * and the RETURN are not visible.  Comments below describe only the
 * visible statements.
 */
267 static int log_commit_thread(void *arg)
269 struct llog_commit_master *lcm = arg;
270 struct llog_commit_daemon *lcd;
271 struct llog_canceld_ctxt *llcd, *n;
272 struct obd_import *import = NULL;
275 OBD_ALLOC(lcd, sizeof(*lcd));
/* Name this thread with a per-LCM sequence number. */
279 spin_lock(&lcm->lcm_thread_lock);
280 THREAD_NAME(cfs_curproc_comm(), CFS_CURPROC_COMM_MAX - 1,
281 "ll_log_comt_%02d", atomic_read(&lcm->lcm_thread_total));
282 atomic_inc(&lcm->lcm_thread_total);
283 spin_unlock(&lcm->lcm_thread_lock);
285 ptlrpc_daemonize(cfs_curproc_comm()); /* thread never needs to do IO */
287 CFS_INIT_LIST_HEAD(&lcd->lcd_lcm_list);
288 CFS_INIT_LIST_HEAD(&lcd->lcd_llcd_list);
291 CDEBUG(D_HA, "%s started\n", cfs_curproc_comm());
/* ---- main loop (loop construct itself not visible) ---- */
293 struct ptlrpc_request *request;
294 struct list_head *sending_list;
/* Drop the import ref held from the previous iteration, if any
 * (guard line not visible). */
298 class_import_put(import);
301 /* If we do not have enough pages available, allocate some */
302 while (atomic_read(&lcm->lcm_llcd_numfree) <
303 lcm->lcm_llcd_minfree) {
304 if (llcd_alloc(lcm) < 0)
/* Park on the idle list while waiting for work. */
308 spin_lock(&lcm->lcm_thread_lock);
309 atomic_inc(&lcm->lcm_thread_numidle);
310 list_move(&lcd->lcd_lcm_list, &lcm->lcm_thread_idle);
311 spin_unlock(&lcm->lcm_thread_lock);
313 wait_event_interruptible(lcm->lcm_waitq,
314 !list_empty(&lcm->lcm_llcd_pending) ||
315 lcm->lcm_flags & LLOG_LCM_FL_EXIT);
317 /* If we are the last available thread, start a new one in case
318 * we get blocked on an RPC (nobody else will start a new one)*/
319 spin_lock(&lcm->lcm_thread_lock);
320 atomic_dec(&lcm->lcm_thread_numidle);
321 list_move(&lcd->lcd_lcm_list, &lcm->lcm_thread_busy);
322 spin_unlock(&lcm->lcm_thread_lock);
324 sending_list = &lcm->lcm_llcd_pending;
/* (another conditional import put — guard not visible) */
327 class_import_put(import);
/* Shutdown: clamp pool limits so no new llcds/threads appear;
 * exit the loop once pending work is drained or EXIT_FORCE set. */
329 if (lcm->lcm_flags & LLOG_LCM_FL_EXIT) {
330 lcm->lcm_llcd_maxfree = 0;
331 lcm->lcm_llcd_minfree = 0;
332 lcm->lcm_thread_max = 0;
334 if (list_empty(&lcm->lcm_llcd_pending) ||
335 lcm->lcm_flags & LLOG_LCM_FL_EXIT_FORCE)
/* Spawn a spare thread if we are (nearly) the last idle one. */
339 if (atomic_read(&lcm->lcm_thread_numidle) <= 1 &&
340 atomic_read(&lcm->lcm_thread_total) < lcm->lcm_thread_max) {
341 rc = llog_start_commit_thread(lcm);
343 CERROR("error starting thread: rc %d\n", rc);
346 /* Move all of the pending cancels from the same OST off of
347 * the list, so we don't get multiple threads blocked and/or
348 * doing upcalls on the same OST in case of failure. */
349 spin_lock(&lcm->lcm_llcd_lock);
350 if (!list_empty(sending_list)) {
351 list_move_tail(sending_list->next,
352 &lcd->lcd_llcd_list);
353 llcd = list_entry(lcd->lcd_llcd_list.next,
354 typeof(*llcd), llcd_list);
355 LASSERT(llcd->llcd_lcm == lcm);
/* Batch key: the import of the first claimed llcd. */
356 import = llcd->llcd_ctxt->loc_imp;
358 class_import_get(import);
/* Claim every other llcd on the sending list with the same import. */
360 list_for_each_entry_safe(llcd, n, sending_list, llcd_list) {
361 LASSERT(llcd->llcd_lcm == lcm);
362 if (import == llcd->llcd_ctxt->loc_imp)
363 list_move_tail(&llcd->llcd_list,
364 &lcd->lcd_llcd_list);
/* Also sweep matching entries off the resend list (unless we are
 * already processing the resend list itself). */
366 if (sending_list != &lcm->lcm_llcd_resend) {
367 list_for_each_entry_safe(llcd, n, &lcm->lcm_llcd_resend,
369 LASSERT(llcd->llcd_lcm == lcm);
370 if (import == llcd->llcd_ctxt->loc_imp)
371 list_move_tail(&llcd->llcd_list,
372 &lcd->lcd_llcd_list);
375 spin_unlock(&lcm->lcm_llcd_lock);
377 /* We are the only one manipulating our local list - no lock */
378 list_for_each_entry_safe(llcd,n, &lcd->lcd_llcd_list,llcd_list){
/* bufs[1] points at the raw cookie payload for the RPC pack. */
379 char *bufs[2] = { NULL, (char *)llcd->llcd_cookies };
381 list_del(&llcd->llcd_list);
/* Empty llcd: nothing to send, just release it. */
382 if (llcd->llcd_cookiebytes == 0) {
383 CDEBUG(D_RPCTRACE, "put empty llcd %p:%p\n",
384 llcd, llcd->llcd_ctxt);
/* Re-check the context's import under loc_sem before sending. */
389 mutex_down(&llcd->llcd_ctxt->loc_sem);
390 if (llcd->llcd_ctxt->loc_imp == NULL) {
391 mutex_up(&llcd->llcd_ctxt->loc_sem);
392 CWARN("import will be destroyed, put "
393 "llcd %p:%p\n", llcd, llcd->llcd_ctxt);
397 mutex_up(&llcd->llcd_ctxt->loc_sem);
/* Defensive check against a freed/poisoned import. */
399 if (!import || (import == LP_POISON) ||
400 (import->imp_client == LP_POISON)) {
401 CERROR("No import %p (llcd=%p, ctxt=%p)\n",
402 import, llcd, llcd->llcd_ctxt);
407 OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_RECOV, 10);
/* Build the OBD_LOG_CANCEL request carrying the cookies. */
409 request = ptlrpc_request_alloc(import, &RQF_LOG_CANCEL);
410 if (request == NULL) {
412 stop_log_commit(lcm, lcd, rc);
416 req_capsule_set_size(&request->rq_pill, &RMF_LOGCOOKIES,
417 RCL_CLIENT,llcd->llcd_cookiebytes);
419 rc = ptlrpc_request_bufs_pack(request,
421 OBD_LOG_CANCEL, bufs,
/* Pack failure: free the request and divert work to resend. */
424 ptlrpc_request_free(request);
425 stop_log_commit(lcm, lcd, rc);
429 /* XXX FIXME bug 249, 5515 */
430 request->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
431 request->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;
433 ptlrpc_request_set_replen(request);
/* Import may have vanished while we were packing; re-check. */
434 mutex_down(&llcd->llcd_ctxt->loc_sem);
435 if (llcd->llcd_ctxt->loc_imp == NULL) {
436 mutex_up(&llcd->llcd_ctxt->loc_sem);
437 CWARN("import will be destroyed, put "
438 "llcd %p:%p\n", llcd, llcd->llcd_ctxt);
440 ptlrpc_req_finished(request);
443 mutex_up(&llcd->llcd_ctxt->loc_sem);
/* Synchronous send; request is released regardless of outcome. */
444 rc = ptlrpc_queue_wait(request);
445 ptlrpc_req_finished(request);
447 /* If the RPC failed, we put this and the remaining
448 * messages onto the resend list for another time. */
454 CERROR("commit %p:%p drop %d cookies: rc %d\n",
455 llcd, llcd->llcd_ctxt,
456 (int)(llcd->llcd_cookiebytes /
457 sizeof(*llcd->llcd_cookies)), rc);
/* After the batch, service the resend list if it has entries. */
462 sending_list = &lcm->lcm_llcd_resend;
463 if (!list_empty(sending_list))
469 class_import_put(import);
471 /* If we are force exiting, just drop all of the cookies. */
472 if (lcm->lcm_flags & LLOG_LCM_FL_EXIT_FORCE) {
473 spin_lock(&lcm->lcm_llcd_lock);
474 list_splice_init(&lcm->lcm_llcd_pending, &lcd->lcd_llcd_list);
475 list_splice_init(&lcm->lcm_llcd_resend, &lcd->lcd_llcd_list);
476 list_splice_init(&lcm->lcm_llcd_free, &lcd->lcd_llcd_list);
477 spin_unlock(&lcm->lcm_llcd_lock);
479 list_for_each_entry_safe(llcd, n, &lcd->lcd_llcd_list,llcd_list)
/* Teardown: unlink this daemon, free it, update thread count. */
483 spin_lock(&lcm->lcm_thread_lock);
484 list_del(&lcd->lcd_lcm_list);
485 spin_unlock(&lcm->lcm_thread_lock);
486 OBD_FREE(lcd, sizeof(*lcd));
488 CDEBUG(D_HA, "%s exiting\n", cfs_curproc_comm());
490 spin_lock(&lcm->lcm_thread_lock);
491 atomic_dec(&lcm->lcm_thread_total);
492 spin_unlock(&lcm->lcm_thread_lock);
/* Wake llog_cleanup_commit_master(), which waits for total == 0. */
493 cfs_waitq_signal(&lcm->lcm_waitq);
/*
 * Spawn one more log_commit_thread for @lcm, unless the configured
 * lcm_thread_max is already reached.  Returns via CERROR path on
 * kernel-thread creation failure (return statements not visible in
 * this view — numbering gaps 504..505, 510..514).
 */
498 int llog_start_commit_thread(struct llog_commit_master *lcm)
503 if (atomic_read(&lcm->lcm_thread_total) >= lcm->lcm_thread_max)
506 rc = cfs_kernel_thread(log_commit_thread, lcm, CLONE_VM | CLONE_FILES);
508 CERROR("error starting thread #%d: %d\n",
509 atomic_read(&lcm->lcm_thread_total), rc);
515 EXPORT_SYMBOL(llog_start_commit_thread);
/*
 * Argument bundle handed to log_process_thread() via the file-scope
 * `llpa` instance; llpa_sem serializes producers (see
 * llog_recovery_generic) until the thread copies the fields out.
 * NOTE(review): the definition is truncated in this view — later
 * members (llpa_cb, llpa_arg, per usage below) and the closing
 * `} llpa;` are not visible.
 */
517 static struct llog_process_args {
518 struct semaphore llpa_sem;
519 struct llog_ctxt *llpa_ctxt;
/*
 * Initialize a llog_commit_master: thread bookkeeping lists/lock,
 * the llcd pending/resend/free lists and their lock, counters, pool
 * limits, and the shared llog_process_args semaphore.  Thread cap is
 * 5; minfree starts at 0 (maxfree initialization not visible here).
 */
524 int llog_init_commit_master(struct llog_commit_master *lcm)
526 CFS_INIT_LIST_HEAD(&lcm->lcm_thread_busy);
527 CFS_INIT_LIST_HEAD(&lcm->lcm_thread_idle);
528 spin_lock_init(&lcm->lcm_thread_lock);
529 atomic_set(&lcm->lcm_thread_numidle, 0);
530 cfs_waitq_init(&lcm->lcm_waitq);
531 CFS_INIT_LIST_HEAD(&lcm->lcm_llcd_pending);
532 CFS_INIT_LIST_HEAD(&lcm->lcm_llcd_resend);
533 CFS_INIT_LIST_HEAD(&lcm->lcm_llcd_free);
534 spin_lock_init(&lcm->lcm_llcd_lock);
535 atomic_set(&lcm->lcm_llcd_numfree, 0);
536 lcm->lcm_llcd_minfree = 0;
537 lcm->lcm_thread_max = 5;
538 /* FIXME initialize semaphore for llog_process_args */
/* Binary-semaphore use: one producer at a time fills `llpa`. */
539 sema_init(&llpa.llpa_sem, 1);
542 EXPORT_SYMBOL(llog_init_commit_master);
/*
 * Shut down the commit daemons: set LLOG_LCM_FL_EXIT (and, under a
 * condition hidden in the numbering gap at 548 — presumably a `force`
 * parameter — LLOG_LCM_FL_EXIT_FORCE), wake all threads, then wait
 * until lcm_thread_total drops to zero.
 */
544 int llog_cleanup_commit_master(struct llog_commit_master *lcm,
547 lcm->lcm_flags |= LLOG_LCM_FL_EXIT;
549 lcm->lcm_flags |= LLOG_LCM_FL_EXIT_FORCE;
550 cfs_waitq_signal(&lcm->lcm_waitq);
552 wait_event_interruptible(lcm->lcm_waitq,
553 atomic_read(&lcm->lcm_thread_total) == 0);
556 EXPORT_SYMBOL(llog_cleanup_commit_master);
/*
 * Kernel thread that replays a catalog llog during recovery: copies
 * its arguments out of the shared llog_process_args (releasing
 * llpa_sem so the producer can reuse it), opens the llog by logid,
 * walks it with the supplied callback via llog_cat_process(), then
 * forces out any accumulated cancel cookies with llog_sync() and
 * closes the handle.
 *
 * NOTE(review): llpa_cb/llpa_arg reads, the callback NULL-check
 * branch structure, and the final return are partly hidden by
 * numbering gaps.
 */
558 static int log_process_thread(void *args)
560 struct llog_process_args *data = args;
561 struct llog_ctxt *ctxt = data->llpa_ctxt;
562 void *cb = data->llpa_cb;
/* Copy the logid by value before releasing the shared args. */
563 struct llog_logid logid = *(struct llog_logid *)(data->llpa_arg);
564 struct llog_handle *llh = NULL;
/* Producer may now refill `llpa` for another thread. */
568 mutex_up(&data->llpa_sem);
569 ptlrpc_daemonize("llog_process"); /* thread does IO to log files */
571 rc = llog_create(ctxt, &llh, &logid, NULL);
573 CERROR("llog_create failed %d\n", rc);
576 rc = llog_init_handle(llh, LLOG_F_IS_CAT, NULL);
578 CERROR("llog_init_handle failed %d\n", rc);
579 GOTO(release_llh, rc);
/* LLOG_PROC_BREAK is an expected early-stop, not an error. */
583 rc = llog_cat_process(llh, (llog_cb_t)cb, NULL);
584 if (rc != LLOG_PROC_BREAK)
585 CERROR("llog_cat_process failed %d\n", rc);
587 CWARN("no callback function for recovery\n");
/* Push out any llcd built up during processing. */
590 CDEBUG(D_HA, "send llcd %p:%p forcibly after recovery\n",
591 ctxt->loc_llcd, ctxt);
592 llog_sync(ctxt, NULL);
595 rc = llog_cat_put(llh);
597 CERROR("llog_cat_put failed %d\n", rc);
/*
 * Fill the shared llog_process_args under llpa_sem and launch
 * log_process_thread() to replay the llog identified by @arg with
 * callback @handle.  Bails out early if the obd is stopping.  The
 * semaphore is released by the spawned thread once it has copied the
 * arguments (see log_process_thread), not here.
 *
 * NOTE(review): the llpa.llpa_arg assignment, error-path sem release,
 * and return lines are hidden by numbering gaps (614, 617..619, 626+).
 */
603 static int llog_recovery_generic(struct llog_ctxt *ctxt, void *handle,void *arg)
605 struct obd_device *obd = ctxt->loc_obd;
609 if (obd->obd_stopping)
612 mutex_down(&llpa.llpa_sem);
613 llpa.llpa_cb = handle;
/* Take a context ref for the thread; it is dropped elsewhere. */
615 llpa.llpa_ctxt = llog_ctxt_get(ctxt);
616 if (!llpa.llpa_ctxt) {
620 rc = cfs_kernel_thread(log_process_thread, &llpa, CLONE_VM | CLONE_FILES);
623 CERROR("error starting log_process_thread: %d\n", rc);
625 CDEBUG(D_HA, "log_process_thread: %d\n", rc);
/*
 * (Re)connect handler for a replication llog context: flush any llcd
 * left over from before the reconnect via llog_sync(), record the new
 * generation, attach a fresh llcd under loc_sem, then kick off llog
 * recovery (llog_recovery_generic with ctxt->llog_proc_cb) for @logid.
 *
 * @count and @uuid are unused in the visible lines.
 * NOTE(review): the llcd NULL-check after ctxt_llcd_grab() and the
 * return statements are hidden by numbering gaps.
 */
632 int llog_repl_connect(struct llog_ctxt *ctxt, int count,
633 struct llog_logid *logid, struct llog_gen *gen,
634 struct obd_uuid *uuid)
636 struct llog_canceld_ctxt *llcd;
640 /* send back llcd before recovery from llog */
641 if (ctxt->loc_llcd != NULL) {
642 CWARN("llcd %p:%p not empty\n", ctxt->loc_llcd, ctxt);
643 llog_sync(ctxt, NULL);
646 mutex_down(&ctxt->loc_sem);
647 ctxt->loc_gen = *gen;
648 llcd = ctxt_llcd_grab(ctxt);
650 CERROR("couldn't get an llcd\n");
651 mutex_up(&ctxt->loc_sem);
654 mutex_up(&ctxt->loc_sem);
656 rc = llog_recovery_generic(ctxt, ctxt->llog_proc_cb, logid);
659 CERROR("error recovery process: %d\n", rc);
663 EXPORT_SYMBOL(llog_repl_connect);
665 #else /* !__KERNEL__ */
667 int llog_obd_repl_cancel(struct llog_ctxt *ctxt,
668 struct lov_stripe_md *lsm, int count,
669 struct llog_cookie *cookies, int flags)