4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2015, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/obdclass/llog_cat.c
34 * OST<->MDS recovery logging infrastructure.
36 * Invariants in implementation:
37 * - we do not share logs among different OST<->MDS connections, so that
38 * if an OST or MDS fails it need only look at log(s) relevant to itself
40 * Author: Andreas Dilger <adilger@clusterfs.com>
41 * Author: Alexey Zhuravlev <alexey.zhuravlev@intel.com>
42 * Author: Mikhail Pershin <mike.pershin@intel.com>
45 #define DEBUG_SUBSYSTEM S_LOG
48 #include <obd_class.h>
50 #include "llog_internal.h"
/* Create a new plain log, open it, and register it in the catalog.
 * This log handle will be closed when all of the records in it are removed.
 *
 * Assumes caller has already pushed us into the kernel context and is locking.
 *
 * NOTE(review): this extract appears truncated; several lines of the
 * original function (labels, returns, closing braces) are not visible here.
 */
static int llog_cat_new_log(const struct lu_env *env,
			    struct llog_handle *cathandle,
			    struct llog_handle *loghandle,
	/* per-thread scratch area; rec aliases lgi_logid, so it must be
	 * fully re-initialized before each use */
	struct llog_thread_info	*lgi = llog_info(env);
	struct llog_logid_rec	*rec = &lgi->lgi_logid;
	struct thandle		*handle = NULL;
	struct dt_device	*dt = NULL;
	struct llog_log_hdr	*llh = cathandle->lgh_hdr;

	/* candidate catalog slot: next index after the last used one,
	 * wrapping at the bitmap size (or at cfs_fail_val + 1 when the
	 * OBD_FAIL_CAT_RECORDS fault-injection point is armed) */
	index = (cathandle->lgh_last_idx + 1) %
		(OBD_FAIL_PRECHECK(OBD_FAIL_CAT_RECORDS) ? (cfs_fail_val + 1) :
						LLOG_HDR_BITMAP_SIZE(llh));

	/* check that new llog index will not overlap with the first one.
	 * - llh_cat_idx is the index just before the first/oldest still in-use
	 * - lgh_last_idx is the last/newest used index in catalog
	 *
	 * When catalog is not wrapped yet then lgh_last_idx is always larger
	 * than llh_cat_idx. After the wrap around lgh_last_idx re-starts
	 * from 0 and llh_cat_idx becomes the upper limit for it
	 *
	 * Check if catalog has already wrapped around or not by comparing
	 * last_idx and cat_idx */
	if ((index == llh->llh_cat_idx + 1 && llh->llh_count > 1) ||
	    (index == 0 && llh->llh_cat_idx == 0)) {
		CWARN("%s: there are no more free slots in catalog\n",
		      loghandle->lgh_ctxt->loc_obd->obd_name);

	if (OBD_FAIL_CHECK(OBD_FAIL_MDS_LLOG_CREATE_FAILED))

	if (loghandle->lgh_hdr != NULL) {
		/* If llog object is remote and creation failed, lgh_hdr
		 * might be left over here, free it first */
		LASSERT(!llog_exist(loghandle));
		OBD_FREE_LARGE(loghandle->lgh_hdr, loghandle->lgh_hdr_size);
		loghandle->lgh_hdr = NULL;

	dt = lu2dt_dev(cathandle->lgh_obj->do_lu.lo_dev);

	/* local transaction for the create + catalog record write */
	handle = dt_trans_create(env, dt);
		RETURN(PTR_ERR(handle));

	/* Create update llog object synchronously, which
	 * happens during the initialization process, see
	 * lod_sub_prep_llog(), to make sure the update
	 * llog object is created before cross-MDT writing
	 * updates into the llog object */
	if (cathandle->lgh_ctxt->loc_flags & LLOG_CTXT_FLAG_NORMAL_FID)

	handle->th_wait_submit = 1;

	rc = llog_declare_create(env, loghandle, handle);

	/* reserve space for the catalog record pointing at the new log */
	rec->lid_hdr.lrh_len = sizeof(*rec);
	rec->lid_hdr.lrh_type = LLOG_LOGID_MAGIC;
	rec->lid_id = loghandle->lgh_id;
	rc = llog_declare_write_rec(env, cathandle, &rec->lid_hdr, -1,

	rc = dt_trans_start_local(env, dt, handle);

	rc = llog_create(env, loghandle, th);
	/* if llog is already created, no need to initialize it */
	} else if (rc != 0) {
		CERROR("%s: can't create new plain llog in catalog: rc = %d\n",
		       loghandle->lgh_ctxt->loc_obd->obd_name, rc);

	rc = llog_init_handle(env, loghandle,
			      LLOG_F_IS_PLAIN | LLOG_F_ZAP_WHEN_EMPTY,
			      &cathandle->lgh_hdr->llh_tgtuuid);

	/* build the record for this log in the catalog */
	rec->lid_hdr.lrh_len = sizeof(*rec);
	rec->lid_hdr.lrh_type = LLOG_LOGID_MAGIC;
	rec->lid_id = loghandle->lgh_id;

	/* append the new record into catalog. The new index will be
	 * assigned to the record and updated in rec header */
	rc = llog_write_rec(env, cathandle, &rec->lid_hdr,
			    &loghandle->u.phd.phd_cookie, LLOG_NEXT_IDX, th);
		GOTO(out_destroy, rc);

	CDEBUG(D_OTHER, "new plain log "DOSTID":%x for index %u of catalog"
	       DOSTID"\n", POSTID(&loghandle->lgh_id.lgl_oi),
	       loghandle->lgh_id.lgl_ogen, rec->lid_hdr.lrh_index,
	       POSTID(&cathandle->lgh_id.lgl_oi));

	/* remember which catalog slot references this plain log */
	loghandle->lgh_hdr->llh_cat_idx = rec->lid_hdr.lrh_index;

	/* limit max size of plain llog so that space can be
	 * released sooner, especially on small filesystems */
	/* 2MB for the cases when free space hasn't been learned yet */
	loghandle->lgh_max_size = 2 << 20;
	dt = lu2dt_dev(cathandle->lgh_obj->do_lu.lo_dev);
	rc = dt_statfs(env, dt, &lgi->lgi_statfs);
	if (rc == 0 && lgi->lgi_statfs.os_bfree > 0) {
		/* cap at 1/64 of the currently free space */
		__u64 freespace = (lgi->lgi_statfs.os_bfree *
				  lgi->lgi_statfs.os_bsize) >> 6;
		if (freespace < loghandle->lgh_max_size)
			loghandle->lgh_max_size = freespace;
		/* shouldn't be > 128MB in any case?
		 * it's 256K records of 512 bytes each */
		if (freespace > (128 << 20))
			loghandle->lgh_max_size = 128 << 20;

	if (handle != NULL) {
		/* preserve a negative rc as the transaction result */
		handle->th_result = rc >= 0 ? 0 : rc;
		dt_trans_stop(env, dt, handle);

	/* to signal llog_cat_close() it shouldn't try to destroy the llog,
	 * we want to destroy it in this transaction, otherwise the object
	 * becomes an orphan */
	loghandle->lgh_hdr->llh_flags &= ~LLOG_F_ZAP_WHEN_EMPTY;
	/* this is to mimic a full log, so another llog_cat_current_log()
	 * can skip it and ask for another one */
	loghandle->lgh_last_idx = LLOG_HDR_BITMAP_SIZE(llh) + 1;
	llog_trans_destroy(env, loghandle, th);
/* Open an existent log handle and add it to the open list.
 * This log handle will be closed when all of the records in it are removed.
 *
 * Assumes caller has already pushed us into the kernel context and is locking.
 * We return a lock on the handle to ensure nobody yanks it from us.
 *
 * This takes an extra reference on the llog_handle via llog_handle_get() and
 * requires this reference to be put by the caller using llog_handle_put()
 */
int llog_cat_id2handle(const struct lu_env *env, struct llog_handle *cathandle,
		       struct llog_handle **res, struct llog_logid *logid)
	struct llog_handle	*loghandle;

	if (cathandle == NULL)

	/* plain logs inherit the extended-record format flags of the catalog */
	fmt = cathandle->lgh_hdr->llh_flags & LLOG_F_EXT_MASK;
	down_write(&cathandle->lgh_lock);
	/* fast path: the llog may already be on the catalog's open list */
	list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
		struct llog_logid *cgl = &loghandle->lgh_id;

		if (ostid_id(&cgl->lgl_oi) == ostid_id(&logid->lgl_oi) &&
		    ostid_seq(&cgl->lgl_oi) == ostid_seq(&logid->lgl_oi)) {
			/* same object but different generation: stale id */
			if (cgl->lgl_ogen != logid->lgl_ogen) {
				CERROR("%s: log "DOSTID" generation %x != %x\n",
				       loghandle->lgh_ctxt->loc_obd->obd_name,
				       POSTID(&logid->lgl_oi), cgl->lgl_ogen,
			loghandle->u.phd.phd_cat_handle = cathandle;
			up_write(&cathandle->lgh_lock);
	up_write(&cathandle->lgh_lock);

	/* not cached: open the plain llog by its logid */
	rc = llog_open(env, cathandle->lgh_ctxt, &loghandle, logid, NULL,
		CERROR("%s: error opening log id "DOSTID":%x: rc = %d\n",
		       cathandle->lgh_ctxt->loc_obd->obd_name,
		       POSTID(&logid->lgl_oi), logid->lgl_ogen, rc);

	rc = llog_init_handle(env, loghandle, LLOG_F_IS_PLAIN | fmt, NULL);
		llog_close(env, loghandle);

	/* publish the freshly opened handle on the catalog's open list */
	down_write(&cathandle->lgh_lock);
	list_add_tail(&loghandle->u.phd.phd_entry, &cathandle->u.chd.chd_head);
	up_write(&cathandle->lgh_lock);

	loghandle->u.phd.phd_cat_handle = cathandle;
	loghandle->u.phd.phd_cookie.lgc_lgl = cathandle->lgh_id;
	loghandle->u.phd.phd_cookie.lgc_index =
			loghandle->lgh_hdr->llh_cat_idx;

	llog_handle_get(loghandle);
/* Close a catalog handle: walk its open list, destroy empty plain llogs
 * that are marked zap-when-empty, close the rest, detach the catalog from
 * its llog context, and finally close the catalog itself. */
int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle)
	struct llog_handle	*loghandle, *n;

	/* _safe variant: entries are removed from the list while walking */
	list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head,
		struct llog_log_hdr	*llh = loghandle->lgh_hdr;

		/* unlink open-not-created llogs */
		list_del_init(&loghandle->u.phd.phd_entry);
		llh = loghandle->lgh_hdr;
		/* llh_count == 1 means only the header record is left,
		 * i.e. the plain log is empty and can be destroyed */
		if (loghandle->lgh_obj != NULL && llh != NULL &&
		    (llh->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
		    (llh->llh_count == 1)) {
			rc = llog_destroy(env, loghandle);
				CERROR("%s: failure destroying log during "
				       "cleanup: rc = %d\n",
				       loghandle->lgh_ctxt->loc_obd->obd_name,

			/* drop the catalog record that referenced it */
			index = loghandle->u.phd.phd_cookie.lgc_index;
			llog_cat_cleanup(env, cathandle, NULL, index);
		llog_close(env, loghandle);

	/* if handle was stored in ctxt, remove it too */
	if (cathandle->lgh_ctxt->loc_handle == cathandle)
		cathandle->lgh_ctxt->loc_handle = NULL;
	rc = llog_close(env, cathandle);
EXPORT_SYMBOL(llog_cat_close);
325 * lockdep markers for nested struct llog_handle::lgh_lock locking.
/** Return the currently active log handle. If the current log handle doesn't
 * have enough space left for the current record, start a new one.
 *
 * If reclen is 0, we only want to know what the currently active log is,
 * otherwise we get a lock on this log so nobody can steal our space.
 *
 * Assumes caller has already pushed us into the kernel context and is locking.
 *
 * NOTE: loghandle is write-locked upon successful return
 */
static struct llog_handle *llog_cat_current_log(struct llog_handle *cathandle,
	struct llog_handle *loghandle = NULL;

	/* fault injection: skip the current log and force the "next" path */
	if (OBD_FAIL_CHECK(OBD_FAIL_MDS_LLOG_CREATE_FAILED2)) {
		down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
		GOTO(next, loghandle);

	/* fast path: catalog read lock is enough if the current log
	 * still has free space */
	down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
	loghandle = cathandle->u.chd.chd_current_log;
		struct llog_log_hdr *llh;

		down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
		llh = loghandle->lgh_hdr;
		if (llh == NULL || !llog_is_full(loghandle)) {
			up_read(&cathandle->lgh_lock);
		up_write(&loghandle->lgh_lock);
	up_read(&cathandle->lgh_lock);

	/* time to use next log */

	/* first, we have to make sure the state hasn't changed
	 * while we dropped the read lock above */
	down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
	loghandle = cathandle->u.chd.chd_current_log;
		struct llog_log_hdr *llh;

		down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
		llh = loghandle->lgh_hdr;
		if (!llog_is_full(loghandle))
			GOTO(out_unlock, loghandle);
		up_write(&loghandle->lgh_lock);

	/* Sigh, the chd_next_log and chd_current_log is initialized
	 * in declare phase, and we do not serialize the catlog
	 * accessing, so it might be possible the llog creation
	 * thread (see llog_cat_declare_add_rec()) did not create
	 * llog successfully, then the following thread might
	 * meet this situation. */
	if (IS_ERR_OR_NULL(cathandle->u.chd.chd_next_log)) {
		CERROR("%s: next log does not exist!\n",
		       cathandle->lgh_ctxt->loc_obd->obd_name);
		loghandle = ERR_PTR(-EIO);
		if (cathandle->u.chd.chd_next_log == NULL) {
			/* Store the error in chd_next_log, so
			 * the following process can get the correct
			 * chd_next_log. */
			cathandle->u.chd.chd_next_log = loghandle;
		GOTO(out_unlock, loghandle);

	CDEBUG(D_INODE, "use next log\n");

	/* promote the pre-declared next log to be the current one */
	loghandle = cathandle->u.chd.chd_next_log;
	cathandle->u.chd.chd_current_log = loghandle;
	cathandle->u.chd.chd_next_log = NULL;
	down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);

	up_write(&cathandle->lgh_lock);
/* Re-read the headers of a stale catalog and of every plain llog on its
 * open list, then clear the lgh_stale flag. Called after a concurrent
 * failure left the in-memory headers out of date. */
static int llog_cat_update_header(const struct lu_env *env,
				 struct llog_handle *cathandle)
	struct llog_handle *loghandle;

	/* hold the catalog write lock while headers are refreshed */
	down_write(&cathandle->lgh_lock);
	if (!cathandle->lgh_stale) {
		/* someone else already refreshed it */
		up_write(&cathandle->lgh_lock);
	list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
		/* skip llogs that were declared but never created on disk */
		if (!llog_exist(loghandle))

		rc = llog_read_header(env, loghandle, NULL);
			up_write(&cathandle->lgh_lock);

	rc = llog_read_header(env, cathandle, NULL);
	cathandle->lgh_stale = 0;
	up_write(&cathandle->lgh_lock);
/* Add a single record to the recovery log(s) using a catalog
 * Returns as llog_write_record
 *
 * Assumes caller has already pushed us into the kernel context.
 */
int llog_cat_add_rec(const struct lu_env *env, struct llog_handle *cathandle,
		     struct llog_rec_hdr *rec, struct llog_cookie *reccookie,
	struct llog_handle *loghandle;

	/* record must fit into one llog chunk */
	LASSERT(rec->lrh_len <= cathandle->lgh_ctxt->loc_chunk_size);

	loghandle = llog_cat_current_log(cathandle, th);
	if (IS_ERR(loghandle))
		RETURN(PTR_ERR(loghandle));

	/* loghandle is already locked by llog_cat_current_log() for us */
	if (!llog_exist(loghandle)) {
		rc = llog_cat_new_log(env, cathandle, loghandle, th);
			up_write(&loghandle->lgh_lock);
			/* nobody should be trying to use this llog */
			down_write(&cathandle->lgh_lock);
			if (cathandle->u.chd.chd_current_log == loghandle)
				cathandle->u.chd.chd_current_log = NULL;
			up_write(&cathandle->lgh_lock);

	/* now let's try to add the record */
	rc = llog_write_rec(env, loghandle, rec, reccookie, LLOG_NEXT_IDX, th);
		CDEBUG_LIMIT(rc == -ENOSPC ? D_HA : D_ERROR,
			     "llog_write_rec %d: lh=%p\n", rc, loghandle);
		/* -ENOSPC is returned if no empty records are left
		 * and when there is a lack of space on the storage.
		 * there is no point to try again if it's the second
		 * case. many callers (like llog test) expect ENOSPC,
		 * so we preserve this error code, but look for the
		 * actual cause here */
		if (rc == -ENOSPC && llog_is_full(loghandle))
	up_write(&loghandle->lgh_lock);

	if (rc == -ENOBUFS) {
			CERROR("%s: error on 2nd llog: rc = %d\n",
			       cathandle->lgh_ctxt->loc_obd->obd_name, rc);
EXPORT_SYMBOL(llog_cat_add_rec);
/* Declare-phase counterpart of llog_cat_add_rec(): make sure current and
 * next plain llogs exist (creating them synchronously for remote objects),
 * and declare the record writes in the given transaction.
 *
 * NOTE(review): this extract appears truncated; some lines of the original
 * function are not visible here.
 */
int llog_cat_declare_add_rec(const struct lu_env *env,
			     struct llog_handle *cathandle,
			     struct llog_rec_hdr *rec, struct thandle *th)
	struct llog_thread_info	*lgi = llog_info(env);
	struct llog_logid_rec	*lirec = &lgi->lgi_logid;
	struct llog_handle	*loghandle, *next;

	if (cathandle->u.chd.chd_current_log == NULL) {
		/* declare new plain llog */
		down_write(&cathandle->lgh_lock);
		/* re-check under the write lock (double-checked pattern) */
		if (cathandle->u.chd.chd_current_log == NULL) {
			rc = llog_open(env, cathandle->lgh_ctxt, &loghandle,
				       NULL, NULL, LLOG_OPEN_NEW);
				cathandle->u.chd.chd_current_log = loghandle;
				list_add_tail(&loghandle->u.phd.phd_entry,
					      &cathandle->u.chd.chd_head);
		up_write(&cathandle->lgh_lock);
	} else if (cathandle->u.chd.chd_next_log == NULL ||
		   IS_ERR(cathandle->u.chd.chd_next_log)) {
		/* declare next plain llog */
		down_write(&cathandle->lgh_lock);
		if (cathandle->u.chd.chd_next_log == NULL ||
		    IS_ERR(cathandle->u.chd.chd_next_log)) {
			rc = llog_open(env, cathandle->lgh_ctxt, &loghandle,
				       NULL, NULL, LLOG_OPEN_NEW);
				cathandle->u.chd.chd_next_log = loghandle;
				list_add_tail(&loghandle->u.phd.phd_entry,
					      &cathandle->u.chd.chd_head);
		up_write(&cathandle->lgh_lock);

	lirec->lid_hdr.lrh_len = sizeof(*lirec);

	if (!llog_exist(cathandle->u.chd.chd_current_log)) {
		if (dt_object_remote(cathandle->lgh_obj)) {
			/* For remote operation, if we put the llog object
			 * creation in the current transaction, then the
			 * llog object will not be created on the remote
			 * target until the transaction stop, if other
			 * operations start before the transaction stop,
			 * and use the same llog object, will be dependent
			 * on the success of this transaction. So let's
			 * create the llog object synchronously here to
			 * remove the dependency. */
			down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
			loghandle = cathandle->u.chd.chd_current_log;
			down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
			if (cathandle->lgh_stale) {
				up_write(&loghandle->lgh_lock);
				up_read(&cathandle->lgh_lock);
				GOTO(out, rc = -EIO);
			if (!llog_exist(loghandle)) {
				rc = llog_cat_new_log(env, cathandle, loghandle,
					/* mark catalog stale on failure so the
					 * header gets re-read below */
					cathandle->lgh_stale = 1;
			up_write(&loghandle->lgh_lock);
			up_read(&cathandle->lgh_lock);
				rc = llog_cat_update_header(env, cathandle);

			rc = llog_declare_create(env,
					cathandle->u.chd.chd_current_log, th);
				llog_declare_write_rec(env, cathandle,
						       &lirec->lid_hdr, -1, th);

	/* declare records in the llogs */
	rc = llog_declare_write_rec(env, cathandle->u.chd.chd_current_log,
		down_write(&cathandle->lgh_lock);
		if (cathandle->lgh_stale) {
			up_write(&cathandle->lgh_lock);
			GOTO(out, rc = -EIO);
		cathandle->lgh_stale = 1;
		up_write(&cathandle->lgh_lock);
		rc = llog_cat_update_header(env, cathandle);

	next = cathandle->u.chd.chd_next_log;
	if (!IS_ERR_OR_NULL(next)) {
		if (!llog_exist(next)) {
			if (dt_object_remote(cathandle->lgh_obj)) {
				/* For remote operation, if we put the llog
				 * object creation in the current transaction,
				 * then the llog object will not be created on
				 * the remote target until the transaction stop,
				 * if other operations start before the
				 * transaction stop, and use the same llog
				 * object, will be dependent on the success of
				 * this transaction. So let's create the llog
				 * object synchronously here to remove the
				down_write_nested(&cathandle->lgh_lock,
				next = cathandle->u.chd.chd_next_log;
				if (IS_ERR_OR_NULL(next)) {
					/* Sigh, another thread just tried,
					 * let's fail as well */
					up_write(&cathandle->lgh_lock);

				down_write_nested(&next->lgh_lock, LLOGH_LOG);
				if (!llog_exist(next)) {
					rc = llog_cat_new_log(env, cathandle,
						/* drop the broken next log */
						cathandle->u.chd.chd_next_log =
				up_write(&next->lgh_lock);
				up_write(&cathandle->lgh_lock);

				rc = llog_declare_create(env, next, th);
				llog_declare_write_rec(env, cathandle,
						&lirec->lid_hdr, -1, th);
		/* XXX: we hope for declarations made for existing llog
		 *	this might be not correct with some backends
		 *	where declarations are expected against specific
		 *	object like ZFS with full debugging enabled */
		/*llog_declare_write_rec(env, next, rec, -1, th);*/
EXPORT_SYMBOL(llog_cat_declare_add_rec);
/* Convenience wrapper: add one record through the catalog inside a
 * freshly created local transaction (declare, start, write, stop). */
int llog_cat_add(const struct lu_env *env, struct llog_handle *cathandle,
		 struct llog_rec_hdr *rec, struct llog_cookie *reccookie)
	struct llog_ctxt	*ctxt;
	struct dt_device	*dt;
	struct thandle		*th = NULL;

	ctxt = cathandle->lgh_ctxt;
	LASSERT(ctxt->loc_exp);

	LASSERT(cathandle->lgh_obj != NULL);
	dt = lu2dt_dev(cathandle->lgh_obj->do_lu.lo_dev);

	th = dt_trans_create(env, dt);

	rc = llog_cat_declare_add_rec(env, cathandle, rec, th);

	rc = dt_trans_start_local(env, dt, th);
		rc = llog_cat_add_rec(env, cathandle, rec, reccookie, th);

	dt_trans_stop(env, dt, th);
EXPORT_SYMBOL(llog_cat_add);
/* For each cookie in the cookie array, we clear the log in-use bit and either:
 * - the log is empty, so mark it free in the catalog header and delete it
 * - the log is not empty, just write out the log header
 *
 * The cookies may be in different log files, so we need to get new logs
 *
 * Assumes caller has already pushed us into the kernel context.
 */
int llog_cat_cancel_records(const struct lu_env *env,
			    struct llog_handle *cathandle, int count,
			    struct llog_cookie *cookies)
	int i, index, rc = 0, failed = 0;

	for (i = 0; i < count; i++, cookies++) {
		struct llog_handle	*loghandle;
		struct llog_logid	*lgl = &cookies->lgc_lgl;

		/* resolve the plain llog referenced by this cookie */
		rc = llog_cat_id2handle(env, cathandle, &loghandle, lgl);
			CERROR("%s: cannot find handle for llog "DOSTID": %d\n",
			       cathandle->lgh_ctxt->loc_obd->obd_name,
			       POSTID(&lgl->lgl_oi), rc);

		lrc = llog_cancel_rec(env, loghandle, cookies->lgc_index);
		if (lrc == LLOG_DEL_PLAIN) { /* log has been destroyed */
			index = loghandle->u.phd.phd_cookie.lgc_index;
			rc = llog_cat_cleanup(env, cathandle, loghandle,
		} else if (lrc == -ENOENT) {
			if (rc == 0) /* ENOENT shouldn't rewrite any error */
		} else if (lrc < 0) {
		/* drop the reference taken by llog_cat_id2handle() */
		llog_handle_put(loghandle);

		CERROR("%s: fail to cancel %d of %d llog-records: rc = %d\n",
		       cathandle->lgh_ctxt->loc_obd->obd_name, failed, count,
EXPORT_SYMBOL(llog_cat_cancel_records);
/* Per-catalog-record callback for llog_cat_process_or_fork(): open the
 * plain llog named by the record, garbage-collect it if empty, and run
 * the user callback over its records (honoring lpd_startcat/lpd_startidx). */
static int llog_cat_process_cb(const struct lu_env *env,
			       struct llog_handle *cat_llh,
			       struct llog_rec_hdr *rec, void *data)
	struct llog_process_data *d = data;
	struct llog_logid_rec	*lir = (struct llog_logid_rec *)rec;
	struct llog_handle	*llh;
	struct llog_log_hdr	*hdr;

	/* every catalog record must be a logid record */
	if (rec->lrh_type != LLOG_LOGID_MAGIC) {
		CERROR("invalid record in catalog\n");
	CDEBUG(D_HA, "processing log "DOSTID":%x at index %u of catalog "
	       DOSTID"\n", POSTID(&lir->lid_id.lgl_oi), lir->lid_id.lgl_ogen,
	       rec->lrh_index, POSTID(&cat_llh->lgh_id.lgl_oi));

	rc = llog_cat_id2handle(env, cat_llh, &llh, &lir->lid_id);
		CERROR("%s: cannot find handle for llog "DOSTID": %d\n",
		       cat_llh->lgh_ctxt->loc_obd->obd_name,
		       POSTID(&lir->lid_id.lgl_oi), rc);
		if (rc == -ENOENT || rc == -ESTALE) {
			/* After a server crash, a stub of index
			 * record in catlog could be kept, because
			 * plain log destroy + catlog index record
			 * deletion are not atomic. So we end up with
			 * an index but no actual record. Destroy the
			 * index and move on. */
			rc = llog_cat_cleanup(env, cat_llh, NULL,

	/* clean old empty llogs, do not consider current llog in use */
	/* ignore remote (lgh_obj=NULL) llogs */
	if ((hdr->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
	    hdr->llh_count == 1 && cat_llh->lgh_obj != NULL &&
	    llh != cat_llh->u.chd.chd_current_log) {
		rc = llog_destroy(env, llh);
			CERROR("%s: fail to destroy empty log: rc = %d\n",
			       llh->lgh_ctxt->loc_obd->obd_name, rc);
		GOTO(out, rc = LLOG_DEL_PLAIN);

	if (rec->lrh_index < d->lpd_startcat) {
		/* Skip processing of the logs until startcat */
	} else if (d->lpd_startidx > 0) {
		struct llog_process_cat_data cd;

		/* first matching log: start at lpd_startidx */
		cd.lpcd_first_idx = d->lpd_startidx;
		cd.lpcd_last_idx = 0;
		rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data,
		/* Continue processing the next log from idx 0 */
		rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data,

	/* The empty plain log was destroyed while processing */
	if (rc == LLOG_DEL_PLAIN)
		rc = llog_cat_cleanup(env, cat_llh, llh,
				      llh->u.phd.phd_cookie.lgc_index);
	llog_handle_put(llh);
/* Process all plain llogs referenced by a catalog, starting at catalog
 * index @startcat and record index @startidx, optionally in a forked
 * thread. Handles the case where the catalog index has wrapped past zero
 * by processing in two passes. */
int llog_cat_process_or_fork(const struct lu_env *env,
			     struct llog_handle *cat_llh, llog_cb_t cat_cb,
			     llog_cb_t cb, void *data, int startcat,
			     int startidx, bool fork)
	struct llog_process_data d;
	struct llog_log_hdr	*llh = cat_llh->lgh_hdr;

	LASSERT(llh->llh_flags & LLOG_F_IS_CAT);
	d.lpd_startcat = startcat;
	d.lpd_startidx = startidx;

	/* wrapped catalog: process [cat_idx, end) first, then [0, last_idx] */
	if (llh->llh_cat_idx >= cat_llh->lgh_last_idx &&
	    llh->llh_count > 1) {
		struct llog_process_cat_data cd;

		CWARN("catlog "DOSTID" crosses index zero\n",
		      POSTID(&cat_llh->lgh_id.lgl_oi));

		cd.lpcd_first_idx = llh->llh_cat_idx;
		cd.lpcd_last_idx = 0;
		rc = llog_process_or_fork(env, cat_llh, cat_cb,

		cd.lpcd_first_idx = 0;
		cd.lpcd_last_idx = cat_llh->lgh_last_idx;
		rc = llog_process_or_fork(env, cat_llh, cat_cb,
		rc = llog_process_or_fork(env, cat_llh, cat_cb,
/* Non-forking wrapper around llog_cat_process_or_fork() using the
 * standard catalog callback. */
int llog_cat_process(const struct lu_env *env, struct llog_handle *cat_llh,
		     llog_cb_t cb, void *data, int startcat, int startidx)
	return llog_cat_process_or_fork(env, cat_llh, llog_cat_process_cb,
					cb, data, startcat, startidx, false);
EXPORT_SYMBOL(llog_cat_process);
/* Catalog callback accumulating the total size of all plain llogs into
 * the __u64 pointed to by lpd_data (used by llog_cat_size()). */
static int llog_cat_size_cb(const struct lu_env *env,
			    struct llog_handle *cat_llh,
			    struct llog_rec_hdr *rec, void *data)
	struct llog_process_data *d = data;
	struct llog_logid_rec	*lir = (struct llog_logid_rec *)rec;
	struct llog_handle	*llh;
	/* running total shared with the caller */
	__u64			*cum_size = d->lpd_data;

	if (rec->lrh_type != LLOG_LOGID_MAGIC) {
		CERROR("%s: invalid record in catalog, rc = %d\n",
		       cat_llh->lgh_ctxt->loc_obd->obd_name, -EINVAL);
	CDEBUG(D_HA, "processing log "DOSTID":%x at index %u of catalog "
	       DOSTID"\n", POSTID(&lir->lid_id.lgl_oi), lir->lid_id.lgl_ogen,
	       rec->lrh_index, POSTID(&cat_llh->lgh_id.lgl_oi));

	rc = llog_cat_id2handle(env, cat_llh, &llh, &lir->lid_id);
		CWARN("%s: cannot find handle for llog "DOSTID": rc = %d\n",
		      cat_llh->lgh_ctxt->loc_obd->obd_name,
		      POSTID(&lir->lid_id.lgl_oi), rc);

	size = llog_size(env, llh);

	CDEBUG(D_INFO, "Add llog entry "DOSTID" size %llu\n",
	       POSTID(&llh->lgh_id.lgl_oi), size);

	llog_handle_put(llh);
/* Return the total on-disk size of a catalog: the catalog object itself
 * plus every plain llog it references (summed via llog_cat_size_cb). */
__u64 llog_cat_size(const struct lu_env *env, struct llog_handle *cat_llh)
	__u64 size = llog_size(env, cat_llh);

	llog_cat_process_or_fork(env, cat_llh, llog_cat_size_cb,
				 NULL, &size, 0, 0, false);
EXPORT_SYMBOL(llog_cat_size);
/* Per-catalog-record callback for llog_cat_reverse_process(): open the
 * plain llog named by the record, garbage-collect it if empty, and run
 * the user callback over its records in reverse order.
 * NOTE(review): this callback reads record fields via le32_to_cpu(),
 * unlike llog_cat_process_cb() — presumably the record is still in
 * little-endian disk order here; confirm against the caller. */
static int llog_cat_reverse_process_cb(const struct lu_env *env,
				       struct llog_handle *cat_llh,
				       struct llog_rec_hdr *rec, void *data)
	struct llog_process_data *d = data;
	struct llog_logid_rec	*lir = (struct llog_logid_rec *)rec;
	struct llog_handle	*llh;
	struct llog_log_hdr	*hdr;

	if (le32_to_cpu(rec->lrh_type) != LLOG_LOGID_MAGIC) {
		CERROR("invalid record in catalog\n");
	CDEBUG(D_HA, "processing log "DOSTID":%x at index %u of catalog "
	       DOSTID"\n", POSTID(&lir->lid_id.lgl_oi), lir->lid_id.lgl_ogen,
	       le32_to_cpu(rec->lrh_index), POSTID(&cat_llh->lgh_id.lgl_oi));

	rc = llog_cat_id2handle(env, cat_llh, &llh, &lir->lid_id);
		CERROR("%s: cannot find handle for llog "DOSTID": %d\n",
		       cat_llh->lgh_ctxt->loc_obd->obd_name,
		       POSTID(&lir->lid_id.lgl_oi), rc);
		if (rc == -ENOENT || rc == -ESTALE) {
			/* After a server crash, a stub of index
			 * record in catlog could be kept, because
			 * plain log destroy + catlog index record
			 * deletion are not atomic. So we end up with
			 * an index but no actual record. Destroy the
			 * index and move on. */
			rc = llog_cat_cleanup(env, cat_llh, NULL,

	/* clean old empty llogs, do not consider current llog in use */
	if ((hdr->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
	    hdr->llh_count == 1 &&
	    llh != cat_llh->u.chd.chd_current_log) {
		rc = llog_destroy(env, llh);
			CERROR("%s: fail to destroy empty log: rc = %d\n",
			       llh->lgh_ctxt->loc_obd->obd_name, rc);
		GOTO(out, rc = LLOG_DEL_PLAIN);

	rc = llog_reverse_process(env, llh, d->lpd_cb, d->lpd_data, NULL);

	/* The empty plain was destroyed while processing */
	if (rc == LLOG_DEL_PLAIN)
		rc = llog_cat_cleanup(env, cat_llh, llh,
				      llh->u.phd.phd_cookie.lgc_index);

	llog_handle_put(llh);
/* Process all plain llogs referenced by a catalog in reverse order.
 * Handles a wrapped catalog index by processing in two reverse passes:
 * first [last_idx..0], then [end..cat_idx]. */
int llog_cat_reverse_process(const struct lu_env *env,
			     struct llog_handle *cat_llh,
			     llog_cb_t cb, void *data)
	struct llog_process_data d;
	struct llog_process_cat_data cd;
	struct llog_log_hdr *llh = cat_llh->lgh_hdr;

	LASSERT(llh->llh_flags & LLOG_F_IS_CAT);

	if (llh->llh_cat_idx >= cat_llh->lgh_last_idx &&
	    llh->llh_count > 1) {
		CWARN("catalog "DOSTID" crosses index zero\n",
		      POSTID(&cat_llh->lgh_id.lgl_oi));

		/* newer half of the wrapped catalog first */
		cd.lpcd_first_idx = 0;
		cd.lpcd_last_idx = cat_llh->lgh_last_idx;
		rc = llog_reverse_process(env, cat_llh,
					  llog_cat_reverse_process_cb,

		/* then the older half up to the wrap point */
		cd.lpcd_first_idx = le32_to_cpu(llh->llh_cat_idx);
		cd.lpcd_last_idx = 0;
		rc = llog_reverse_process(env, cat_llh,
					  llog_cat_reverse_process_cb,
		rc = llog_reverse_process(env, cat_llh,
					  llog_cat_reverse_process_cb,
EXPORT_SYMBOL(llog_cat_reverse_process);
/* Advance llh_cat_idx after record @idx was cancelled, skipping over any
 * further already-cleared bitmap slots so it lands just before the first
 * still-in-use catalog index. */
static int llog_cat_set_first_idx(struct llog_handle *cathandle, int idx)
	struct llog_log_hdr *llh = cathandle->lgh_hdr;

	bitmap_size = LLOG_HDR_BITMAP_SIZE(llh);
	/*
	 * The llh_cat_idx equals to the first used index minus 1
	 * so if we canceled the first index then llh_cat_idx
	 * must be advanced.
	 */
	if (llh->llh_cat_idx == (idx - 1)) {
		llh->llh_cat_idx = idx;

		/* scan forward over cleared bits up to lgh_last_idx */
		while (idx != cathandle->lgh_last_idx) {
			idx = (idx + 1) % bitmap_size;
			if (!ext2_test_bit(idx, LLOG_HDR_BITMAP(llh))) {
				/* update llh_cat_idx for each unset bit,
				 * expecting the next one is set */
				llh->llh_cat_idx = idx;
			} else if (idx == 0) {
				/* skip header bit */
				llh->llh_cat_idx = 0;
				/* the first index is found */

	CDEBUG(D_RPCTRACE, "Set catlog "DOSTID" first idx %u,"
	       " (last_idx %u)\n", POSTID(&cathandle->lgh_id.lgl_oi),
	       llh->llh_cat_idx, cathandle->lgh_last_idx);
1092 /* Cleanup deleted plain llog traces from catalog */
1093 int llog_cat_cleanup(const struct lu_env *env, struct llog_handle *cathandle,
1094 struct llog_handle *loghandle, int index)
1099 if (loghandle != NULL) {
1100 /* remove destroyed llog from catalog list and
1101 * chd_current_log variable */
1102 down_write(&cathandle->lgh_lock);
1103 if (cathandle->u.chd.chd_current_log == loghandle)
1104 cathandle->u.chd.chd_current_log = NULL;
1105 list_del_init(&loghandle->u.phd.phd_entry);
1106 up_write(&cathandle->lgh_lock);
1107 LASSERT(index == loghandle->u.phd.phd_cookie.lgc_index);
1108 /* llog was opened and keep in a list, close it now */
1109 llog_close(env, loghandle);
1112 /* do not attempt to cleanup on-disk llog if on client side */
1113 if (cathandle->lgh_obj == NULL)
1116 /* remove plain llog entry from catalog by index */
1117 llog_cat_set_first_idx(cathandle, index);
1118 rc = llog_cancel_rec(env, cathandle, index);
1120 CDEBUG(D_HA, "cancel plain log at index"
1121 " %u of catalog "DOSTID"\n",
1122 index, POSTID(&cathandle->lgh_id.lgl_oi));