4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 * lustre/obdclass/llog_cat.c
33 * OST<->MDS recovery logging infrastructure.
35 * Invariants in implementation:
36 * - we do not share logs among different OST<->MDS connections, so that
37 * if an OST or MDS fails it need only look at log(s) relevant to itself
39 * Author: Andreas Dilger <adilger@clusterfs.com>
40 * Author: Alexey Zhuravlev <alexey.zhuravlev@intel.com>
41 * Author: Mikhail Pershin <mike.pershin@intel.com>
44 #define DEBUG_SUBSYSTEM S_LOG
47 #include <obd_class.h>
49 #include "llog_internal.h"
53 * lockdep markers for nested struct llog_handle::lgh_lock locking.
60 /* Create a new log handle and add it to the open list.
61 * This log handle will be closed when all of the records in it are removed.
63 * Assumes caller has already pushed us into the kernel context and is locking.
65 static int llog_cat_new_log(const struct lu_env *env,
66 struct llog_handle *cathandle,
67 struct llog_handle *loghandle,
70 struct llog_thread_info *lgi = llog_info(env);
71 struct llog_logid_rec *rec = &lgi->lgi_logid;
72 struct thandle *handle = NULL;
73 struct dt_device *dt = NULL;
74 struct llog_log_hdr *llh = cathandle->lgh_hdr;
/* candidate catalog slot for the new plain llog; the OBD_FAIL hook shrinks
 * the effective bitmap size so tests can force a catalog wrap quickly */
79 index = (cathandle->lgh_last_idx + 1) %
80 (OBD_FAIL_PRECHECK(OBD_FAIL_CAT_RECORDS) ? (cfs_fail_val + 1) :
81 LLOG_HDR_BITMAP_SIZE(llh));
83 /* check that new llog index will not overlap with the first one.
84 * - llh_cat_idx is the index just before the first/oldest still in-use
86 * - lgh_last_idx is the last/newest used index in catalog
88 * When catalog is not wrapped yet then lgh_last_idx is always larger
89 * than llh_cat_idx. After the wrap around lgh_last_idx re-starts
90 * from 0 and llh_cat_idx becomes the upper limit for it
92 * Check if catalog has already wrapped around or not by comparing
93 * last_idx and cat_idx */
94 if ((index == llh->llh_cat_idx + 1 && llh->llh_count > 1) ||
95 (index == 0 && llh->llh_cat_idx == 0)) {
96 if (cathandle->lgh_name == NULL) {
97 CWARN("%s: there are no more free slots in catalog "DFID"\n",
98 loghandle2name(loghandle),
99 PLOGID(&cathandle->lgh_id));
101 CWARN("%s: there are no more free slots in catalog %s\n",
102 loghandle2name(loghandle), cathandle->lgh_name);
/* fault-injection point: simulate plain llog creation failure */
107 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_LLOG_CREATE_FAILED))
110 if (loghandle->lgh_hdr != NULL) {
111 /* If llog object is remote and creation is failed, lgh_hdr
112 * might be left over here, free it first */
113 LASSERT(!llog_exist(loghandle));
114 OBD_FREE_LARGE(loghandle->lgh_hdr, loghandle->lgh_hdr_size);
115 loghandle->lgh_hdr = NULL;
/* no transaction supplied by caller: build a local one on the catalog's
 * backing dt device (see the matching dt_trans_stop() below) */
119 dt = lu2dt_dev(cathandle->lgh_obj->do_lu.lo_dev);
121 handle = dt_trans_create(env, dt);
123 RETURN(PTR_ERR(handle));
125 /* Create update llog object synchronously, which
126 * happens during initialization process see
127 * lod_sub_prep_llog(), to make sure the update
128 * llog object is created before cross-MDT writing
129 * updates into the llog object */
130 if (cathandle->lgh_ctxt->loc_flags & LLOG_CTXT_FLAG_NORMAL_FID)
133 handle->th_wait_submit = 1;
135 rc = llog_declare_create(env, loghandle, handle);
/* reserve space for the catalog entry pointing at the new plain llog */
139 rec->lid_hdr.lrh_len = sizeof(*rec);
140 rec->lid_hdr.lrh_type = LLOG_LOGID_MAGIC;
141 rec->lid_id = loghandle->lgh_id;
142 rc = llog_declare_write_rec(env, cathandle, &rec->lid_hdr, -1,
147 rc = dt_trans_start_local(env, dt, handle);
154 rc = llog_create(env, loghandle, th);
155 /* if llog is already created, no need to initialize it */
158 } else if (rc != 0) {
159 CERROR("%s: can't create new plain llog in catalog: rc = %d\n",
160 loghandle2name(loghandle), rc);
164 rc = llog_init_handle(env, loghandle,
165 LLOG_F_IS_PLAIN | LLOG_F_ZAP_WHEN_EMPTY,
166 &cathandle->lgh_hdr->llh_tgtuuid);
170 /* build the record for this log in the catalog */
171 rec->lid_hdr.lrh_len = sizeof(*rec);
172 rec->lid_hdr.lrh_type = LLOG_LOGID_MAGIC;
173 rec->lid_id = loghandle->lgh_id;
175 /* append the new record into catalog. The new index will be
176 * assigned to the record and updated in rec header */
177 rc = llog_write_rec(env, cathandle, &rec->lid_hdr,
178 &loghandle->u.phd.phd_cookie, LLOG_NEXT_IDX, th);
180 GOTO(out_destroy, rc);
182 CDEBUG(D_OTHER, "new plain log "DFID".%u of catalog "DFID"\n",
183 PLOGID(&loghandle->lgh_id), rec->lid_hdr.lrh_index,
184 PLOGID(&cathandle->lgh_id));
/* remember which catalog slot references this plain llog */
186 loghandle->lgh_hdr->llh_cat_idx = rec->lid_hdr.lrh_index;
188 /* limit max size of plain llog so that space can be
189 * released sooner, especially on small filesystems */
190 /* 2MB for the cases when free space hasn't been learned yet */
191 loghandle->lgh_max_size = 2 << 20;
192 dt = lu2dt_dev(cathandle->lgh_obj->do_lu.lo_dev);
193 rc = dt_statfs(env, dt, &lgi->lgi_statfs);
194 if (rc == 0 && lgi->lgi_statfs.os_bfree > 0) {
/* cap plain llog size at 1/64 of currently free space */
195 __u64 freespace = (lgi->lgi_statfs.os_bfree *
196 lgi->lgi_statfs.os_bsize) >> 6;
197 if (freespace < loghandle->lgh_max_size)
198 loghandle->lgh_max_size = freespace;
199 /* shouldn't be > 128MB in any case?
200 * it's 256K records of 512 bytes each */
201 if (freespace > (128 << 20))
202 loghandle->lgh_max_size = 128 << 20;
204 if (unlikely(OBD_FAIL_PRECHECK(OBD_FAIL_PLAIN_RECORDS) ||
205 OBD_FAIL_PRECHECK(OBD_FAIL_CATALOG_FULL_CHECK))) {
206 // limit the number of plain records for test
207 loghandle->lgh_max_size = loghandle->lgh_hdr_size +
/* stop the locally-created transaction, propagating rc via th_result */
214 if (handle != NULL) {
215 handle->th_result = rc >= 0 ? 0 : rc;
216 dt_trans_stop(env, dt, handle);
221 /* to signal llog_cat_close() it shouldn't try to destroy the llog,
222 * we want to destroy it in this transaction, otherwise the object
223 * becomes an orphan */
224 loghandle->lgh_hdr->llh_flags &= ~LLOG_F_ZAP_WHEN_EMPTY;
225 /* this is to mimic full log, so another llog_cat_current_log()
226 * can skip it and ask for another one */
227 loghandle->lgh_last_idx = LLOG_HDR_BITMAP_SIZE(loghandle->lgh_hdr) + 1;
228 llog_trans_destroy(env, loghandle, th);
230 dt_trans_stop(env, dt, handle);
/* Re-read the on-disk headers of the catalog and of every already-created
 * plain llog on its chd_head list, all under the catalog write lock.
 * Used after -ESTALE to resynchronize cached headers (see callers). */
234 static int llog_cat_refresh(const struct lu_env *env,
235 struct llog_handle *cathandle)
237 struct llog_handle *loghandle;
240 down_write(&cathandle->lgh_lock);
241 list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
/* skip plain llogs that were opened but never created on disk */
243 if (!llog_exist(loghandle))
246 down_write(&loghandle->lgh_lock);
247 rc = llog_read_header(env, loghandle, NULL);
248 up_write(&loghandle->lgh_lock);
/* finally refresh the catalog's own header */
253 rc = llog_read_header(env, cathandle, NULL);
255 up_write(&cathandle->lgh_lock);
261 * prepare current/next log for catalog.
263 * if \a *ploghandle is NULL, open it, and declare create, NB, if \a
264 * *ploghandle is remote, create it synchronously here, see comments
267 * \a cathandle->lgh_lock is down_read-ed, it gets down_write-ed if \a
268 * *ploghandle has to be opened.
270 static int llog_cat_prep_log(const struct lu_env *env,
271 struct llog_handle *cathandle,
272 struct llog_handle **ploghandle,
/* slot empty (or holds a stored error): upgrade to write lock and open a
 * fresh plain llog.  NOTE(review): the re-check after the lock upgrade
 * handles the race where another thread filled the slot in between. */
281 if (IS_ERR_OR_NULL(*ploghandle)) {
282 up_read(&cathandle->lgh_lock);
283 down_write(&cathandle->lgh_lock);
285 if (IS_ERR_OR_NULL(*ploghandle)) {
286 struct llog_handle *loghandle;
288 rc = llog_open(env, cathandle->lgh_ctxt, &loghandle,
289 NULL, NULL, LLOG_OPEN_NEW);
291 *ploghandle = loghandle;
292 list_add_tail(&loghandle->u.phd.phd_entry,
293 &cathandle->u.chd.chd_head);
300 rc = llog_exist(*ploghandle);
306 if (dt_object_remote(cathandle->lgh_obj)) {
307 down_write_nested(&(*ploghandle)->lgh_lock, LLOGH_LOG);
308 if (!llog_exist(*ploghandle)) {
309 /* For remote operation, if we put the llog object
310 * creation in the current transaction, then the
311 * llog object will not be created on the remote
312 * target until the transaction stop, if other
313 * operations start before the transaction stop,
314 * and use the same llog object, will be dependent
315 * on the success of this transaction. So let's
316 * create the llog object synchronously here to
317 * remove the dependency. */
318 rc = llog_cat_new_log(env, cathandle, *ploghandle,
321 up_write(&(*ploghandle)->lgh_lock);
323 up_write(&cathandle->lgh_lock);
325 up_read(&cathandle->lgh_lock);
/* stale handles after remote create: re-read headers and retry */
327 rc = llog_cat_refresh(env, cathandle);
328 down_read_nested(&cathandle->lgh_lock,
332 /* *ploghandle might become NULL, restart */
336 up_write(&(*ploghandle)->lgh_lock);
/* local llog: only declare creation/write here; the actual create is
 * performed later inside the caller's transaction */
338 struct llog_thread_info *lgi = llog_info(env);
339 struct llog_logid_rec *lirec = &lgi->lgi_logid;
341 rc = llog_declare_create(env, *ploghandle, th);
345 lirec->lid_hdr.lrh_len = sizeof(*lirec);
346 rc = llog_declare_write_rec(env, cathandle, &lirec->lid_hdr, -1,
/* restore the read lock the caller expects to still hold */
352 up_write(&cathandle->lgh_lock);
353 down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
360 /* Open an existing log handle and add it to the open list.
361 * This log handle will be closed when all of the records in it are removed.
363 * Assumes caller has already pushed us into the kernel context and is locking.
364 * We return a lock on the handle to ensure nobody yanks it from us.
366 * This takes extra reference on llog_handle via llog_handle_get() and require
367 * this reference to be put by caller using llog_handle_put()
369 int llog_cat_id2handle(const struct lu_env *env, struct llog_handle *cathandle,
370 struct llog_handle **res, struct llog_logid *logid)
372 struct llog_handle *loghandle;
378 if (cathandle == NULL)
/* inherit extended-record format flags from the catalog header */
381 fmt = cathandle->lgh_hdr->llh_flags & LLOG_F_EXT_MASK;
382 down_write(&cathandle->lgh_lock);
/* fast path: the plain llog may already be on the open list */
383 list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
385 struct llog_logid *cgl = &loghandle->lgh_id;
387 if (ostid_id(&cgl->lgl_oi) == ostid_id(&logid->lgl_oi) &&
388 ostid_seq(&cgl->lgl_oi) == ostid_seq(&logid->lgl_oi)) {
389 *res = llog_handle_get(loghandle);
391 CERROR("%s: log "DFID" refcount is zero!\n",
392 loghandle2name(loghandle),
396 loghandle->u.phd.phd_cat_handle = cathandle;
397 up_write(&cathandle->lgh_lock);
401 up_write(&cathandle->lgh_lock);
/* slow path: open the plain llog by its logid and initialize it */
403 rc = llog_open(env, cathandle->lgh_ctxt, &loghandle, logid, NULL,
406 CERROR("%s: error opening log id "DFID": rc = %d\n",
407 loghandle2name(cathandle), PLOGID(logid), rc);
411 rc = llog_init_handle(env, loghandle, LLOG_F_IS_PLAIN |
412 LLOG_F_ZAP_WHEN_EMPTY | fmt, NULL);
414 llog_close(env, loghandle);
419 *res = llog_handle_get(loghandle);
420 down_write(&cathandle->lgh_lock);
421 list_add_tail(&loghandle->u.phd.phd_entry, &cathandle->u.chd.chd_head);
422 up_write(&cathandle->lgh_lock);
/* link plain llog back to its catalog entry (cookie = catalog slot) */
425 loghandle->u.phd.phd_cat_handle = cathandle;
426 loghandle->u.phd.phd_cookie.lgc_lgl = cathandle->lgh_id;
427 loghandle->u.phd.phd_cookie.lgc_index =
428 loghandle->lgh_hdr->llh_cat_idx;
/* Close a catalog: destroy empty deletable plain llogs still on the open
 * list, clean their catalog entries, then close every plain handle and the
 * catalog handle itself. */
432 int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle)
434 struct llog_handle *loghandle, *n;
/* _safe iteration: entries are unlinked/closed while walking */
439 list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head,
441 struct llog_log_hdr *llh = loghandle->lgh_hdr;
444 /* unlink open-not-created llogs */
445 list_del_init(&loghandle->u.phd.phd_entry);
446 llh = loghandle->lgh_hdr;
/* destroy plain llog only when it is empty (header record only)
 * and flagged for zap-when-empty; remote-only handles
 * (lgh_obj == NULL) are skipped */
447 if (loghandle->lgh_obj != NULL && llh != NULL &&
448 (llh->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
449 (llh->llh_count == 1)) {
450 rc = llog_destroy(env, loghandle);
452 CERROR("%s: failure destroying log during "
453 "cleanup: rc = %d\n",
454 loghandle2name(loghandle), rc);
456 index = loghandle->u.phd.phd_cookie.lgc_index;
457 llog_cat_cleanup(env, cathandle, NULL, index);
459 llog_close(env, loghandle);
461 /* if handle was stored in ctxt, remove it too */
462 if (cathandle->lgh_ctxt->loc_handle == cathandle)
463 cathandle->lgh_ctxt->loc_handle = NULL;
464 rc = llog_close(env, cathandle);
467 EXPORT_SYMBOL(llog_cat_close);
469 /** Return the currently active log handle. If the current log handle doesn't
470 * have enough space left for the current record, start a new one.
472 * If reclen is 0, we only want to know what the currently active log is,
473 * otherwise we get a lock on this log so nobody can steal our space.
475 * Assumes caller has already pushed us into the kernel context and is locking.
477 * NOTE: loghandle is write-locked upon successful return
479 static struct llog_handle *llog_cat_current_log(struct llog_handle *cathandle,
482 struct llog_handle *loghandle = NULL;
/* fault injection: force the "switch to next log" path for tests */
486 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_LLOG_CREATE_FAILED2)) {
487 down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
488 GOTO(next, loghandle);
/* optimistic pass under the catalog read lock: reuse current log if it
 * still has room (lockdep classes LLOGH_CAT/LLOGH_LOG order cat->log) */
491 down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
492 loghandle = cathandle->u.chd.chd_current_log;
494 struct llog_log_hdr *llh;
496 down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
497 llh = loghandle->lgh_hdr;
498 if (llh == NULL || !llog_is_full(loghandle)) {
499 up_read(&cathandle->lgh_lock);
502 up_write(&loghandle->lgh_lock);
505 up_read(&cathandle->lgh_lock);
507 /* time to use next log */
509 /* first, we have to make sure the state hasn't changed */
510 down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
511 loghandle = cathandle->u.chd.chd_current_log;
513 struct llog_log_hdr *llh;
515 down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
516 llh = loghandle->lgh_hdr;
517 if (llh == NULL || !llog_is_full(loghandle))
518 GOTO(out_unlock, loghandle);
520 up_write(&loghandle->lgh_lock);
524 /* Sigh, the chd_next_log and chd_current_log is initialized
525 * in declare phase, and we do not serialize the catlog
526 * accessing, so it might be possible the llog creation
527 * thread (see llog_cat_declare_add_rec()) did not create
528 * llog successfully, then the following thread might
529 * meet this situation. */
530 if (IS_ERR_OR_NULL(cathandle->u.chd.chd_next_log)) {
531 CERROR("%s: next log does not exist!\n",
532 loghandle2name(cathandle));
533 loghandle = ERR_PTR(-EIO);
534 if (cathandle->u.chd.chd_next_log == NULL) {
535 /* Store the error in chd_next_log, so
536 * the following process can get correct
538 cathandle->u.chd.chd_next_log = loghandle;
540 GOTO(out_unlock, loghandle);
543 CDEBUG(D_INODE, "use next log\n");
/* promote the prepared next log to current; a fresh next log will be
 * prepared by a later llog_cat_declare_add_rec() */
545 loghandle = cathandle->u.chd.chd_next_log;
546 cathandle->u.chd.chd_current_log = loghandle;
547 cathandle->u.chd.chd_next_log = NULL;
548 down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
551 up_write(&cathandle->lgh_lock);
556 /* Add a single record to the recovery log(s) using a catalog
557 * Returns as llog_write_record
559 * Assumes caller has already pushed us into the kernel context.
561 int llog_cat_add_rec(const struct lu_env *env, struct llog_handle *cathandle,
562 struct llog_rec_hdr *rec, struct llog_cookie *reccookie,
565 struct llog_handle *loghandle;
/* record must fit into one llog chunk */
569 LASSERT(rec->lrh_len <= cathandle->lgh_ctxt->loc_chunk_size);
572 loghandle = llog_cat_current_log(cathandle, th);
573 if (IS_ERR(loghandle))
574 RETURN(PTR_ERR(loghandle));
576 /* loghandle is already locked by llog_cat_current_log() for us */
577 if (!llog_exist(loghandle)) {
578 rc = llog_cat_new_log(env, cathandle, loghandle, th);
/* creation failed: drop this handle from chd_current_log so the
 * next attempt picks/creates another plain llog */
580 up_write(&loghandle->lgh_lock);
581 /* nobody should be trying to use this llog */
582 down_write(&cathandle->lgh_lock);
583 if (cathandle->u.chd.chd_current_log == loghandle)
584 cathandle->u.chd.chd_current_log = NULL;
585 up_write(&cathandle->lgh_lock);
589 /* now let's try to add the record */
590 rc = llog_write_rec(env, loghandle, rec, reccookie, LLOG_NEXT_IDX, th);
592 CDEBUG_LIMIT(rc == -ENOSPC ? D_HA : D_ERROR,
593 "llog_write_rec %d: lh=%p\n", rc, loghandle);
594 /* -ENOSPC is returned if no empty records left
595 * and when it's lack of space on the storage.
596 * there is no point to try again if it's the second
597 * case. many callers (like llog test) expect ENOSPC,
598 * so we preserve this error code, but look for the
599 * actual cause here */
600 if (rc == -ENOSPC && llog_is_full(loghandle))
603 up_write(&loghandle->lgh_lock);
/* -ENOBUFS: retry once with the next plain llog */
605 if (rc == -ENOBUFS) {
608 CERROR("%s: error on 2nd llog: rc = %d\n",
609 loghandle2name(cathandle), rc);
614 EXPORT_SYMBOL(llog_cat_add_rec);
/* Declare-phase companion of llog_cat_add_rec(): make sure both the current
 * and the next plain llog are opened/declared, then declare the record write
 * in both so the later add can switch logs within the same transaction. */
616 int llog_cat_declare_add_rec(const struct lu_env *env,
617 struct llog_handle *cathandle,
618 struct llog_rec_hdr *rec, struct thandle *th)
625 down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
626 rc = llog_cat_prep_log(env, cathandle,
627 &cathandle->u.chd.chd_current_log, th);
631 rc = llog_cat_prep_log(env, cathandle, &cathandle->u.chd.chd_next_log,
636 rc = llog_declare_write_rec(env, cathandle->u.chd.chd_current_log,
/* stale remote handle: refresh cached headers and retry the declare */
638 if (rc == -ESTALE && dt_object_remote(cathandle->lgh_obj)) {
639 up_read(&cathandle->lgh_lock);
640 rc = llog_cat_refresh(env, cathandle);
648 * XXX: we hope for declarations made for existing llog this might be
649 * not correct with some backends where declarations are expected
650 * against specific object like ZFS with full debugging enabled.
652 rc = llog_declare_write_rec(env, cathandle->u.chd.chd_next_log, rec, -1,
656 up_read(&cathandle->lgh_lock);
659 EXPORT_SYMBOL(llog_cat_declare_add_rec);
/* Convenience wrapper: create a local transaction on the catalog's dt
 * device, declare and write one record through the catalog, stop the
 * transaction.  Requires a local (non-client) catalog object. */
661 int llog_cat_add(const struct lu_env *env, struct llog_handle *cathandle,
662 struct llog_rec_hdr *rec, struct llog_cookie *reccookie)
664 struct llog_ctxt *ctxt;
665 struct dt_device *dt;
666 struct thandle *th = NULL;
669 ctxt = cathandle->lgh_ctxt;
671 LASSERT(ctxt->loc_exp);
673 LASSERT(cathandle->lgh_obj != NULL);
674 dt = lu2dt_dev(cathandle->lgh_obj->do_lu.lo_dev);
676 th = dt_trans_create(env, dt);
680 rc = llog_cat_declare_add_rec(env, cathandle, rec, th);
684 rc = dt_trans_start_local(env, dt, th);
687 rc = llog_cat_add_rec(env, cathandle, rec, reccookie, th);
689 dt_trans_stop(env, dt, th);
692 EXPORT_SYMBOL(llog_cat_add);
/* Cancel an array of records in the plain llog identified by \a lgl.
 * If the plain llog becomes empty and is destroyed (LLOG_DEL_PLAIN),
 * also remove its entry from the catalog. */
694 int llog_cat_cancel_arr_rec(const struct lu_env *env,
695 struct llog_handle *cathandle,
696 struct llog_logid *lgl, int count, int *index)
698 struct llog_handle *loghandle;
702 rc = llog_cat_id2handle(env, cathandle, &loghandle, lgl);
704 CDEBUG(D_HA, "%s: can't find llog handle for "DFID": rc = %d\n",
705 loghandle2name(cathandle), PLOGID(lgl), rc);
709 if ((cathandle->lgh_ctxt->loc_flags &
710 LLOG_CTXT_FLAG_NORMAL_FID) && !llog_exist(loghandle)) {
711 /* For update log, some of loghandles of cathandle
712 * might not exist because remote llog creation might
713 * be failed, so let's skip the record cancellation
714 * for these non-exist llogs.
717 CDEBUG(D_HA, "%s: llog "DFID" does not exist: rc = %d\n",
718 loghandle2name(cathandle), PLOGID(lgl), rc);
719 llog_handle_put(env, loghandle);
723 rc = llog_cancel_arr_rec(env, loghandle, count, index);
724 if (rc == LLOG_DEL_PLAIN) { /* log has been destroyed */
727 cat_index = loghandle->u.phd.phd_cookie.lgc_index;
728 rc = llog_cat_cleanup(env, cathandle, loghandle, cat_index);
730 CERROR("%s: fail to cancel catalog record: rc = %d\n",
731 loghandle2name(cathandle), rc);
/* drop the reference taken by llog_cat_id2handle() */
735 llog_handle_put(env, loghandle);
738 CERROR("%s: fail to cancel %d llog-records: rc = %d\n",
739 loghandle2name(cathandle), count, rc);
743 EXPORT_SYMBOL(llog_cat_cancel_arr_rec);
745 /* For each cookie in the cookie array, we clear the log in-use bit and either:
746 * - the log is empty, so mark it free in the catalog header and delete it
747 * - the log is not empty, just write out the log header
749 * The cookies may be in different log files, so we need to get new logs
752 * Assumes caller has already pushed us into the kernel context.
754 int llog_cat_cancel_records(const struct lu_env *env,
755 struct llog_handle *cathandle, int count,
756 struct llog_cookie *cookies)
758 int i, rc = 0, failed = 0;
/* cancel one record per cookie; keep going on failure and report the
 * number of failed cancellations at the end */
762 for (i = 0; i < count; i++, cookies++) {
765 lrc = llog_cat_cancel_arr_rec(env, cathandle, &cookies->lgc_lgl,
766 1, &cookies->lgc_index);
774 CERROR("%s: fail to cancel %d of %d llog-records: rc = %d\n",
775 loghandle2name(cathandle), failed, count, rc);
778 EXPORT_SYMBOL(llog_cat_cancel_records);
/* Common preamble for catalog-processing callbacks: validate the catalog
 * record, resolve it to an open plain llog handle in *llhp, and destroy
 * old empty deletable llogs on the way.  Returns 0 with *llhp set, or
 * LLOG_DEL_RECORD when the catalog entry should be dropped. */
780 static int llog_cat_process_common(const struct lu_env *env,
781 struct llog_handle *cat_llh,
782 struct llog_rec_hdr *rec,
783 struct llog_handle **llhp)
785 struct llog_logid_rec *lir = container_of(rec, typeof(*lir), lid_hdr);
786 struct llog_log_hdr *hdr;
/* catalog entries must be llog_logid_rec records */
790 if (rec->lrh_type != le32_to_cpu(LLOG_LOGID_MAGIC)) {
792 CWARN("%s: invalid record in catalog "DFID": rc = %d\n",
793 loghandle2name(cat_llh), PLOGID(&cat_llh->lgh_id), rc);
796 CDEBUG(D_HA, "processing log "DFID" at index %u of catalog "DFID"\n",
797 PLOGID(&lir->lid_id), le32_to_cpu(rec->lrh_index),
798 PLOGID(&cat_llh->lgh_id));
800 rc = llog_cat_id2handle(env, cat_llh, llhp, &lir->lid_id);
802 /* After a server crash, a stub of index record in catlog could
803 * be kept, because plain log destroy + catlog index record
804 * deletion are not atomic. So we end up with an index but no
805 * actual record. Destroy the index and move on. */
806 if (rc == -ENOENT || rc == -ESTALE)
807 rc = LLOG_DEL_RECORD;
809 CWARN("%s: can't find llog handle "DFID": rc = %d\n",
810 loghandle2name(cat_llh), PLOGID(&lir->lid_id),
816 /* clean old empty llogs, do not consider current llog in use */
817 /* ignore remote (lgh_obj == NULL) llogs */
818 hdr = (*llhp)->lgh_hdr;
819 if ((hdr->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
820 hdr->llh_count == 1 && cat_llh->lgh_obj != NULL &&
821 *llhp != cat_llh->u.chd.chd_current_log &&
822 *llhp != cat_llh->u.chd.chd_next_log) {
823 rc = llog_destroy(env, *llhp);
825 CWARN("%s: can't destroy empty log "DFID": rc = %d\n",
826 loghandle2name((*llhp)), PLOGID(&lir->lid_id),
/* Per-catalog-record callback for llog_cat_process_or_fork(): resolve the
 * plain llog and run the user callback over its records, honoring the
 * startcat/startidx resume point carried in llog_process_data. */
834 static int llog_cat_process_cb(const struct lu_env *env,
835 struct llog_handle *cat_llh,
836 struct llog_rec_hdr *rec, void *data)
838 struct llog_process_data *d = data;
839 struct llog_handle *llh = NULL;
843 rc = llog_cat_process_common(env, cat_llh, rec, &llh);
847 if (rec->lrh_index < d->lpd_startcat) {
848 /* Skip processing of the logs until startcat */
850 } else if (d->lpd_startidx > 0) {
/* first resumed plain llog: start from lpd_startidx */
851 struct llog_process_cat_data cd;
853 cd.lpcd_read_mode = LLOG_READ_MODE_NORMAL;
854 cd.lpcd_first_idx = d->lpd_startidx;
855 cd.lpcd_last_idx = 0;
856 rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data,
858 /* Continue processing the next log from idx 0 */
861 rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data,
864 if (rc == -ENOENT && (cat_llh->lgh_hdr->llh_flags & LLOG_F_RM_ON_ERR)) {
866 * plain llog is reported corrupted, so better to just remove
867 * it if the caller is fine with that.
869 CERROR("%s: remove corrupted/missing llog "DFID"\n",
870 loghandle2name(cat_llh), PLOGID(&llh->lgh_id));
875 /* The empty plain log was destroyed while processing */
876 if (rc == LLOG_DEL_PLAIN || rc == LLOG_DEL_RECORD)
877 /* clear wrong catalog entry */
878 rc = llog_cat_cleanup(env, cat_llh, llh, rec->lrh_index);
879 else if (rc == LLOG_SKIP_PLAIN)
880 /* processing callback ask to skip the llog -> continue */
884 llog_handle_put(env, llh);
/* Walk catalog records (optionally in a forked thread) applying \a cat_cb
 * to each; handles the wrapped-catalog case by processing the tail part
 * [llh_cat_idx+1, end] before the head part [start, lgh_last_idx]. */
889 int llog_cat_process_or_fork(const struct lu_env *env,
890 struct llog_handle *cat_llh, llog_cb_t cat_cb,
891 llog_cb_t cb, void *data, int startcat,
892 int startidx, bool fork)
894 struct llog_process_data d;
895 struct llog_log_hdr *llh = cat_llh->lgh_hdr;
900 LASSERT(llh->llh_flags & LLOG_F_IS_CAT);
903 d.lpd_startcat = (startcat == LLOG_CAT_FIRST ? 0 : startcat);
904 d.lpd_startidx = startidx;
/* cat_idx >= last_idx with more than the header record present means
 * the catalog has wrapped past index zero */
906 if (llh->llh_cat_idx >= cat_llh->lgh_last_idx &&
907 llh->llh_count > 1) {
908 struct llog_process_cat_data cd = {
909 .lpcd_read_mode = LLOG_READ_MODE_NORMAL
912 CWARN("%s: catlog "DFID" crosses index zero\n",
913 loghandle2name(cat_llh), PLOGID(&cat_llh->lgh_id));
914 /*startcat = 0 is default value for general processing */
915 if ((startcat != LLOG_CAT_FIRST &&
916 startcat >= llh->llh_cat_idx) || !startcat) {
917 /* processing the catalog part at the end */
918 cd.lpcd_first_idx = (startcat ? startcat :
920 if (OBD_FAIL_PRECHECK(OBD_FAIL_CAT_RECORDS))
921 cd.lpcd_last_idx = cfs_fail_val;
923 cd.lpcd_last_idx = 0;
924 rc = llog_process_or_fork(env, cat_llh, cat_cb,
926 /* Reset the startcat because it has already reached
934 /* processing the catalog part at the beginning */
935 cd.lpcd_first_idx = (startcat == LLOG_CAT_FIRST) ? 0 : startcat;
936 /* Note, the processing will stop at the lgh_last_idx value,
937 * and it could be increased during processing. So records
938 * between current lgh_last_idx and lgh_last_idx in future
939 * would be left unprocessed.
941 cd.lpcd_last_idx = cat_llh->lgh_last_idx;
942 rc = llog_process_or_fork(env, cat_llh, cat_cb,
/* non-wrapped catalog: single pass over all records */
945 rc = llog_process_or_fork(env, cat_llh, cat_cb,
951 EXPORT_SYMBOL(llog_cat_process_or_fork);
/* Synchronous (non-forking) catalog processing with the default
 * per-record callback llog_cat_process_cb. */
953 int llog_cat_process(const struct lu_env *env, struct llog_handle *cat_llh,
954 llog_cb_t cb, void *data, int startcat, int startidx)
956 return llog_cat_process_or_fork(env, cat_llh, llog_cat_process_cb,
957 cb, data, startcat, startidx, false);
959 EXPORT_SYMBOL(llog_cat_process);
/* Callback for llog_cat_size(): accumulate each plain llog's size into the
 * __u64 pointed to by lpd_data; deleted/empty llogs are cleaned up and not
 * counted. */
961 static int llog_cat_size_cb(const struct lu_env *env,
962 struct llog_handle *cat_llh,
963 struct llog_rec_hdr *rec, void *data)
965 struct llog_process_data *d = data;
966 struct llog_handle *llh = NULL;
967 __u64 *cum_size = d->lpd_data;
972 rc = llog_cat_process_common(env, cat_llh, rec, &llh);
974 if (rc == LLOG_DEL_PLAIN) {
975 /* empty log was deleted, don't count it */
976 rc = llog_cat_cleanup(env, cat_llh, llh,
977 llh->u.phd.phd_cookie.lgc_index);
978 } else if (rc == LLOG_DEL_RECORD) {
979 /* clear wrong catalog entry */
980 rc = llog_cat_cleanup(env, cat_llh, NULL, rec->lrh_index);
982 size = llog_size(env, llh);
985 CDEBUG(D_INFO, "Add llog entry "DFID" size=%llu, tot=%llu\n",
986 PLOGID(&llh->lgh_id), size, *cum_size);
990 llog_handle_put(env, llh);
/* Total size of the catalog itself plus all of its plain llogs
 * (summed via llog_cat_size_cb). */
995 __u64 llog_cat_size(const struct lu_env *env, struct llog_handle *cat_llh)
997 __u64 size = llog_size(env, cat_llh);
999 llog_cat_process_or_fork(env, cat_llh, llog_cat_size_cb,
1000 NULL, &size, 0, 0, false);
1004 EXPORT_SYMBOL(llog_cat_size);
1006 /* currently returns the number of "free" entries in catalog,
1007 * ie the available entries for a new plain LLOG file creation,
1008 * even if catalog has wrapped
1010 __u32 llog_cat_free_space(struct llog_handle *cat_llh)
1012 /* simulate almost full Catalog */
1013 if (OBD_FAIL_CHECK(OBD_FAIL_CAT_FREE_RECORDS))
1014 return cfs_fail_val;
/* only the header record present: everything but the header is free */
1016 if (cat_llh->lgh_hdr->llh_count == 1)
1017 return LLOG_HDR_BITMAP_SIZE(cat_llh->lgh_hdr) - 1;
/* not wrapped: free slots are total minus span [cat_idx, last_idx] */
1019 if (cat_llh->lgh_last_idx > cat_llh->lgh_hdr->llh_cat_idx)
1020 return LLOG_HDR_BITMAP_SIZE(cat_llh->lgh_hdr) - 1 +
1021 cat_llh->lgh_hdr->llh_cat_idx - cat_llh->lgh_last_idx;
1023 /* catalog is presently wrapped */
1024 return cat_llh->lgh_hdr->llh_cat_idx - cat_llh->lgh_last_idx;
1026 EXPORT_SYMBOL(llog_cat_free_space);
/* Per-catalog-record callback for llog_cat_reverse_process(): resolve the
 * plain llog and run the user callback over its records in reverse order. */
1028 static int llog_cat_reverse_process_cb(const struct lu_env *env,
1029 struct llog_handle *cat_llh,
1030 struct llog_rec_hdr *rec, void *data)
1032 struct llog_process_data *d = data;
1033 struct llog_handle *llh;
1037 rc = llog_cat_process_common(env, cat_llh, rec, &llh);
1039 /* The empty plain log was destroyed while processing */
1040 if (rc == LLOG_DEL_PLAIN) {
1041 rc = llog_cat_cleanup(env, cat_llh, llh,
1042 llh->u.phd.phd_cookie.lgc_index);
1043 } else if (rc == LLOG_DEL_RECORD) {
1044 /* clear wrong catalog entry */
1045 rc = llog_cat_cleanup(env, cat_llh, NULL, rec->lrh_index);
1046 } else if (rc == LLOG_SKIP_PLAIN) {
1047 /* processing callback ask to skip the llog -> continue */
1053 rc = llog_reverse_process(env, llh, d->lpd_cb, d->lpd_data, NULL);
1055 /* The empty plain was destroyed while processing */
1056 if (rc == LLOG_DEL_PLAIN)
1057 rc = llog_cat_cleanup(env, cat_llh, llh,
1058 llh->u.phd.phd_cookie.lgc_index);
/* drop the reference taken by llog_cat_process_common() */
1060 llog_handle_put(env, llh);
/* Process catalog records newest-to-oldest, applying \a cb to each record
 * of each plain llog.  Mirrors llog_cat_process_or_fork(): a wrapped
 * catalog is walked in two passes, head part [0, lgh_last_idx] first, then
 * the tail part starting at llh_cat_idx. */
1064 int llog_cat_reverse_process(const struct lu_env *env,
1065 struct llog_handle *cat_llh,
1066 llog_cb_t cb, void *data)
1068 struct llog_process_data d;
1069 struct llog_process_cat_data cd;
1070 struct llog_log_hdr *llh = cat_llh->lgh_hdr;
1074 LASSERT(llh->llh_flags & LLOG_F_IS_CAT);
1075 cd.lpcd_read_mode = LLOG_READ_MODE_NORMAL;
/* wrapped catalog: see the equivalent check in llog_cat_process_or_fork */
1079 if (llh->llh_cat_idx >= cat_llh->lgh_last_idx &&
1080 llh->llh_count > 1) {
1081 CWARN("%s: catalog "DFID" crosses index zero\n",
1082 loghandle2name(cat_llh),
1083 PLOGID(&cat_llh->lgh_id));
1085 cd.lpcd_first_idx = 0;
1086 cd.lpcd_last_idx = cat_llh->lgh_last_idx;
1087 rc = llog_reverse_process(env, cat_llh,
1088 llog_cat_reverse_process_cb,
1093 cd.lpcd_first_idx = le32_to_cpu(llh->llh_cat_idx);
1094 cd.lpcd_last_idx = 0;
1095 rc = llog_reverse_process(env, cat_llh,
1096 llog_cat_reverse_process_cb,
/* non-wrapped catalog: single reverse pass */
1099 rc = llog_reverse_process(env, cat_llh,
1100 llog_cat_reverse_process_cb,
1106 EXPORT_SYMBOL(llog_cat_reverse_process);
/* Advance llh_cat_idx after the record at \a idx was canceled, skipping
 * over any further already-unset bitmap bits so llh_cat_idx again points
 * just before the first still-in-use catalog slot. */
1108 static int llog_cat_set_first_idx(struct llog_handle *cathandle, int idx)
1110 struct llog_log_hdr *llh = cathandle->lgh_hdr;
1115 bitmap_size = LLOG_HDR_BITMAP_SIZE(llh);
1117 * The llh_cat_idx equals to the first used index minus 1
1118 * so if we canceled the first index then llh_cat_idx
1121 if (llh->llh_cat_idx == (idx - 1)) {
1122 llh->llh_cat_idx = idx;
/* walk forward (with wrap) until a set bit = first used slot */
1124 while (idx != cathandle->lgh_last_idx) {
1125 idx = (idx + 1) % bitmap_size;
1126 if (!test_bit_le(idx, LLOG_HDR_BITMAP(llh))) {
1127 /* update llh_cat_idx for each unset bit,
1128 * expecting the next one is set */
1129 llh->llh_cat_idx = idx;
1130 } else if (idx == 0) {
1131 /* skip header bit */
1132 llh->llh_cat_idx = 0;
1135 /* the first index is found */
1140 CDEBUG(D_HA, "catlog "DFID" first idx %u, last_idx %u\n",
1141 PLOGID(&cathandle->lgh_id), llh->llh_cat_idx,
1142 cathandle->lgh_last_idx);
1148 /* Cleanup deleted plain llog traces from catalog */
1149 int llog_cat_cleanup(const struct lu_env *env, struct llog_handle *cathandle,
1150 struct llog_handle *loghandle, int index)
1155 if (loghandle != NULL) {
1156 /* remove destroyed llog from catalog list and
1157 * chd_current_log variable */
1158 down_write(&cathandle->lgh_lock);
1159 if (cathandle->u.chd.chd_current_log == loghandle)
1160 cathandle->u.chd.chd_current_log = NULL;
1161 list_del_init(&loghandle->u.phd.phd_entry);
1162 up_write(&cathandle->lgh_lock);
/* cookie index must match the catalog slot being cleaned (0 means
 * the cookie was never filled in) */
1163 LASSERT(index == loghandle->u.phd.phd_cookie.lgc_index ||
1164 loghandle->u.phd.phd_cookie.lgc_index == 0);
1165 /* llog was opened and keep in a list, close it now */
1166 llog_close(env, loghandle);
1169 /* do not attempt to cleanup on-disk llog if on client side */
1170 if (cathandle->lgh_obj == NULL)
1173 /* remove plain llog entry from catalog by index */
1174 llog_cat_set_first_idx(cathandle, index);
1175 rc = llog_cancel_rec(env, cathandle, index);
1176 if (!rc && loghandle)
1178 "cancel plain log "DFID" at index %u of catalog "DFID"\n",
1179 PLOGID(&loghandle->lgh_id), index,
1180 PLOGID(&cathandle->lgh_id));