4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 * lustre/obdclass/llog_cat.c
33 * OST<->MDS recovery logging infrastructure.
35 * Invariants in implementation:
36 * - we do not share logs among different OST<->MDS connections, so that
37 * if an OST or MDS fails it need only look at log(s) relevant to itself
39 * Author: Andreas Dilger <adilger@clusterfs.com>
40 * Author: Alexey Zhuravlev <alexey.zhuravlev@intel.com>
41 * Author: Mikhail Pershin <mike.pershin@intel.com>
44 #define DEBUG_SUBSYSTEM S_LOG
47 #include <obd_class.h>
49 #include "llog_internal.h"
53 * lockdep markers for nested struct llog_handle::lgh_lock locking.
60 /* Create a new log handle and add it to the open list.
61 * This log handle will be closed when all of the records in it are removed.
63 * Assumes caller has already pushed us into the kernel context and is locking.
65 static int llog_cat_new_log(const struct lu_env *env,
66 struct llog_handle *cathandle,
67 struct llog_handle *loghandle,
70 struct llog_thread_info *lgi = llog_info(env);
71 struct llog_logid_rec *rec = &lgi->lgi_logid;
72 struct thandle *handle = NULL;
73 struct dt_device *dt = NULL;
74 struct llog_log_hdr *llh = cathandle->lgh_hdr;
/* next catalog slot: one past the newest used index, wrapping to 0 */
79 index = (cathandle->lgh_last_idx + 1) % (llog_max_idx(llh) + 1);
81 /* check that new llog index will not overlap with the first one.
82 * - llh_cat_idx is the index just before the first/oldest still in-use
84 * - lgh_last_idx is the last/newest used index in catalog
86 * When catalog is not wrapped yet then lgh_last_idx is always larger
87 * than llh_cat_idx. After the wrap around lgh_last_idx re-starts
88 * from 0 and llh_cat_idx becomes the upper limit for it
90 * Check if catalog has already wrapped around or not by comparing
91 * last_idx and cat_idx */
92 if ((index == llh->llh_cat_idx + 1 && llh->llh_count > 1) ||
93 (index == 0 && llh->llh_cat_idx == 0)) {
/* catalog is full: report via name if the catalog is named, else FID */
94 if (cathandle->lgh_name == NULL) {
95 CWARN("%s: there are no more free slots in catalog "DFID"\n",
96 loghandle2name(loghandle),
97 PLOGID(&cathandle->lgh_id));
99 CWARN("%s: there are no more free slots in catalog %s\n",
100 loghandle2name(loghandle), cathandle->lgh_name);
/* fault injection point used by tests to simulate creation failure */
105 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_LLOG_CREATE_FAILED))
108 if (loghandle->lgh_hdr != NULL) {
109 /* If llog object is remote and creation failed, lgh_hdr
110 * might be left over here, free it first */
111 LASSERT(!llog_exist(loghandle));
112 OBD_FREE_LARGE(loghandle->lgh_hdr, loghandle->lgh_hdr_size);
113 loghandle->lgh_hdr = NULL;
/* local transaction covering llog creation + catalog record write */
117 dt = lu2dt_dev(cathandle->lgh_obj->do_lu.lo_dev);
119 handle = dt_trans_create(env, dt);
121 RETURN(PTR_ERR(handle));
123 /* Create update llog object synchronously, which
124 * happens during initialization process see
125 * lod_sub_prep_llog(), to make sure the update
126 * llog object is created before cross-MDT writing
127 * updates into the llog object */
128 if (cathandle->lgh_ctxt->loc_flags & LLOG_CTXT_FLAG_NORMAL_FID)
131 handle->th_wait_submit = 1;
133 rc = llog_declare_create(env, loghandle, handle);
/* declare the catalog record that will point at the new plain llog */
137 rec->lid_hdr.lrh_len = sizeof(*rec);
138 rec->lid_hdr.lrh_type = LLOG_LOGID_MAGIC;
139 rec->lid_id = loghandle->lgh_id;
140 rc = llog_declare_write_rec(env, cathandle, &rec->lid_hdr, -1,
145 rc = dt_trans_start_local(env, dt, handle);
152 rc = llog_create(env, loghandle, th);
153 /* if llog is already created, no need to initialize it */
156 } else if (rc != 0) {
157 CERROR("%s: can't create new plain llog in catalog: rc = %d\n",
158 loghandle2name(loghandle), rc);
162 rc = llog_init_handle(env, loghandle,
163 LLOG_F_IS_PLAIN | LLOG_F_ZAP_WHEN_EMPTY,
164 &cathandle->lgh_hdr->llh_tgtuuid);
168 /* build the record for this log in the catalog */
169 rec->lid_hdr.lrh_len = sizeof(*rec);
170 rec->lid_hdr.lrh_type = LLOG_LOGID_MAGIC;
171 rec->lid_id = loghandle->lgh_id;
173 /* append the new record into catalog. The new index will be
174 * assigned to the record and updated in rec header */
175 rc = llog_write_rec(env, cathandle, &rec->lid_hdr,
176 &loghandle->u.phd.phd_cookie, LLOG_NEXT_IDX, th);
178 GOTO(out_destroy, rc);
180 CDEBUG(D_OTHER, "new plain log "DFID".%u of catalog "DFID"\n",
181 PLOGID(&loghandle->lgh_id), rec->lid_hdr.lrh_index,
182 PLOGID(&cathandle->lgh_id));
/* remember which catalog slot points back at this plain llog */
184 loghandle->lgh_hdr->llh_cat_idx = rec->lid_hdr.lrh_index;
186 /* limit max size of plain llog so that space can be
187 * released sooner, especially on small filesystems */
188 /* 2MB for the cases when free space hasn't been learned yet */
189 loghandle->lgh_max_size = 2 << 20;
190 dt = lu2dt_dev(cathandle->lgh_obj->do_lu.lo_dev);
191 rc = dt_statfs(env, dt, &lgi->lgi_statfs);
192 if (rc == 0 && lgi->lgi_statfs.os_bfree > 0) {
/* cap the plain llog at ~1/64 of free space (bytes >> 6) */
193 __u64 freespace = (lgi->lgi_statfs.os_bfree *
194 lgi->lgi_statfs.os_bsize) >> 6;
195 if (freespace < loghandle->lgh_max_size)
196 loghandle->lgh_max_size = freespace;
197 /* shouldn't be > 128MB in any case?
198 * it's 256K records of 512 bytes each */
199 if (freespace > (128 << 20))
200 loghandle->lgh_max_size = 128 << 20;
202 if (unlikely(OBD_FAIL_PRECHECK(OBD_FAIL_PLAIN_RECORDS) ||
203 OBD_FAIL_PRECHECK(OBD_FAIL_CATALOG_FULL_CHECK))) {
204 // limit the number of plain records for test
205 loghandle->lgh_max_size = loghandle->lgh_hdr_size +
/* stop the local transaction if we started one above */
212 if (handle != NULL) {
213 handle->th_result = rc >= 0 ? 0 : rc;
214 dt_trans_stop(env, dt, handle);
219 /* to signal llog_cat_close() it shouldn't try to destroy the llog,
220 * we want to destroy it in this transaction, otherwise the object
221 * becomes an orphan */
222 loghandle->lgh_hdr->llh_flags &= ~LLOG_F_ZAP_WHEN_EMPTY;
223 /* this is to mimic full log, so another llog_cat_current_log()
224 * can skip it and ask for another one */
225 loghandle->lgh_last_idx = llog_max_idx(loghandle->lgh_hdr) + 1;
226 llog_trans_destroy(env, loghandle, th);
228 dt_trans_stop(env, dt, handle);
/* Re-read the on-disk header of the catalog and of every already-created
 * plain llog on its open list, under the catalog write lock.  Used after
 * a remote update may have changed the headers (see llog_cat_prep_log()
 * and llog_cat_declare_add_rec() -ESTALE handling). */
232 static int llog_cat_refresh(const struct lu_env *env,
233 struct llog_handle *cathandle)
235 struct llog_handle *loghandle;
238 down_write(&cathandle->lgh_lock);
239 list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
/* skip plain llogs that were opened but never created on disk */
241 if (!llog_exist(loghandle))
244 down_write(&loghandle->lgh_lock);
245 rc = llog_read_header(env, loghandle, NULL);
246 up_write(&loghandle->lgh_lock);
/* finally refresh the catalog's own header */
251 rc = llog_read_header(env, cathandle, NULL);
253 up_write(&cathandle->lgh_lock);
259 * prepare current/next log for catalog.
261 * if \a *ploghandle is NULL, open it, and declare create, NB, if \a
262 * *ploghandle is remote, create it synchronously here, see comments
265 * \a cathandle->lgh_lock is down_read-ed, it gets down_write-ed if \a
266 * *ploghandle has to be opened.
268 static int llog_cat_prep_log(const struct lu_env *env,
269 struct llog_handle *cathandle,
270 struct llog_handle **ploghandle,
/* open a new plain llog handle if none (or an error value) is cached;
 * the lock is upgraded read->write for the update, so re-check after */
279 if (IS_ERR_OR_NULL(*ploghandle)) {
280 up_read(&cathandle->lgh_lock);
281 down_write(&cathandle->lgh_lock);
283 if (IS_ERR_OR_NULL(*ploghandle)) {
284 struct llog_handle *loghandle;
286 rc = llog_open(env, cathandle->lgh_ctxt, &loghandle,
287 NULL, NULL, LLOG_OPEN_NEW);
289 *ploghandle = loghandle;
290 list_add_tail(&loghandle->u.phd.phd_entry,
291 &cathandle->u.chd.chd_head);
298 rc = llog_exist(*ploghandle);
304 if (dt_object_remote(cathandle->lgh_obj)) {
305 down_write_nested(&(*ploghandle)->lgh_lock, LLOGH_LOG);
306 if (!llog_exist(*ploghandle)) {
307 /* For remote operation, if we put the llog object
308 * creation in the current transaction, then the
309 * llog object will not be created on the remote
310 * target until the transaction stop, if other
311 * operations start before the transaction stop,
312 * and use the same llog object, will be dependent
313 * on the success of this transaction. So let's
314 * create the llog object synchronously here to
315 * remove the dependency. */
316 rc = llog_cat_new_log(env, cathandle, *ploghandle,
319 up_write(&(*ploghandle)->lgh_lock);
321 up_write(&cathandle->lgh_lock);
/* headers may be stale after remote creation: drop locks, refresh,
 * then retake the read lock and restart the preparation */
323 up_read(&cathandle->lgh_lock);
325 rc = llog_cat_refresh(env, cathandle);
326 down_read_nested(&cathandle->lgh_lock,
330 /* *ploghandle might become NULL, restart */
334 up_write(&(*ploghandle)->lgh_lock);
/* local llog: only declare creation + catalog record in \a th here;
 * the actual creation happens later in llog_cat_new_log() */
336 struct llog_thread_info *lgi = llog_info(env);
337 struct llog_logid_rec *lirec = &lgi->lgi_logid;
339 rc = llog_declare_create(env, *ploghandle, th);
343 lirec->lid_hdr.lrh_len = sizeof(*lirec);
344 rc = llog_declare_write_rec(env, cathandle, &lirec->lid_hdr, -1,
/* restore the read-lock state expected by the caller */
350 up_write(&cathandle->lgh_lock);
351 down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
358 /* Open an existing log handle and add it to the open list.
359 * This log handle will be closed when all of the records in it are removed.
361 * Assumes caller has already pushed us into the kernel context and is locking.
362 * We return a lock on the handle to ensure nobody yanks it from us.
364 * This takes extra reference on llog_handle via llog_handle_get() and require
365 * this reference to be put by caller using llog_handle_put()
367 int llog_cat_id2handle(const struct lu_env *env, struct llog_handle *cathandle,
368 struct llog_handle **res, struct llog_logid *logid)
370 struct llog_handle *loghandle;
376 if (cathandle == NULL)
/* preserve the catalog's extended-format flags on the plain llog */
379 fmt = cathandle->lgh_hdr->llh_flags & LLOG_F_EXT_MASK;
/* fast path: the plain llog may already be cached on the open list */
380 down_write(&cathandle->lgh_lock);
381 list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
383 struct llog_logid *cgl = &loghandle->lgh_id;
385 if (ostid_id(&cgl->lgl_oi) == ostid_id(&logid->lgl_oi) &&
386 ostid_seq(&cgl->lgl_oi) == ostid_seq(&logid->lgl_oi)) {
387 *res = llog_handle_get(loghandle);
389 CERROR("%s: log "DFID" refcount is zero!\n",
390 loghandle2name(loghandle),
394 loghandle->u.phd.phd_cat_handle = cathandle;
395 up_write(&cathandle->lgh_lock);
399 up_write(&cathandle->lgh_lock);
/* slow path: open the plain llog by its logid and cache it */
401 rc = llog_open(env, cathandle->lgh_ctxt, &loghandle, logid, NULL,
404 CERROR("%s: error opening log id "DFID": rc = %d\n",
405 loghandle2name(cathandle), PLOGID(logid), rc);
409 rc = llog_init_handle(env, loghandle, LLOG_F_IS_PLAIN |
410 LLOG_F_ZAP_WHEN_EMPTY | fmt, NULL);
412 llog_close(env, loghandle);
/* extra reference for the caller, see function comment above */
417 *res = llog_handle_get(loghandle);
419 down_write(&cathandle->lgh_lock);
420 list_add_tail(&loghandle->u.phd.phd_entry, &cathandle->u.chd.chd_head);
421 up_write(&cathandle->lgh_lock);
/* link the plain llog back to its catalog slot */
423 loghandle->u.phd.phd_cat_handle = cathandle;
424 loghandle->u.phd.phd_cookie.lgc_lgl = cathandle->lgh_id;
425 loghandle->u.phd.phd_cookie.lgc_index =
426 loghandle->lgh_hdr->llh_cat_idx;
/* Close a catalog handle: walk the cached plain llogs, destroying any
 * empty one marked LLOG_F_ZAP_WHEN_EMPTY (and clearing its catalog slot),
 * closing the rest, then close the catalog itself. */
430 int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle)
432 struct llog_handle *loghandle, *n;
437 list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head,
439 struct llog_log_hdr *llh = loghandle->lgh_hdr;
442 /* unlink open-not-created llogs */
443 list_del_init(&loghandle->u.phd.phd_entry);
444 llh = loghandle->lgh_hdr;
445 if (loghandle->lgh_obj != NULL && llh != NULL &&
446 (llh->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
447 (llh->llh_count == 1)) {
/* llh_count == 1 means only the header remains: llog is empty */
448 rc = llog_destroy(env, loghandle);
450 CERROR("%s: failure destroying log during "
451 "cleanup: rc = %d\n",
452 loghandle2name(loghandle), rc);
/* remove the now-dangling catalog entry for the destroyed llog */
454 index = loghandle->u.phd.phd_cookie.lgc_index;
455 llog_cat_cleanup(env, cathandle, NULL, index);
457 llog_close(env, loghandle);
459 /* if handle was stored in ctxt, remove it too */
460 if (cathandle->lgh_ctxt->loc_handle == cathandle)
461 cathandle->lgh_ctxt->loc_handle = NULL;
462 rc = llog_close(env, cathandle);
465 EXPORT_SYMBOL(llog_cat_close);
467 /** Return the currently active log handle. If the current log handle doesn't
468 * have enough space left for the current record, start a new one.
470 * If reclen is 0, we only want to know what the currently active log is,
471 * otherwise we get a lock on this log so nobody can steal our space.
473 * Assumes caller has already pushed us into the kernel context and is locking.
475 * NOTE: loghandle is write-locked upon successful return
477 static struct llog_handle *llog_cat_current_log(struct llog_handle *cathandle,
480 struct llog_handle *loghandle = NULL;
/* fault injection: pretend the current log is full to force the
 * "use next log" path below */
484 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_LLOG_CREATE_FAILED2)) {
485 down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
486 GOTO(next, loghandle);
/* optimistic pass under the read lock: current log still has room */
489 down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
490 loghandle = cathandle->u.chd.chd_current_log;
492 struct llog_log_hdr *llh;
494 down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
495 llh = loghandle->lgh_hdr;
496 if (llh == NULL || !llog_is_full(loghandle)) {
497 up_read(&cathandle->lgh_lock);
500 up_write(&loghandle->lgh_lock);
503 up_read(&cathandle->lgh_lock);
505 /* time to use next log */
507 /* first, we have to make sure the state hasn't changed */
508 down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
509 loghandle = cathandle->u.chd.chd_current_log;
511 struct llog_log_hdr *llh;
513 down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
514 llh = loghandle->lgh_hdr;
515 if (llh == NULL || !llog_is_full(loghandle))
516 GOTO(out_unlock, loghandle);
518 up_write(&loghandle->lgh_lock);
522 /* Sigh, the chd_next_log and chd_current_log is initialized
523 * in declare phase, and we do not serialize the catlog
524 * accessing, so it might be possible the llog creation
525 * thread (see llog_cat_declare_add_rec()) did not create
526 * llog successfully, then the following thread might
527 * meet this situation. */
528 if (IS_ERR_OR_NULL(cathandle->u.chd.chd_next_log)) {
529 CERROR("%s: next log does not exist!\n",
530 loghandle2name(cathandle));
531 loghandle = ERR_PTR(-EIO);
532 if (cathandle->u.chd.chd_next_log == NULL) {
533 /* Store the error in chd_next_log, so
534 * the following process can get correct
536 cathandle->u.chd.chd_next_log = loghandle;
538 GOTO(out_unlock, loghandle);
541 CDEBUG(D_INODE, "use next log\n");
/* promote the pre-prepared next log to current; caller gets it
 * write-locked, per the NOTE above */
543 loghandle = cathandle->u.chd.chd_next_log;
544 cathandle->u.chd.chd_current_log = loghandle;
545 cathandle->u.chd.chd_next_log = NULL;
546 down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
549 up_write(&cathandle->lgh_lock);
554 /* Add a single record to the recovery log(s) using a catalog
555 * Returns as llog_write_record
557 * Assumes caller has already pushed us into the kernel context.
559 int llog_cat_add_rec(const struct lu_env *env, struct llog_handle *cathandle,
560 struct llog_rec_hdr *rec, struct llog_cookie *reccookie,
563 struct llog_handle *loghandle;
/* a record must fit into one llog chunk */
567 LASSERT(rec->lrh_len <= cathandle->lgh_ctxt->loc_chunk_size);
570 loghandle = llog_cat_current_log(cathandle, th);
571 if (IS_ERR(loghandle))
572 RETURN(PTR_ERR(loghandle));
574 /* loghandle is already locked by llog_cat_current_log() for us */
575 if (!llog_exist(loghandle)) {
576 rc = llog_cat_new_log(env, cathandle, loghandle, th);
578 up_write(&loghandle->lgh_lock);
579 /* nobody should be trying to use this llog */
580 down_write(&cathandle->lgh_lock);
581 if (cathandle->u.chd.chd_current_log == loghandle)
582 cathandle->u.chd.chd_current_log = NULL;
583 up_write(&cathandle->lgh_lock);
587 /* now let's try to add the record */
588 rc = llog_write_rec(env, loghandle, rec, reccookie, LLOG_NEXT_IDX, th);
590 CDEBUG_LIMIT(rc == -ENOSPC ? D_HA : D_ERROR,
591 "llog_write_rec %d: lh=%p\n", rc, loghandle);
592 /* -ENOSPC is returned if no empty records left
593 * and when it's lack of space on the storage.
594 * there is no point to try again if it's the second
595 * case. many callers (like llog test) expect ENOSPC,
596 * so we preserve this error code, but look for the
597 * actual cause here */
598 if (rc == -ENOSPC && llog_is_full(loghandle))
601 up_write(&loghandle->lgh_lock);
/* NOTE(review): -ENOBUFS appears to trigger a retry on another llog;
 * the retry path itself is not visible here */
603 if (rc == -ENOBUFS) {
606 CERROR("%s: error on 2nd llog: rc = %d\n",
607 loghandle2name(cathandle), rc);
612 EXPORT_SYMBOL(llog_cat_add_rec);
/* Declare (reserve transaction credits for) a future llog_cat_add_rec():
 * prepare both the current and the next plain llog and declare the record
 * write against each, so the add can proceed whichever log ends up used. */
614 int llog_cat_declare_add_rec(const struct lu_env *env,
615 struct llog_handle *cathandle,
616 struct llog_rec_hdr *rec, struct thandle *th)
623 down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
624 rc = llog_cat_prep_log(env, cathandle,
625 &cathandle->u.chd.chd_current_log, th);
629 rc = llog_cat_prep_log(env, cathandle, &cathandle->u.chd.chd_next_log,
634 rc = llog_declare_write_rec(env, cathandle->u.chd.chd_current_log,
/* -ESTALE from a remote catalog means our cached headers are out of
 * date: refresh them and (per the visible flow) retry the declaration */
636 if (rc == -ESTALE && dt_object_remote(cathandle->lgh_obj)) {
637 up_read(&cathandle->lgh_lock);
638 rc = llog_cat_refresh(env, cathandle);
646 * XXX: we hope for declarations made for existing llog this might be
647 * not correct with some backends where declarations are expected
648 * against specific object like ZFS with full debugging enabled.
650 rc = llog_declare_write_rec(env, cathandle->u.chd.chd_next_log, rec, -1,
654 up_read(&cathandle->lgh_lock);
657 EXPORT_SYMBOL(llog_cat_declare_add_rec);
/* Convenience wrapper: create a local transaction, declare and then add
 * one record through the catalog, and stop the transaction. */
659 int llog_cat_add(const struct lu_env *env, struct llog_handle *cathandle,
660 struct llog_rec_hdr *rec, struct llog_cookie *reccookie)
662 struct llog_ctxt *ctxt;
663 struct dt_device *dt;
664 struct thandle *th = NULL;
667 ctxt = cathandle->lgh_ctxt;
669 LASSERT(ctxt->loc_exp);
671 LASSERT(cathandle->lgh_obj != NULL);
672 dt = lu2dt_dev(cathandle->lgh_obj->do_lu.lo_dev);
674 th = dt_trans_create(env, dt);
678 rc = llog_cat_declare_add_rec(env, cathandle, rec, th);
682 rc = dt_trans_start_local(env, dt, th);
685 rc = llog_cat_add_rec(env, cathandle, rec, reccookie, th);
687 dt_trans_stop(env, dt, th);
690 EXPORT_SYMBOL(llog_cat_add);
/* Cancel \a count records (given by \a index array) inside the plain llog
 * identified by \a lgl; if the plain llog becomes empty and is destroyed,
 * also remove its entry from the catalog. */
692 int llog_cat_cancel_arr_rec(const struct lu_env *env,
693 struct llog_handle *cathandle,
694 struct llog_logid *lgl, int count, int *index)
696 struct llog_handle *loghandle;
700 rc = llog_cat_id2handle(env, cathandle, &loghandle, lgl);
702 CDEBUG(D_HA, "%s: can't find llog handle for "DFID": rc = %d\n",
703 loghandle2name(cathandle), PLOGID(lgl), rc);
707 if ((cathandle->lgh_ctxt->loc_flags &
708 LLOG_CTXT_FLAG_NORMAL_FID) && !llog_exist(loghandle)) {
709 /* For update log, some of loghandles of cathandle
710 * might not exist because remote llog creation might
711 * be failed, so let's skip the record cancellation
712 * for these non-exist llogs.
715 CDEBUG(D_HA, "%s: llog "DFID" does not exist: rc = %d\n",
716 loghandle2name(cathandle), PLOGID(lgl), rc);
717 llog_handle_put(env, loghandle);
721 rc = llog_cancel_arr_rec(env, loghandle, count, index);
722 if (rc == LLOG_DEL_PLAIN) { /* log has been destroyed */
725 cat_index = loghandle->u.phd.phd_cookie.lgc_index;
726 rc = llog_cat_cleanup(env, cathandle, loghandle, cat_index);
728 CERROR("%s: fail to cancel catalog record: rc = %d\n",
729 loghandle2name(cathandle), rc);
/* drop the reference taken by llog_cat_id2handle() */
733 llog_handle_put(env, loghandle);
736 CERROR("%s: fail to cancel %d llog-records: rc = %d\n",
737 loghandle2name(cathandle), count, rc);
741 EXPORT_SYMBOL(llog_cat_cancel_arr_rec);
743 /* For each cookie in the cookie array, we clear the log in-use bit and either:
744 * - the log is empty, so mark it free in the catalog header and delete it
745 * - the log is not empty, just write out the log header
747 * The cookies may be in different log files, so we need to get new logs
750 * Assumes caller has already pushed us into the kernel context.
752 int llog_cat_cancel_records(const struct lu_env *env,
753 struct llog_handle *cathandle, int count,
754 struct llog_cookie *cookies)
756 int i, rc = 0, failed = 0;
/* cancel one record per cookie; failures are counted, not fatal */
760 for (i = 0; i < count; i++, cookies++) {
763 lrc = llog_cat_cancel_arr_rec(env, cathandle, &cookies->lgc_lgl,
764 1, &cookies->lgc_index);
772 CERROR("%s: fail to cancel %d of %d llog-records: rc = %d\n",
773 loghandle2name(cathandle), failed, count, rc);
776 EXPORT_SYMBOL(llog_cat_cancel_records);
/* Common helper for the catalog-iteration callbacks: validate the catalog
 * record, open the plain llog it points to (via *llhp), and garbage-collect
 * empty ZAP_WHEN_EMPTY llogs.  Returns 0 with *llhp set, or a special
 * LLOG_DEL_* value telling the caller to clean the catalog entry. */
778 static int llog_cat_process_common(const struct lu_env *env,
779 struct llog_handle *cat_llh,
780 struct llog_rec_hdr *rec,
781 struct llog_handle **llhp)
783 struct llog_logid_rec *lir = container_of(rec, typeof(*lir), lid_hdr);
784 struct llog_log_hdr *hdr;
/* every catalog record must be a logid record */
788 if (rec->lrh_type != le32_to_cpu(LLOG_LOGID_MAGIC)) {
790 CWARN("%s: invalid record in catalog "DFID": rc = %d\n",
791 loghandle2name(cat_llh), PLOGID(&cat_llh->lgh_id), rc);
794 CDEBUG(D_HA, "processing log "DFID" at index %u of catalog "DFID"\n",
795 PLOGID(&lir->lid_id), le32_to_cpu(rec->lrh_index),
796 PLOGID(&cat_llh->lgh_id));
798 rc = llog_cat_id2handle(env, cat_llh, llhp, &lir->lid_id);
800 /* After a server crash, a stub of index record in catlog could
801 * be kept, because plain log destroy + catlog index record
802 * deletion are not atomic. So we end up with an index but no
803 * actual record. Destroy the index and move on. */
804 if (rc == -ENOENT || rc == -ESTALE)
805 rc = LLOG_DEL_RECORD;
807 CWARN("%s: can't find llog handle "DFID": rc = %d\n",
808 loghandle2name(cat_llh), PLOGID(&lir->lid_id),
814 /* clean old empty llogs, do not consider current llog in use */
815 /* ignore remote (lgh_obj == NULL) llogs */
816 hdr = (*llhp)->lgh_hdr;
817 if ((hdr->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
818 hdr->llh_count == 1 && cat_llh->lgh_obj != NULL &&
819 *llhp != cat_llh->u.chd.chd_current_log &&
820 *llhp != cat_llh->u.chd.chd_next_log) {
821 rc = llog_destroy(env, *llhp);
823 CWARN("%s: can't destroy empty log "DFID": rc = %d\n",
824 loghandle2name((*llhp)), PLOGID(&lir->lid_id),
/* Per-catalog-record callback for llog_cat_process_or_fork(): open the
 * plain llog referenced by \a rec and run the user callback (d->lpd_cb)
 * over its records, honouring the startcat/startidx resume point. */
832 static int llog_cat_process_cb(const struct lu_env *env,
833 struct llog_handle *cat_llh,
834 struct llog_rec_hdr *rec, void *data)
836 struct llog_process_data *d = data;
837 struct llog_handle *llh = NULL;
842 /* Skip processing of the logs until startcat */
843 if (rec->lrh_index < d->lpd_startcat)
846 rc = llog_cat_process_common(env, cat_llh, rec, &llh);
850 if (d->lpd_startidx > 0) {
851 struct llog_process_cat_data cd = {
854 .lpcd_read_mode = LLOG_READ_MODE_NORMAL,
857 /* startidx is always associated with a catalog index */
858 if (d->lpd_startcat == rec->lrh_index)
859 cd.lpcd_first_idx = d->lpd_startidx;
861 rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data,
863 /* Continue processing the next log from idx 0 */
866 rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data,
869 if (rc == -ENOENT && (cat_llh->lgh_hdr->llh_flags & LLOG_F_RM_ON_ERR)) {
871 * plain llog is reported corrupted, so better to just remove
872 * it if the caller is fine with that.
874 CERROR("%s: remove corrupted/missing llog "DFID"\n",
875 loghandle2name(cat_llh), PLOGID(&llh->lgh_id));
880 /* The empty plain log was destroyed while processing */
881 if (rc == LLOG_DEL_PLAIN || rc == LLOG_DEL_RECORD)
882 /* clear wrong catalog entry */
883 rc = llog_cat_cleanup(env, cat_llh, llh, rec->lrh_index);
884 else if (rc == LLOG_SKIP_PLAIN)
885 /* processing callback ask to skip the llog -> continue */
889 llog_handle_put(env, llh);
/* Iterate over the catalog records (optionally in a forked thread),
 * running \a cb on every record of every referenced plain llog, starting
 * from (\a startcat, \a startidx) or from the oldest record by default.
 * Handles the wrapped-catalog case by processing in two passes. */
894 int llog_cat_process_or_fork(const struct lu_env *env,
895 struct llog_handle *cat_llh, llog_cb_t cat_cb,
896 llog_cb_t cb, void *data, int startcat,
897 int startidx, bool fork)
899 struct llog_log_hdr *llh = cat_llh->lgh_hdr;
900 struct llog_process_data d;
901 struct llog_process_cat_data cd;
906 LASSERT(llh->llh_flags & LLOG_F_IS_CAT);
910 /* default: start from the oldest record */
912 d.lpd_startcat = llh->llh_cat_idx + 1;
913 cd.lpcd_first_idx = llh->llh_cat_idx;
914 cd.lpcd_last_idx = 0;
915 cd.lpcd_read_mode = LLOG_READ_MODE_NORMAL;
917 if (startcat > 0 && startcat <= llog_max_idx(llh)) {
918 /* start from a custom catalog/llog plain indexes*/
919 d.lpd_startidx = startidx;
920 d.lpd_startcat = startcat;
921 cd.lpcd_first_idx = startcat - 1;
922 } else if (startcat != 0) {
923 CWARN("%s: startcat %d out of range for catlog "DFID"\n",
924 loghandle2name(cat_llh), startcat,
925 PLOGID(&cat_llh->lgh_id));
929 startcat = d.lpd_startcat;
931 /* if startcat <= lgh_last_idx, we only need to process the first part
932 * of the catalog (from startcat).
934 if (llog_cat_is_wrapped(cat_llh) && startcat > cat_llh->lgh_last_idx) {
935 int cat_idx_origin = llh->llh_cat_idx;
937 CWARN("%s: catlog "DFID" crosses index zero\n",
938 loghandle2name(cat_llh),
939 PLOGID(&cat_llh->lgh_id));
941 /* processing the catalog part at the end */
942 rc = llog_process_or_fork(env, cat_llh, cat_cb, &d, &cd, fork);
946 /* Reset the startcat because it has already reached catalog
948 * lgh_last_idx value could be increased during processing. So
949 * we process the remaining of catalog entries to be sure.
953 cd.lpcd_first_idx = 0;
954 cd.lpcd_last_idx = max(cat_idx_origin, cat_llh->lgh_last_idx);
955 } else if (llog_cat_is_wrapped(cat_llh)) {
956 /* only process 1st part -> stop before reaching 2nd part */
957 cd.lpcd_last_idx = llh->llh_cat_idx;
960 /* processing the catalog part at the beginning */
961 rc = llog_process_or_fork(env, cat_llh, cat_cb, &d, &cd, fork);
965 EXPORT_SYMBOL(llog_cat_process_or_fork);
968 * Process catalog records with a callback
971 * If "startcat = 0", this is the default processing. "startidx" argument is
972 * ignored and processing begin from the oldest record.
973 * If "startcat > 0", this is a custom starting point. Processing begin with
974 * the llog plain defined in the catalog record at index "startcat". The first
975 * llog plain record to process is at index "startidx + 1".
977 * \param env Lustre environment
978 * \param cat_llh Catalog llog handler
979 * \param cb Callback executed for each records (in llog plain files)
980 * \param data Callback data argument
981 * \param startcat Catalog index of the llog plain to start with.
982 * \param startidx Index of the llog plain to start processing. The first
983 * record to process is at startidx + 1.
985 * \retval 0 processing successfully completed
986 * \retval LLOG_PROC_BREAK processing was stopped by the callback.
987 * \retval -errno on error.
989 int llog_cat_process(const struct lu_env *env, struct llog_handle *cat_llh,
990 llog_cb_t cb, void *data, int startcat, int startidx)
/* non-forking wrapper around llog_cat_process_or_fork() */
992 return llog_cat_process_or_fork(env, cat_llh, llog_cat_process_cb,
993 cb, data, startcat, startidx, false);
995 EXPORT_SYMBOL(llog_cat_process);
/* Catalog-iteration callback for llog_cat_size(): accumulate the size of
 * each referenced plain llog into the __u64 pointed to by d->lpd_data. */
997 static int llog_cat_size_cb(const struct lu_env *env,
998 struct llog_handle *cat_llh,
999 struct llog_rec_hdr *rec, void *data)
1001 struct llog_process_data *d = data;
1002 struct llog_handle *llh = NULL;
1003 __u64 *cum_size = d->lpd_data;
1008 rc = llog_cat_process_common(env, cat_llh, rec, &llh);
1010 if (rc == LLOG_DEL_PLAIN) {
1011 /* empty log was deleted, don't count it */
1012 rc = llog_cat_cleanup(env, cat_llh, llh,
1013 llh->u.phd.phd_cookie.lgc_index);
1014 } else if (rc == LLOG_DEL_RECORD) {
1015 /* clear wrong catalog entry */
1016 rc = llog_cat_cleanup(env, cat_llh, NULL, rec->lrh_index);
1018 size = llog_size(env, llh);
1021 CDEBUG(D_INFO, "Add llog entry "DFID" size=%llu, tot=%llu\n",
1022 PLOGID(&llh->lgh_id), size, *cum_size);
1026 llog_handle_put(env, llh);
/* Return the total size of the catalog plus all of its plain llogs,
 * computed by iterating the catalog with llog_cat_size_cb(). */
1031 __u64 llog_cat_size(const struct lu_env *env, struct llog_handle *cat_llh)
1033 __u64 size = llog_size(env, cat_llh);
1035 llog_cat_process_or_fork(env, cat_llh, llog_cat_size_cb,
1036 NULL, &size, 0, 0, false);
1040 EXPORT_SYMBOL(llog_cat_size);
1042 /* currently returns the number of "free" entries in catalog,
1043 * ie the available entries for a new plain LLOG file creation,
1044 * even if catalog has wrapped
1046 __u32 llog_cat_free_space(struct llog_handle *cat_llh)
1048 /* simulate almost full Catalog */
1049 if (OBD_FAIL_CHECK(OBD_FAIL_CAT_FREE_RECORDS))
1050 return cfs_fail_val;
/* only the header record present: the whole catalog is free */
1052 if (cat_llh->lgh_hdr->llh_count == 1)
1053 return llog_max_idx(cat_llh->lgh_hdr);
/* not wrapped: free slots are everything outside [cat_idx, last_idx] */
1055 if (cat_llh->lgh_last_idx > cat_llh->lgh_hdr->llh_cat_idx)
1056 return llog_max_idx(cat_llh->lgh_hdr) +
1057 cat_llh->lgh_hdr->llh_cat_idx - cat_llh->lgh_last_idx;
1059 /* catalog is presently wrapped */
1060 return cat_llh->lgh_hdr->llh_cat_idx - cat_llh->lgh_last_idx;
1062 EXPORT_SYMBOL(llog_cat_free_space);
/* Catalog-iteration callback for llog_cat_reverse_process(): run the user
 * callback over the plain llog's records in reverse order. */
1064 static int llog_cat_reverse_process_cb(const struct lu_env *env,
1065 struct llog_handle *cat_llh,
1066 struct llog_rec_hdr *rec, void *data)
1068 struct llog_process_data *d = data;
1069 struct llog_handle *llh;
1073 rc = llog_cat_process_common(env, cat_llh, rec, &llh);
1075 /* The empty plain log was destroyed while processing */
1076 if (rc == LLOG_DEL_PLAIN) {
1077 rc = llog_cat_cleanup(env, cat_llh, llh,
1078 llh->u.phd.phd_cookie.lgc_index);
1079 } else if (rc == LLOG_DEL_RECORD) {
1080 /* clear wrong catalog entry */
1081 rc = llog_cat_cleanup(env, cat_llh, NULL, rec->lrh_index);
1082 } else if (rc == LLOG_SKIP_PLAIN) {
1083 /* processing callback ask to skip the llog -> continue */
1089 rc = llog_reverse_process(env, llh, d->lpd_cb, d->lpd_data, NULL);
1091 /* The empty plain was destroyed while processing */
1092 if (rc == LLOG_DEL_PLAIN)
1093 rc = llog_cat_cleanup(env, cat_llh, llh,
1094 llh->u.phd.phd_cookie.lgc_index);
1096 llog_handle_put(env, llh);
/* Process the catalog in reverse (newest records first), running \a cb on
 * each record of each plain llog.  A wrapped catalog is handled in two
 * reverse passes: the part after index zero first, then the older part. */
1100 int llog_cat_reverse_process(const struct lu_env *env,
1101 struct llog_handle *cat_llh,
1102 llog_cb_t cb, void *data)
1104 struct llog_process_data d;
1105 struct llog_process_cat_data cd;
1106 struct llog_log_hdr *llh = cat_llh->lgh_hdr;
1110 LASSERT(llh->llh_flags & LLOG_F_IS_CAT);
1111 cd.lpcd_read_mode = LLOG_READ_MODE_NORMAL;
/* llh_cat_idx >= lgh_last_idx with more than the header record means
 * the catalog has wrapped past index zero */
1115 if (llh->llh_cat_idx >= cat_llh->lgh_last_idx &&
1116 llh->llh_count > 1) {
1117 CWARN("%s: catalog "DFID" crosses index zero\n",
1118 loghandle2name(cat_llh),
1119 PLOGID(&cat_llh->lgh_id));
/* pass 1: the newer entries, from lgh_last_idx back to zero */
1121 cd.lpcd_first_idx = 0;
1122 cd.lpcd_last_idx = cat_llh->lgh_last_idx;
1123 rc = llog_reverse_process(env, cat_llh,
1124 llog_cat_reverse_process_cb,
/* pass 2: the older entries, back to the oldest in-use index */
1129 cd.lpcd_first_idx = le32_to_cpu(llh->llh_cat_idx);
1130 cd.lpcd_last_idx = 0;
1131 rc = llog_reverse_process(env, cat_llh,
1132 llog_cat_reverse_process_cb,
1135 rc = llog_reverse_process(env, cat_llh,
1136 llog_cat_reverse_process_cb,
1142 EXPORT_SYMBOL(llog_cat_reverse_process);
/* After cancelling catalog entry \a idx, advance llh_cat_idx (which marks
 * the slot just before the oldest in-use entry) past any run of cleared
 * bitmap bits, so free_space/wrap accounting stays correct. */
1144 static int llog_cat_set_first_idx(struct llog_handle *cathandle, int idx)
1146 struct llog_log_hdr *llh = cathandle->lgh_hdr;
/* total number of catalog slots, used for modular index arithmetic */
1151 idx_nbr = llog_max_idx(llh) + 1;
1153 * The llh_cat_idx equals to the first used index minus 1
1154 * so if we canceled the first index then llh_cat_idx
1157 if (llh->llh_cat_idx == (idx - 1)) {
1158 llh->llh_cat_idx = idx;
1160 while (idx != cathandle->lgh_last_idx) {
1161 idx = (idx + 1) % idx_nbr;
1162 if (!test_bit_le(idx, LLOG_HDR_BITMAP(llh))) {
1163 /* update llh_cat_idx for each unset bit,
1164 * expecting the next one is set */
1165 llh->llh_cat_idx = idx;
1166 } else if (idx == 0) {
1167 /* skip header bit */
1168 llh->llh_cat_idx = 0;
1171 /* the first index is found */
1176 CDEBUG(D_HA, "catlog "DFID" first idx %u, last_idx %u\n",
1177 PLOGID(&cathandle->lgh_id), llh->llh_cat_idx,
1178 cathandle->lgh_last_idx);
1184 /* Cleanup deleted plain llog traces from catalog */
1185 int llog_cat_cleanup(const struct lu_env *env, struct llog_handle *cathandle,
1186 struct llog_handle *loghandle, int index)
1191 if (loghandle != NULL) {
1192 /* remove destroyed llog from catalog list and
1193 * chd_current_log variable */
1194 down_write(&cathandle->lgh_lock);
1195 if (cathandle->u.chd.chd_current_log == loghandle)
1196 cathandle->u.chd.chd_current_log = NULL;
1197 list_del_init(&loghandle->u.phd.phd_entry);
1198 up_write(&cathandle->lgh_lock);
/* the stored cookie index must match the slot being cleaned (or be
 * unset, e.g. for llogs that were never fully catalogued) */
1199 LASSERT(index == loghandle->u.phd.phd_cookie.lgc_index ||
1200 loghandle->u.phd.phd_cookie.lgc_index == 0);
1201 /* llog was opened and keep in a list, close it now */
1202 llog_close(env, loghandle);
1205 /* do not attempt to cleanup on-disk llog if on client side */
1206 if (cathandle->lgh_obj == NULL)
1209 /* remove plain llog entry from catalog by index */
1210 llog_cat_set_first_idx(cathandle, index);
1211 rc = llog_cancel_rec(env, cathandle, index);
1212 if (!rc && loghandle)
1214 "cancel plain log "DFID" at index %u of catalog "DFID"\n",
1215 PLOGID(&loghandle->lgh_id), index,
1216 PLOGID(&cathandle->lgh_id));
1220 /* retain log in catalog, and zap it if log is empty */
1221 int llog_cat_retain_cb(const struct lu_env *env, struct llog_handle *cat,
1222 struct llog_rec_hdr *rec, void *data)
1224 struct llog_handle *log = NULL;
1227 rc = llog_cat_process_common(env, cat, rec, &log);
1229 /* The empty plain log was destroyed while processing */
1230 if (rc == LLOG_DEL_PLAIN || rc == LLOG_DEL_RECORD)
1231 /* clear wrong catalog entry */
1232 rc = llog_cat_cleanup(env, cat, log, rec->lrh_index);
/* otherwise keep the llog: mark it retained */
1234 llog_retain(env, log);
1237 llog_handle_put(env, log);
1241 EXPORT_SYMBOL(llog_cat_retain_cb);