4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/obdclass/llog_cat.c
34 * OST<->MDS recovery logging infrastructure.
36 * Invariants in implementation:
37 * - we do not share logs among different OST<->MDS connections, so that
38 * if an OST or MDS fails it need only look at log(s) relevant to itself
40 * Author: Andreas Dilger <adilger@clusterfs.com>
41 * Author: Alexey Zhuravlev <alexey.zhuravlev@intel.com>
42 * Author: Mikhail Pershin <mike.pershin@intel.com>
45 #define DEBUG_SUBSYSTEM S_LOG
48 #include <obd_class.h>
50 #include "llog_internal.h"
54 * lockdep markers for nested struct llog_handle::lgh_lock locking.
61 /* Create a new log handle and add it to the open list.
62 * This log handle will be closed when all of the records in it are removed.
64 * Assumes caller has already pushed us into the kernel context and is locking.
/* Create a new plain llog and insert a LLOG_LOGID_MAGIC record pointing at
 * it into catalog @cathandle.  If no caller transaction is supplied, a local
 * one is created, started and stopped here.
 *
 * Assumes caller has already pushed us into the kernel context and is locking.
 *
 * NOTE(review): extraction dropped several lines of this function (the
 * trailing thandle parameter, braces, error checks and labels); only
 * comments were changed below, the surviving code text is untouched.
 */
static int llog_cat_new_log(const struct lu_env *env,
			    struct llog_handle *cathandle,
			    struct llog_handle *loghandle,
	struct llog_thread_info *lgi = llog_info(env);
	struct llog_logid_rec *rec = &lgi->lgi_logid;
	struct thandle *handle = NULL;	/* local transaction, if created here */
	struct dt_device *dt = NULL;
	struct llog_log_hdr *llh = cathandle->lgh_hdr;

	/* candidate catalog slot; OBD_FAIL_CAT_RECORDS shrinks the effective
	 * bitmap to cfs_fail_val + 1 to exercise catalog wrap in testing */
	index = (cathandle->lgh_last_idx + 1) %
		(OBD_FAIL_PRECHECK(OBD_FAIL_CAT_RECORDS) ? (cfs_fail_val + 1) :
		 LLOG_HDR_BITMAP_SIZE(llh));

	/* check that new llog index will not overlap with the first one.
	 * - llh_cat_idx is the index just before the first/oldest still in-use
	 *   index in the catalog
	 * - lgh_last_idx is the last/newest used index in catalog
	 *
	 * When catalog is not wrapped yet then lgh_last_idx is always larger
	 * than llh_cat_idx. After the wrap around lgh_last_idx re-starts
	 * from 0 and llh_cat_idx becomes the upper limit for it
	 *
	 * Check if catalog has already wrapped around or not by comparing
	 * last_idx and cat_idx */
	if ((index == llh->llh_cat_idx + 1 && llh->llh_count > 1) ||
	    (index == 0 && llh->llh_cat_idx == 0)) {
		/* catalog full: report by name when it has one, by FID/gen
		 * otherwise */
		if (cathandle->lgh_name == NULL) {
			CWARN("%s: there are no more free slots in catalog "
			      loghandle->lgh_ctxt->loc_obd->obd_name,
			      PFID(&cathandle->lgh_id.lgl_oi.oi_fid),
			      cathandle->lgh_id.lgl_ogen);
			CWARN("%s: there are no more free slots in "
			      loghandle->lgh_ctxt->loc_obd->obd_name,
			      cathandle->lgh_name);

	/* fault-injection point: simulate a failed plain llog creation */
	if (OBD_FAIL_CHECK(OBD_FAIL_MDS_LLOG_CREATE_FAILED))

	if (loghandle->lgh_hdr != NULL) {
		/* If llog object is remote and creation failed, lgh_hdr
		 * might be left over here, free it first */
		LASSERT(!llog_exist(loghandle));
		OBD_FREE_LARGE(loghandle->lgh_hdr, loghandle->lgh_hdr_size);
		loghandle->lgh_hdr = NULL;

	/* no caller transaction: create and start a local one */
	dt = lu2dt_dev(cathandle->lgh_obj->do_lu.lo_dev);

	handle = dt_trans_create(env, dt);
		RETURN(PTR_ERR(handle));

	/* Create update llog object synchronously, which
	 * happens during the initialization process, see
	 * lod_sub_prep_llog(), to make sure the update
	 * llog object is created before cross-MDT writing
	 * updates into the llog object */
	if (cathandle->lgh_ctxt->loc_flags & LLOG_CTXT_FLAG_NORMAL_FID)

	handle->th_wait_submit = 1;

	/* declare creation of the plain llog ... */
	rc = llog_declare_create(env, loghandle, handle);

	/* ... and of the catalog record that will point at it */
	rec->lid_hdr.lrh_len = sizeof(*rec);
	rec->lid_hdr.lrh_type = LLOG_LOGID_MAGIC;
	rec->lid_id = loghandle->lgh_id;
	rc = llog_declare_write_rec(env, cathandle, &rec->lid_hdr, -1,

	rc = dt_trans_start_local(env, dt, handle);

	rc = llog_create(env, loghandle, th);
	/* if llog is already created, no need to initialize it */
	} else if (rc != 0) {
		CERROR("%s: can't create new plain llog in catalog: rc = %d\n",
		       loghandle->lgh_ctxt->loc_obd->obd_name, rc);

	rc = llog_init_handle(env, loghandle,
			      LLOG_F_IS_PLAIN | LLOG_F_ZAP_WHEN_EMPTY,
			      &cathandle->lgh_hdr->llh_tgtuuid);

	/* build the record for this log in the catalog */
	rec->lid_hdr.lrh_len = sizeof(*rec);
	rec->lid_hdr.lrh_type = LLOG_LOGID_MAGIC;
	rec->lid_id = loghandle->lgh_id;

	/* append the new record into catalog. The new index will be
	 * assigned to the record and updated in rec header */
	rc = llog_write_rec(env, cathandle, &rec->lid_hdr,
			    &loghandle->u.phd.phd_cookie, LLOG_NEXT_IDX, th);
		GOTO(out_destroy, rc);

	CDEBUG(D_OTHER, "new plain log "DFID".%u of catalog "DFID"\n",
	       PFID(&loghandle->lgh_id.lgl_oi.oi_fid), rec->lid_hdr.lrh_index,
	       PFID(&cathandle->lgh_id.lgl_oi.oi_fid));

	/* remember our own catalog slot for later cancellation */
	loghandle->lgh_hdr->llh_cat_idx = rec->lid_hdr.lrh_index;

	/* limit max size of plain llog so that space can be
	 * released sooner, especially on small filesystems */
	/* 2MB for the cases when free space hasn't been learned yet */
	loghandle->lgh_max_size = 2 << 20;
	dt = lu2dt_dev(cathandle->lgh_obj->do_lu.lo_dev);
	rc = dt_statfs(env, dt, &lgi->lgi_statfs);
	if (rc == 0 && lgi->lgi_statfs.os_bfree > 0) {
		/* cap plain llog size at 1/64 of current free space */
		__u64 freespace = (lgi->lgi_statfs.os_bfree *
				   lgi->lgi_statfs.os_bsize) >> 6;
		if (freespace < loghandle->lgh_max_size)
			loghandle->lgh_max_size = freespace;
		/* shouldn't be > 128MB in any case?
		 * it's 256K records of 512 bytes each */
		if (freespace > (128 << 20))
			loghandle->lgh_max_size = 128 << 20;

	if (handle != NULL) {
		/* stop the local transaction, propagating any error via
		 * th_result */
		handle->th_result = rc >= 0 ? 0 : rc;
		dt_trans_stop(env, dt, handle);

	/* error path after the catalog record write failed: */
	/* to signal llog_cat_close() it shouldn't try to destroy the llog,
	 * we want to destroy it in this transaction, otherwise the object
	 * becomes an orphan */
	loghandle->lgh_hdr->llh_flags &= ~LLOG_F_ZAP_WHEN_EMPTY;
	/* this is to mimic a full log, so another llog_cat_current_log()
	 * can skip it and ask for another one */
	loghandle->lgh_last_idx = LLOG_HDR_BITMAP_SIZE(loghandle->lgh_hdr) + 1;
	llog_trans_destroy(env, loghandle, th);
	dt_trans_stop(env, dt, handle);
232 static int llog_cat_refresh(const struct lu_env *env,
233 struct llog_handle *cathandle)
235 struct llog_handle *loghandle;
238 down_write(&cathandle->lgh_lock);
239 list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
241 if (!llog_exist(loghandle))
244 rc = llog_read_header(env, loghandle, NULL);
249 rc = llog_read_header(env, cathandle, NULL);
251 up_write(&loghandle->lgh_lock);
/*
 * prepare current/next log for catalog.
 *
 * if \a *ploghandle is NULL, open it, and declare create, NB, if \a
 * *ploghandle is remote, create it synchronously here, see comments
 * in llog_cat_new_log().
 *
 * \a cathandle->lgh_lock is down_read-ed, it gets down_write-ed if \a
 * *ploghandle has to be opened.
 *
 * NOTE(review): extraction dropped lines here (the thandle parameter,
 * braces, error checks, restart label); only comments were added, the
 * surviving code text is untouched.
 */
static int llog_cat_prep_log(const struct lu_env *env,
			     struct llog_handle *cathandle,
			     struct llog_handle **ploghandle,
	if (IS_ERR_OR_NULL(*ploghandle)) {
		/* need to open a new plain llog: upgrade the catalog lock to
		 * write mode and re-check under it */
		up_read(&cathandle->lgh_lock);
		down_write(&cathandle->lgh_lock);
		if (IS_ERR_OR_NULL(*ploghandle)) {
			struct llog_handle *loghandle;

			rc = llog_open(env, cathandle->lgh_ctxt, &loghandle,
				       NULL, NULL, LLOG_OPEN_NEW);
				/* publish the handle and track it on the
				 * catalog's open-plain-llog list */
				*ploghandle = loghandle;
				list_add_tail(&loghandle->u.phd.phd_entry,
					      &cathandle->u.chd.chd_head);

	/* does the plain llog object exist on disk yet? */
	rc = llog_exist(*ploghandle);

	if (dt_object_remote(cathandle->lgh_obj)) {
		down_write_nested(&(*ploghandle)->lgh_lock, LLOGH_LOG);
		if (!llog_exist(*ploghandle)) {
			/* For remote operation, if we put the llog object
			 * creation in the current transaction, then the
			 * llog object will not be created on the remote
			 * target until the transaction stop, if other
			 * operations start before the transaction stop,
			 * and use the same llog object, will be dependent
			 * on the success of this transaction. So let's
			 * create the llog object synchronously here to
			 * remove the dependency. */
			rc = llog_cat_new_log(env, cathandle, *ploghandle,
			/* stale-handle path: drop locks, refresh the cached
			 * headers, then retake the catalog read lock */
			up_write(&(*ploghandle)->lgh_lock);
			up_write(&cathandle->lgh_lock);
			up_read(&cathandle->lgh_lock);

			rc = llog_cat_refresh(env, cathandle);
			down_read_nested(&cathandle->lgh_lock,
			/* *ploghandle might become NULL, restart */

		up_write(&(*ploghandle)->lgh_lock);
		/* local catalog: only declarations are needed, creation is
		 * done inside the caller's transaction */
		struct llog_thread_info *lgi = llog_info(env);
		struct llog_logid_rec *lirec = &lgi->lgi_logid;

		rc = llog_declare_create(env, *ploghandle, th);

		lirec->lid_hdr.lrh_len = sizeof(*lirec);
		rc = llog_declare_write_rec(env, cathandle, &lirec->lid_hdr, -1,

	/* downgrade back to the read lock the caller expects to hold */
	up_write(&cathandle->lgh_lock);
	down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
/* Open an existent log handle and add it to the open list.
 * This log handle will be closed when all of the records in it are removed.
 *
 * Assumes caller has already pushed us into the kernel context and is locking.
 * We return a lock on the handle to ensure nobody yanks it from us.
 *
 * This takes extra reference on llog_handle via llog_handle_get() and require
 * this reference to be put by caller using llog_handle_put()
 */
int llog_cat_id2handle(const struct lu_env *env, struct llog_handle *cathandle,
		       struct llog_handle **res, struct llog_logid *logid)
	struct llog_handle *loghandle;

	if (cathandle == NULL)

	/* plain llogs inherit the extension flags of their catalog */
	fmt = cathandle->lgh_hdr->llh_flags & LLOG_F_EXT_MASK;
	down_write(&cathandle->lgh_lock);
	/* fast path: the llog may already be on the catalog's open list */
	list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
		struct llog_logid *cgl = &loghandle->lgh_id;

		if (ostid_id(&cgl->lgl_oi) == ostid_id(&logid->lgl_oi) &&
		    ostid_seq(&cgl->lgl_oi) == ostid_seq(&logid->lgl_oi)) {
			if (cgl->lgl_ogen != logid->lgl_ogen) {
				/* same id but different generation: stale
				 * entry, warn and keep looking */
				CWARN("%s: log "DFID" generation %x != %x\n",
				      loghandle->lgh_ctxt->loc_obd->obd_name,
				      PFID(&logid->lgl_oi.oi_fid),
				      cgl->lgl_ogen, logid->lgl_ogen);
			loghandle->u.phd.phd_cat_handle = cathandle;
			up_write(&cathandle->lgh_lock);
	up_write(&cathandle->lgh_lock);

	/* slow path: open the plain llog by logid and initialize it */
	rc = llog_open(env, cathandle->lgh_ctxt, &loghandle, logid, NULL,
		CERROR("%s: error opening log id "DFID":%x: rc = %d\n",
		       cathandle->lgh_ctxt->loc_obd->obd_name,
		       PFID(&logid->lgl_oi.oi_fid), logid->lgl_ogen, rc);

	rc = llog_init_handle(env, loghandle, LLOG_F_IS_PLAIN | fmt, NULL);
		llog_close(env, loghandle);

	/* track the freshly-opened handle on the catalog's open list */
	down_write(&cathandle->lgh_lock);
	list_add_tail(&loghandle->u.phd.phd_entry, &cathandle->u.chd.chd_head);
	up_write(&cathandle->lgh_lock);

	loghandle->u.phd.phd_cat_handle = cathandle;
	loghandle->u.phd.phd_cookie.lgc_lgl = cathandle->lgh_id;
	loghandle->u.phd.phd_cookie.lgc_index =
		loghandle->lgh_hdr->llh_cat_idx;
	/* extra reference for the caller, dropped via llog_handle_put() */
	llog_handle_get(loghandle);
/* Close the catalog handle: walk the open list, destroy plain llogs that are
 * empty and flagged LLOG_F_ZAP_WHEN_EMPTY (erasing their catalog entries),
 * close the rest, then close the catalog itself.
 */
int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle)
	struct llog_handle *loghandle, *n;

	list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head,
		struct llog_log_hdr *llh = loghandle->lgh_hdr;

		/* unlink open-not-created llogs */
		list_del_init(&loghandle->u.phd.phd_entry);
		llh = loghandle->lgh_hdr;
		/* destroy an empty plain llog (only the header record left);
		 * remote llogs (lgh_obj == NULL) are skipped */
		if (loghandle->lgh_obj != NULL && llh != NULL &&
		    (llh->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
		    (llh->llh_count == 1)) {
			rc = llog_destroy(env, loghandle);
				CERROR("%s: failure destroying log during "
				       "cleanup: rc = %d\n",
				       loghandle->lgh_ctxt->loc_obd->obd_name,

			/* erase its slot from the catalog too */
			index = loghandle->u.phd.phd_cookie.lgc_index;
			llog_cat_cleanup(env, cathandle, NULL, index);

		llog_close(env, loghandle);

	/* if handle was stored in ctxt, remove it too */
	if (cathandle->lgh_ctxt->loc_handle == cathandle)
		cathandle->lgh_ctxt->loc_handle = NULL;
	rc = llog_close(env, cathandle);
EXPORT_SYMBOL(llog_cat_close);
/** Return the currently active log handle. If the current log handle doesn't
 * have enough space left for the current record, start a new one.
 *
 * If reclen is 0, we only want to know what the currently active log is,
 * otherwise we get a lock on this log so nobody can steal our space.
 *
 * Assumes caller has already pushed us into the kernel context and is locking.
 *
 * NOTE: loghandle is write-locked upon successful return
 */
static struct llog_handle *llog_cat_current_log(struct llog_handle *cathandle,
	struct llog_handle *loghandle = NULL;

	/* fault injection: pretend the current llog creation failed so the
	 * "next log" path below is exercised */
	if (OBD_FAIL_CHECK(OBD_FAIL_MDS_LLOG_CREATE_FAILED2)) {
		down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
		GOTO(next, loghandle);

	/* optimistic pass under the shared catalog lock */
	down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
	loghandle = cathandle->u.chd.chd_current_log;
		struct llog_log_hdr *llh;

		down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
		llh = loghandle->lgh_hdr;
		/* llh == NULL means not created yet; either way there is
		 * room, return with loghandle write-locked */
		if (llh == NULL || !llog_is_full(loghandle)) {
			up_read(&cathandle->lgh_lock);
		up_write(&loghandle->lgh_lock);
	up_read(&cathandle->lgh_lock);

	/* time to use next log */

	/* first, we have to make sure the state hasn't changed */
	down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
	loghandle = cathandle->u.chd.chd_current_log;
		struct llog_log_hdr *llh;

		down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
		llh = loghandle->lgh_hdr;
		if (llh == NULL || !llog_is_full(loghandle))
			GOTO(out_unlock, loghandle);
		up_write(&loghandle->lgh_lock);

	/* Sigh, the chd_next_log and chd_current_log is initialized
	 * in declare phase, and we do not serialize the catlog
	 * accessing, so it might be possible the llog creation
	 * thread (see llog_cat_declare_add_rec()) did not create
	 * llog successfully, then the following thread might
	 * meet this situation. */
	if (IS_ERR_OR_NULL(cathandle->u.chd.chd_next_log)) {
		CERROR("%s: next log does not exist!\n",
		       cathandle->lgh_ctxt->loc_obd->obd_name);
		loghandle = ERR_PTR(-EIO);
		if (cathandle->u.chd.chd_next_log == NULL) {
			/* Store the error in chd_next_log, so
			 * the following process can get correct
			 * status without re-reporting it */
			cathandle->u.chd.chd_next_log = loghandle;
		GOTO(out_unlock, loghandle);

	CDEBUG(D_INODE, "use next log\n");

	/* promote next log to current; a fresh next log will be prepared by
	 * a later llog_cat_declare_add_rec() */
	loghandle = cathandle->u.chd.chd_next_log;
	cathandle->u.chd.chd_current_log = loghandle;
	cathandle->u.chd.chd_next_log = NULL;
	down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);

	up_write(&cathandle->lgh_lock);
/* Add a single record to the recovery log(s) using a catalog
 * Returns as llog_write_record
 *
 * Assumes caller has already pushed us into the kernel context.
 */
int llog_cat_add_rec(const struct lu_env *env, struct llog_handle *cathandle,
		     struct llog_rec_hdr *rec, struct llog_cookie *reccookie,
	struct llog_handle *loghandle;

	/* a record must fit within one llog chunk */
	LASSERT(rec->lrh_len <= cathandle->lgh_ctxt->loc_chunk_size);

	loghandle = llog_cat_current_log(cathandle, th);
	if (IS_ERR(loghandle))
		RETURN(PTR_ERR(loghandle));

	/* loghandle is already locked by llog_cat_current_log() for us */
	if (!llog_exist(loghandle)) {
		/* materialize the plain llog on disk before first write */
		rc = llog_cat_new_log(env, cathandle, loghandle, th);
			up_write(&loghandle->lgh_lock);
			/* nobody should be trying to use this llog */
			down_write(&cathandle->lgh_lock);
			/* only reset current log if still room in catalog, to
			 * avoid unnecessary and racy creation of new and
			 * partially initialized llog_handle
			 */
			if ((cathandle->u.chd.chd_current_log == loghandle) &&
				cathandle->u.chd.chd_current_log = NULL;
			up_write(&cathandle->lgh_lock);

	/* now let's try to add the record */
	rc = llog_write_rec(env, loghandle, rec, reccookie, LLOG_NEXT_IDX, th);
		CDEBUG_LIMIT(rc == -ENOSPC ? D_HA : D_ERROR,
			     "llog_write_rec %d: lh=%p\n", rc, loghandle);
		/* -ENOSPC is returned if no empty records left
		 * and when it's lack of space on the storage.
		 * there is no point to try again if it's the second
		 * case. many callers (like llog test) expect ENOSPC,
		 * so we preserve this error code, but look for the
		 * actual cause here */
		if (rc == -ENOSPC && llog_is_full(loghandle))
	up_write(&loghandle->lgh_lock);

	if (rc == -ENOBUFS) {
		/* the retry on a fresh llog failed too: report */
		CERROR("%s: error on 2nd llog: rc = %d\n",
		       cathandle->lgh_ctxt->loc_obd->obd_name, rc);
EXPORT_SYMBOL(llog_cat_add_rec);
/* Declare (reserve credits for) adding @rec through the catalog: prepare
 * both the current and the next plain llog and declare the record write
 * into each, so llog_cat_add_rec() can fall over to the next log if the
 * current one fills up inside the same transaction.
 */
int llog_cat_declare_add_rec(const struct lu_env *env,
			     struct llog_handle *cathandle,
			     struct llog_rec_hdr *rec, struct thandle *th)

	down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
	rc = llog_cat_prep_log(env, cathandle,
			       &cathandle->u.chd.chd_current_log, th);

	rc = llog_cat_prep_log(env, cathandle, &cathandle->u.chd.chd_next_log,

	rc = llog_declare_write_rec(env, cathandle->u.chd.chd_current_log,
	if (rc == -ESTALE && dt_object_remote(cathandle->lgh_obj)) {
		/* remote llog changed under us: refresh the cached headers
		 * and retry */
		up_read(&cathandle->lgh_lock);
		rc = llog_cat_refresh(env, cathandle);

	/*
	 * XXX: we hope for declarations made for existing llog this might be
	 * not correct with some backends where declarations are expected
	 * against specific object like ZFS with full debugging enabled.
	 */
	rc = llog_declare_write_rec(env, cathandle->u.chd.chd_next_log, rec, -1,

	up_read(&cathandle->lgh_lock);
EXPORT_SYMBOL(llog_cat_declare_add_rec);
/* Add @rec through the catalog inside a locally created transaction:
 * declare, start, write, stop.  Convenience wrapper combining
 * llog_cat_declare_add_rec() and llog_cat_add_rec().
 */
int llog_cat_add(const struct lu_env *env, struct llog_handle *cathandle,
		 struct llog_rec_hdr *rec, struct llog_cookie *reccookie)
	struct llog_ctxt *ctxt;
	struct dt_device *dt;
	struct thandle *th = NULL;

	ctxt = cathandle->lgh_ctxt;
	LASSERT(ctxt->loc_exp);

	LASSERT(cathandle->lgh_obj != NULL);
	dt = lu2dt_dev(cathandle->lgh_obj->do_lu.lo_dev);

	th = dt_trans_create(env, dt);

	rc = llog_cat_declare_add_rec(env, cathandle, rec, th);

	rc = dt_trans_start_local(env, dt, th);

	rc = llog_cat_add_rec(env, cathandle, rec, reccookie, th);

	dt_trans_stop(env, dt, th);
EXPORT_SYMBOL(llog_cat_add);
/* Cancel @count records (indices in @index) inside the plain llog @lgl,
 * resolving the llog through the catalog.  When the plain llog becomes
 * empty and is destroyed (LLOG_DEL_PLAIN), erase its catalog entry too.
 */
int llog_cat_cancel_arr_rec(const struct lu_env *env,
			    struct llog_handle *cathandle,
			    struct llog_logid *lgl, int count, int *index)
	struct llog_handle *loghandle;

	rc = llog_cat_id2handle(env, cathandle, &loghandle, lgl);
		CDEBUG(D_HA, "%s: cannot find llog for handle "DFID":%x"
		       cathandle->lgh_ctxt->loc_obd->obd_name,
		       PFID(&lgl->lgl_oi.oi_fid), lgl->lgl_ogen, rc);

	if ((cathandle->lgh_ctxt->loc_flags &
	     LLOG_CTXT_FLAG_NORMAL_FID) && !llog_exist(loghandle)) {
		/* For update log, some of loghandles of cathandle
		 * might not exist because remote llog creation might
		 * be failed, so let's skip the record cancellation
		 * for these non-exist llogs.
		 */
		CDEBUG(D_HA, "%s: llog "DFID":%x does not exist"
		       cathandle->lgh_ctxt->loc_obd->obd_name,
		       PFID(&lgl->lgl_oi.oi_fid), lgl->lgl_ogen, rc);
		llog_handle_put(loghandle);

	rc = llog_cancel_arr_rec(env, loghandle, count, index);
	if (rc == LLOG_DEL_PLAIN) { /* log has been destroyed */
		/* remove the now-dangling catalog entry as well */
		cat_index = loghandle->u.phd.phd_cookie.lgc_index;
		rc = llog_cat_cleanup(env, cathandle, loghandle, cat_index);
			CERROR("%s: fail to cancel catalog record: rc = %d\n",
			       cathandle->lgh_ctxt->loc_obd->obd_name, rc);

	llog_handle_put(loghandle);

		CERROR("%s: fail to cancel %d llog-records: rc = %d\n",
		       cathandle->lgh_ctxt->loc_obd->obd_name, count,
EXPORT_SYMBOL(llog_cat_cancel_arr_rec);
/* For each cookie in the cookie array, we clear the log in-use bit and either:
 * - the log is empty, so mark it free in the catalog header and delete it
 * - the log is not empty, just write out the log header
 *
 * The cookies may be in different log files, so we need to get new logs
 * as we go.
 *
 * Assumes caller has already pushed us into the kernel context.
 */
int llog_cat_cancel_records(const struct lu_env *env,
			    struct llog_handle *cathandle, int count,
			    struct llog_cookie *cookies)
	int i, rc = 0, failed = 0;

	for (i = 0; i < count; i++, cookies++) {
		/* cancel one record at a time; remember the error but keep
		 * going so a bad cookie does not block the rest */
		lrc = llog_cat_cancel_arr_rec(env, cathandle, &cookies->lgc_lgl,
					      1, &cookies->lgc_index);

		CERROR("%s: fail to cancel %d of %d llog-records: rc = %d\n",
		       cathandle->lgh_ctxt->loc_obd->obd_name, failed, count,
EXPORT_SYMBOL(llog_cat_cancel_records);
/* Common prologue of the llog_cat_process_*() callbacks: validate that the
 * catalog record @rec is a LLOG_LOGID_MAGIC record, open the plain llog it
 * names into *@llhp, and proactively destroy it when it is an empty
 * LLOG_F_ZAP_WHEN_EMPTY candidate.  Returns LLOG_DEL_RECORD for stale
 * catalog entries whose plain llog no longer exists.
 */
static int llog_cat_process_common(const struct lu_env *env,
				   struct llog_handle *cat_llh,
				   struct llog_rec_hdr *rec,
				   struct llog_handle **llhp)
	struct llog_logid_rec *lir = container_of(rec, typeof(*lir), lid_hdr);
	struct llog_log_hdr *hdr;

	if (rec->lrh_type != le32_to_cpu(LLOG_LOGID_MAGIC)) {
		CWARN("%s: invalid record in catalog "DFID":%x: rc = %d\n",
		      cat_llh->lgh_ctxt->loc_obd->obd_name,
		      PFID(&cat_llh->lgh_id.lgl_oi.oi_fid),
		      cat_llh->lgh_id.lgl_ogen, rc);

	CDEBUG(D_HA, "processing log "DFID":%x at index %u of catalog "DFID"\n",
	       PFID(&lir->lid_id.lgl_oi.oi_fid), lir->lid_id.lgl_ogen,
	       le32_to_cpu(rec->lrh_index),
	       PFID(&cat_llh->lgh_id.lgl_oi.oi_fid));

	rc = llog_cat_id2handle(env, cat_llh, llhp, &lir->lid_id);
		/* After a server crash, a stub of index record in catlog could
		 * be kept, because plain log destroy + catlog index record
		 * deletion are not atomic. So we end up with an index but no
		 * actual record. Destroy the index and move on. */
		if (rc == -ENOENT || rc == -ESTALE)
			rc = LLOG_DEL_RECORD;
		CWARN("%s: can't find llog handle "DFID":%x: rc = %d\n",
		      cat_llh->lgh_ctxt->loc_obd->obd_name,
		      PFID(&lir->lid_id.lgl_oi.oi_fid),
		      lir->lid_id.lgl_ogen, rc);

	/* clean old empty llogs, do not consider current llog in use */
	/* ignore remote (lgh_obj == NULL) llogs */
	hdr = (*llhp)->lgh_hdr;
	if ((hdr->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
	    hdr->llh_count == 1 && cat_llh->lgh_obj != NULL &&
	    *llhp != cat_llh->u.chd.chd_current_log) {
		rc = llog_destroy(env, *llhp);
			CWARN("%s: can't destroy empty log "DFID": rc = %d\n",
			      (*llhp)->lgh_ctxt->loc_obd->obd_name,
			      PFID(&lir->lid_id.lgl_oi.oi_fid), rc);
/* Per-catalog-record callback for llog_cat_process_or_fork(): open the plain
 * llog named by @rec and run the user callback d->lpd_cb over its records,
 * honouring the startcat/startidx offsets for the first processed llog.
 */
static int llog_cat_process_cb(const struct lu_env *env,
			       struct llog_handle *cat_llh,
			       struct llog_rec_hdr *rec, void *data)
	struct llog_process_data *d = data;
	struct llog_handle *llh = NULL;

	rc = llog_cat_process_common(env, cat_llh, rec, &llh);

	if (rec->lrh_index < d->lpd_startcat) {
		/* Skip processing of the logs until startcat */
	} else if (d->lpd_startidx > 0) {
		/* first llog: start at the requested record index */
		struct llog_process_cat_data cd;

		cd.lpcd_first_idx = d->lpd_startidx;
		cd.lpcd_last_idx = 0;
		rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data,
		/* Continue processing the next log from idx 0 */
		rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data,

	/* The empty plain log was destroyed while processing */
	if (rc == LLOG_DEL_PLAIN) {
		rc = llog_cat_cleanup(env, cat_llh, llh,
				      llh->u.phd.phd_cookie.lgc_index);
	} else if (rc == LLOG_DEL_RECORD) {
		/* clear wrong catalog entry */
		rc = llog_cat_cleanup(env, cat_llh, NULL, rec->lrh_index);

	llog_handle_put(llh);
/* Walk the catalog applying @cat_cb to every catalog record from
 * (startcat, startidx) on.  A catalog whose in-use window has wrapped past
 * index zero is processed in two passes: the tail segment first, then the
 * head up to lgh_last_idx.
 */
int llog_cat_process_or_fork(const struct lu_env *env,
			     struct llog_handle *cat_llh, llog_cb_t cat_cb,
			     llog_cb_t cb, void *data, int startcat,
			     int startidx, bool fork)
	struct llog_process_data d;
	struct llog_log_hdr *llh = cat_llh->lgh_hdr;

	LASSERT(llh->llh_flags & LLOG_F_IS_CAT);

	d.lpd_startcat = (startcat == LLOG_CAT_FIRST ? 0 : startcat);
	d.lpd_startidx = startidx;

	/* wrapped catalog: in-use window crosses index zero */
	if (llh->llh_cat_idx >= cat_llh->lgh_last_idx &&
	    llh->llh_count > 1) {
		struct llog_process_cat_data cd;

		CWARN("%s: catlog "DFID" crosses index zero\n",
		      cat_llh->lgh_ctxt->loc_obd->obd_name,
		      PFID(&cat_llh->lgh_id.lgl_oi.oi_fid));
		/* startcat = 0 is default value for general processing */
		if ((startcat != LLOG_CAT_FIRST &&
		     startcat >= llh->llh_cat_idx) || !startcat) {
			/* processing the catalog part at the end */
			cd.lpcd_first_idx = (startcat ? startcat :
			if (OBD_FAIL_PRECHECK(OBD_FAIL_CAT_RECORDS))
				cd.lpcd_last_idx = cfs_fail_val;
				cd.lpcd_last_idx = 0;
			rc = llog_process_or_fork(env, cat_llh, cat_cb,
			/* Reset the startcat because it has already been
			 * handled by the pass above */

		/* processing the catalog part at the beginning */
		cd.lpcd_first_idx = (startcat == LLOG_CAT_FIRST) ? 0 : startcat;
		/* Note, the processing will stop at the lgh_last_idx value,
		 * and it could be increased during processing. So records
		 * between current lgh_last_idx and lgh_last_idx in future
		 * would left unprocessed.
		 */
		cd.lpcd_last_idx = cat_llh->lgh_last_idx;
		rc = llog_process_or_fork(env, cat_llh, cat_cb,
		rc = llog_process_or_fork(env, cat_llh, cat_cb,
EXPORT_SYMBOL(llog_cat_process_or_fork);
/* Convenience wrapper: process the whole catalog with the standard
 * llog_cat_process_cb callback and no forked thread.
 */
int llog_cat_process(const struct lu_env *env, struct llog_handle *cat_llh,
		     llog_cb_t cb, void *data, int startcat, int startidx)
	return llog_cat_process_or_fork(env, cat_llh, llog_cat_process_cb,
					cb, data, startcat, startidx, false);
EXPORT_SYMBOL(llog_cat_process);
/* Catalog-record callback for llog_cat_size(): accumulate each plain llog's
 * byte size into the __u64 accumulator passed via d->lpd_data.
 */
static int llog_cat_size_cb(const struct lu_env *env,
			    struct llog_handle *cat_llh,
			    struct llog_rec_hdr *rec, void *data)
	struct llog_process_data *d = data;
	struct llog_handle *llh = NULL;
	__u64 *cum_size = d->lpd_data;

	rc = llog_cat_process_common(env, cat_llh, rec, &llh);

	if (rc == LLOG_DEL_PLAIN) {
		/* empty log was deleted, don't count it */
		rc = llog_cat_cleanup(env, cat_llh, llh,
				      llh->u.phd.phd_cookie.lgc_index);
	} else if (rc == LLOG_DEL_RECORD) {
		/* clear wrong catalog entry */
		rc = llog_cat_cleanup(env, cat_llh, NULL, rec->lrh_index);
		size = llog_size(env, llh);

		CDEBUG(D_INFO, "Add llog entry "DFID" size=%llu, tot=%llu\n",
		       PFID(&llh->lgh_id.lgl_oi.oi_fid), size, *cum_size);

	llog_handle_put(llh);
/* Return the total size in bytes of the catalog llog plus all of its plain
 * llogs (accumulated by llog_cat_size_cb).
 */
__u64 llog_cat_size(const struct lu_env *env, struct llog_handle *cat_llh)
	__u64 size = llog_size(env, cat_llh);

	llog_cat_process_or_fork(env, cat_llh, llog_cat_size_cb,
				 NULL, &size, 0, 0, false);
EXPORT_SYMBOL(llog_cat_size);
/* currently returns the number of "free" entries in catalog,
 * ie the available entries for a new plain LLOG file creation,
 * even if catalog has wrapped
 */
__u32 llog_cat_free_space(struct llog_handle *cat_llh)
	/* simulate almost full Catalog */
	if (OBD_FAIL_CHECK(OBD_FAIL_CAT_FREE_RECORDS))
		return cfs_fail_val;

	/* only the header record present: everything but the header is free */
	if (cat_llh->lgh_hdr->llh_count == 1)
		return LLOG_HDR_BITMAP_SIZE(cat_llh->lgh_hdr) - 1;

	/* not wrapped yet: bitmap size minus the in-use window */
	if (cat_llh->lgh_last_idx > cat_llh->lgh_hdr->llh_cat_idx)
		return LLOG_HDR_BITMAP_SIZE(cat_llh->lgh_hdr) - 1 +
		       cat_llh->lgh_hdr->llh_cat_idx - cat_llh->lgh_last_idx;

	/* catalog is presently wrapped */
	return cat_llh->lgh_hdr->llh_cat_idx - cat_llh->lgh_last_idx;
EXPORT_SYMBOL(llog_cat_free_space);
/* Catalog-record callback for llog_cat_reverse_process(): run the user
 * callback d->lpd_cb over the plain llog's records in reverse order,
 * cleaning up empty/stale llogs encountered along the way.
 */
static int llog_cat_reverse_process_cb(const struct lu_env *env,
				       struct llog_handle *cat_llh,
				       struct llog_rec_hdr *rec, void *data)
	struct llog_process_data *d = data;
	struct llog_handle *llh;

	rc = llog_cat_process_common(env, cat_llh, rec, &llh);
	/* The empty plain log was destroyed while processing */
	if (rc == LLOG_DEL_PLAIN) {
		rc = llog_cat_cleanup(env, cat_llh, llh,
				      llh->u.phd.phd_cookie.lgc_index);
	} else if (rc == LLOG_DEL_RECORD) {
		/* clear wrong catalog entry */
		rc = llog_cat_cleanup(env, cat_llh, NULL, rec->lrh_index);

	rc = llog_reverse_process(env, llh, d->lpd_cb, d->lpd_data, NULL);

	/* The empty plain was destroyed while processing */
	if (rc == LLOG_DEL_PLAIN)
		rc = llog_cat_cleanup(env, cat_llh, llh,
				      llh->u.phd.phd_cookie.lgc_index);

	llog_handle_put(llh);
/* Walk the catalog backwards, applying @cb to each plain llog's records in
 * reverse order.  Like the forward walk, a catalog whose in-use window
 * crosses index zero is processed in two segments (newest first).
 */
int llog_cat_reverse_process(const struct lu_env *env,
			     struct llog_handle *cat_llh,
			     llog_cb_t cb, void *data)
	struct llog_process_data d;
	struct llog_process_cat_data cd;
	struct llog_log_hdr *llh = cat_llh->lgh_hdr;

	LASSERT(llh->llh_flags & LLOG_F_IS_CAT);

	if (llh->llh_cat_idx >= cat_llh->lgh_last_idx &&
	    llh->llh_count > 1) {
		CWARN("%s: catalog "DFID" crosses index zero\n",
		      cat_llh->lgh_ctxt->loc_obd->obd_name,
		      PFID(&cat_llh->lgh_id.lgl_oi.oi_fid));

		/* newest segment [0, lgh_last_idx] first */
		cd.lpcd_first_idx = 0;
		cd.lpcd_last_idx = cat_llh->lgh_last_idx;
		rc = llog_reverse_process(env, cat_llh,
					  llog_cat_reverse_process_cb,

		/* then the older segment after llh_cat_idx.
		 * NOTE(review): llh_cat_idx is read without le32_to_cpu()
		 * everywhere else in this file (e.g. llog_cat_process_or_fork,
		 * llog_cat_free_space); the byte-swap here looks inconsistent
		 * — confirm against the on-disk vs in-memory header layout. */
		cd.lpcd_first_idx = le32_to_cpu(llh->llh_cat_idx);
		cd.lpcd_last_idx = 0;
		rc = llog_reverse_process(env, cat_llh,
					  llog_cat_reverse_process_cb,

		rc = llog_reverse_process(env, cat_llh,
					  llog_cat_reverse_process_cb,
EXPORT_SYMBOL(llog_cat_reverse_process);
/* Advance llh_cat_idx after catalog slot @idx was cancelled.  llh_cat_idx
 * is the index just before the first/oldest still-in-use entry, so when the
 * oldest entry goes away we scan forward over cleared bitmap bits to find
 * the new oldest one.
 */
static int llog_cat_set_first_idx(struct llog_handle *cathandle, int idx)
	struct llog_log_hdr *llh = cathandle->lgh_hdr;

	bitmap_size = LLOG_HDR_BITMAP_SIZE(llh);
	/*
	 * The llh_cat_idx equals to the first used index minus 1
	 * so if we canceled the first index then llh_cat_idx
	 * must be advanced past it.
	 */
	if (llh->llh_cat_idx == (idx - 1)) {
		llh->llh_cat_idx = idx;

		/* keep advancing while the following slots are free too */
		while (idx != cathandle->lgh_last_idx) {
			idx = (idx + 1) % bitmap_size;
			if (!ext2_test_bit(idx, LLOG_HDR_BITMAP(llh))) {
				/* update llh_cat_idx for each unset bit,
				 * expecting the next one is set */
				llh->llh_cat_idx = idx;
			} else if (idx == 0) {
				/* skip header bit */
				llh->llh_cat_idx = 0;
				/* the first index is found */

	CDEBUG(D_RPCTRACE, "catlog "DFID" first idx %u, last_idx %u\n",
	       PFID(&cathandle->lgh_id.lgl_oi.oi_fid),
	       llh->llh_cat_idx, cathandle->lgh_last_idx);
/* Cleanup deleted plain llog traces from catalog: drop the in-memory handle
 * (when given) and cancel the catalog record at @index.
 *
 * NOTE(review): this function continues beyond the end of the visible chunk;
 * comments were added to the visible part only.
 */
int llog_cat_cleanup(const struct lu_env *env, struct llog_handle *cathandle,
		     struct llog_handle *loghandle, int index)

	if (loghandle != NULL) {
		/* remove destroyed llog from catalog list and
		 * chd_current_log variable */
		down_write(&cathandle->lgh_lock);
		if (cathandle->u.chd.chd_current_log == loghandle)
			cathandle->u.chd.chd_current_log = NULL;
		list_del_init(&loghandle->u.phd.phd_entry);
		up_write(&cathandle->lgh_lock);
		LASSERT(index == loghandle->u.phd.phd_cookie.lgc_index);
		/* llog was opened and kept in a list, close it now */
		llog_close(env, loghandle);

	/* do not attempt to cleanup on-disk llog if on client side */
	if (cathandle->lgh_obj == NULL)

	/* remove plain llog entry from catalog by index */
	llog_cat_set_first_idx(cathandle, index);
	rc = llog_cancel_rec(env, cathandle, index);
		CDEBUG(D_HA, "cancel plain log at index %u of catalog "DFID"\n",
		       index, PFID(&cathandle->lgh_id.lgl_oi.oi_fid));