4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/obdclass/llog_cat.c
34 * OST<->MDS recovery logging infrastructure.
36 * Invariants in implementation:
37 * - we do not share logs among different OST<->MDS connections, so that
38 * if an OST or MDS fails it need only look at log(s) relevant to itself
40 * Author: Andreas Dilger <adilger@clusterfs.com>
41 * Author: Alexey Zhuravlev <alexey.zhuravlev@intel.com>
42 * Author: Mikhail Pershin <mike.pershin@intel.com>
45 #define DEBUG_SUBSYSTEM S_LOG
48 #include <obd_class.h>
50 #include "llog_internal.h"
52 /* Create a new log handle and add it to the open list.
53 * This log handle will be closed when all of the records in it are removed.
55 * Assumes caller has already pushed us into the kernel context and is locking.
/*
 * Create a new plain llog object and register it in the catalog.
 *
 * Picks the next catalog slot (modulo the header bitmap size, so the
 * catalog wraps), refuses if the slot would collide with the oldest
 * in-use entry, creates the plain llog (in the caller's transaction
 * "th" if given, otherwise in a local one), appends a llog_logid_rec
 * for it to the catalog, and caps lgh_max_size from dt_statfs() output.
 *
 * NOTE(review): this listing is truncated -- several original lines
 * (local declarations such as rc/index, ENTRY/RETURN, some braces and
 * error labels) are missing, so comments describe only visible code.
 */
57 static int llog_cat_new_log(const struct lu_env *env,
58 struct llog_handle *cathandle,
59 struct llog_handle *loghandle,
62 struct llog_thread_info *lgi = llog_info(env);
63 struct llog_logid_rec *rec = &lgi->lgi_logid;
64 struct thandle *handle = NULL;
65 struct dt_device *dt = NULL;
66 struct llog_log_hdr *llh = cathandle->lgh_hdr;
/* next catalog slot; OBD_FAIL_CAT_RECORDS shrinks the effective bitmap
 * size to cfs_fail_val + 1 for fault-injection testing */
71 index = (cathandle->lgh_last_idx + 1) %
72 (OBD_FAIL_PRECHECK(OBD_FAIL_CAT_RECORDS) ? (cfs_fail_val + 1) :
73 LLOG_HDR_BITMAP_SIZE(llh));
75 /* check that new llog index will not overlap with the first one.
76 * - llh_cat_idx is the index just before the first/oldest still in-use
78 * - lgh_last_idx is the last/newest used index in catalog
80 * When catalog is not wrapped yet then lgh_last_idx is always larger
81 * than llh_cat_idx. After the wrap around lgh_last_idx re-starts
82 * from 0 and llh_cat_idx becomes the upper limit for it
84 * Check if catalog has already wrapped around or not by comparing
85 * last_idx and cat_idx */
86 if ((index == llh->llh_cat_idx + 1 && llh->llh_count > 1) ||
87 (index == 0 && llh->llh_cat_idx == 0)) {
/* catalog full: warn (named vs. FID-identified catalog) and bail out */
88 if (cathandle->lgh_name == NULL) {
89 CWARN("%s: there are no more free slots in catalog "
91 loghandle->lgh_ctxt->loc_obd->obd_name,
92 PFID(&cathandle->lgh_id.lgl_oi.oi_fid),
93 cathandle->lgh_id.lgl_ogen);
95 CWARN("%s: there are no more free slots in "
97 loghandle->lgh_ctxt->loc_obd->obd_name,
/* fault injection: simulate plain-llog creation failure */
103 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_LLOG_CREATE_FAILED))
106 if (loghandle->lgh_hdr != NULL) {
107 /* If llog object is remote and creation is failed, lgh_hdr
108 * might be left over here, free it first */
109 LASSERT(!llog_exist(loghandle));
110 OBD_FREE_LARGE(loghandle->lgh_hdr, loghandle->lgh_hdr_size);
111 loghandle->lgh_hdr = NULL;
/* no caller transaction: start a local one on the catalog's device
 * (presumably the th == NULL branch -- TODO confirm, lines missing) */
115 dt = lu2dt_dev(cathandle->lgh_obj->do_lu.lo_dev);
117 handle = dt_trans_create(env, dt);
119 RETURN(PTR_ERR(handle));
121 /* Create update llog object synchronously, which
122 * happens during inialization process see
123 * lod_sub_prep_llog(), to make sure the update
124 * llog object is created before corss-MDT writing
125 * updates into the llog object */
126 if (cathandle->lgh_ctxt->loc_flags & LLOG_CTXT_FLAG_NORMAL_FID)
129 handle->th_wait_submit = 1;
131 rc = llog_declare_create(env, loghandle, handle);
/* declare the catalog-record write for the local transaction */
135 rec->lid_hdr.lrh_len = sizeof(*rec);
136 rec->lid_hdr.lrh_type = LLOG_LOGID_MAGIC;
137 rec->lid_id = loghandle->lgh_id;
138 rc = llog_declare_write_rec(env, cathandle, &rec->lid_hdr, -1,
143 rc = dt_trans_start_local(env, dt, handle);
150 rc = llog_create(env, loghandle, th);
151 /* if llog is already created, no need to initialize it */
154 } else if (rc != 0) {
155 CERROR("%s: can't create new plain llog in catalog: rc = %d\n",
156 loghandle->lgh_ctxt->loc_obd->obd_name, rc);
160 rc = llog_init_handle(env, loghandle,
161 LLOG_F_IS_PLAIN | LLOG_F_ZAP_WHEN_EMPTY,
162 &cathandle->lgh_hdr->llh_tgtuuid);
166 /* build the record for this log in the catalog */
167 rec->lid_hdr.lrh_len = sizeof(*rec);
168 rec->lid_hdr.lrh_type = LLOG_LOGID_MAGIC;
169 rec->lid_id = loghandle->lgh_id;
171 /* append the new record into catalog. The new index will be
172 * assigned to the record and updated in rec header */
173 rc = llog_write_rec(env, cathandle, &rec->lid_hdr,
174 &loghandle->u.phd.phd_cookie, LLOG_NEXT_IDX, th);
176 GOTO(out_destroy, rc);
178 CDEBUG(D_OTHER, "new plain log "DFID".%u of catalog "DFID"\n",
179 PFID(&loghandle->lgh_id.lgl_oi.oi_fid), rec->lid_hdr.lrh_index,
180 PFID(&cathandle->lgh_id.lgl_oi.oi_fid));
/* remember which catalog slot points at this plain llog */
182 loghandle->lgh_hdr->llh_cat_idx = rec->lid_hdr.lrh_index;
184 /* limit max size of plain llog so that space can be
185 * released sooner, especially on small filesystems */
186 /* 2MB for the cases when free space hasn't been learned yet */
187 loghandle->lgh_max_size = 2 << 20;
188 dt = lu2dt_dev(cathandle->lgh_obj->do_lu.lo_dev);
189 rc = dt_statfs(env, dt, &lgi->lgi_statfs);
190 if (rc == 0 && lgi->lgi_statfs.os_bfree > 0) {
/* use 1/64 of current free space as the per-llog cap */
191 __u64 freespace = (lgi->lgi_statfs.os_bfree *
192 lgi->lgi_statfs.os_bsize) >> 6;
193 if (freespace < loghandle->lgh_max_size)
194 loghandle->lgh_max_size = freespace;
195 /* shouldn't be > 128MB in any case?
196 * it's 256K records of 512 bytes each */
197 if (freespace > (128 << 20))
198 loghandle->lgh_max_size = 128 << 20;
/* stop the local transaction if we started one */
203 if (handle != NULL) {
204 handle->th_result = rc >= 0 ? 0 : rc;
205 dt_trans_stop(env, dt, handle);
/* out_destroy path: undo the half-created plain llog */
210 /* to signal llog_cat_close() it shouldn't try to destroy the llog,
211 * we want to destroy it in this transaction, otherwise the object
212 * becomes an orphan */
213 loghandle->lgh_hdr->llh_flags &= ~LLOG_F_ZAP_WHEN_EMPTY;
214 /* this is to mimic full log, so another llog_cat_current_log()
215 * can skip it and ask for another onet */
216 loghandle->lgh_last_idx = LLOG_HDR_BITMAP_SIZE(loghandle->lgh_hdr) + 1;
217 llog_trans_destroy(env, loghandle, th);
219 dt_trans_stop(env, dt, handle);
223 /* Open an existent log handle and add it to the open list.
224 * This log handle will be closed when all of the records in it are removed.
226 * Assumes caller has already pushed us into the kernel context and is locking.
227 * We return a lock on the handle to ensure nobody yanks it from us.
229 * This takes extra reference on llog_handle via llog_handle_get() and require
230 * this reference to be put by caller using llog_handle_put()
/*
 * Look up (or open) the plain llog identified by @logid within @cathandle
 * and return it in *res with an extra reference (llog_handle_get());
 * the caller must drop it with llog_handle_put().
 *
 * First searches the catalog's open-handle list under lgh_lock; on a miss
 * it opens the llog by id, initializes its header, and links it into the
 * catalog's chd_head list.
 *
 * NOTE(review): listing truncated -- some lines (rc declaration, RETURNs,
 * braces) are missing from this view.
 */
232 int llog_cat_id2handle(const struct lu_env *env, struct llog_handle *cathandle,
233 struct llog_handle **res, struct llog_logid *logid)
235 struct llog_handle *loghandle;
241 if (cathandle == NULL)
/* plain llogs inherit extension flags from the catalog header */
244 fmt = cathandle->lgh_hdr->llh_flags & LLOG_F_EXT_MASK;
245 down_write(&cathandle->lgh_lock);
246 list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
248 struct llog_logid *cgl = &loghandle->lgh_id;
250 if (ostid_id(&cgl->lgl_oi) == ostid_id(&logid->lgl_oi) &&
251 ostid_seq(&cgl->lgl_oi) == ostid_seq(&logid->lgl_oi)) {
/* same object id/seq but different generation: stale cookie, warn */
252 if (cgl->lgl_ogen != logid->lgl_ogen) {
253 CWARN("%s: log "DFID" generation %x != %x\n",
254 loghandle->lgh_ctxt->loc_obd->obd_name,
255 PFID(&logid->lgl_oi.oi_fid),
256 cgl->lgl_ogen, logid->lgl_ogen);
259 loghandle->u.phd.phd_cat_handle = cathandle;
260 up_write(&cathandle->lgh_lock);
264 up_write(&cathandle->lgh_lock);
/* not cached: open by logid and set it up as a plain llog */
266 rc = llog_open(env, cathandle->lgh_ctxt, &loghandle, logid, NULL,
269 CERROR("%s: error opening log id "DFID":%x: rc = %d\n",
270 cathandle->lgh_ctxt->loc_obd->obd_name,
271 PFID(&logid->lgl_oi.oi_fid), logid->lgl_ogen, rc);
275 rc = llog_init_handle(env, loghandle, LLOG_F_IS_PLAIN | fmt, NULL);
277 llog_close(env, loghandle);
282 down_write(&cathandle->lgh_lock);
283 list_add_tail(&loghandle->u.phd.phd_entry, &cathandle->u.chd.chd_head);
284 up_write(&cathandle->lgh_lock);
/* link plain handle back to its catalog and record its cookie */
286 loghandle->u.phd.phd_cat_handle = cathandle;
287 loghandle->u.phd.phd_cookie.lgc_lgl = cathandle->lgh_id;
288 loghandle->u.phd.phd_cookie.lgc_index =
289 loghandle->lgh_hdr->llh_cat_idx;
292 llog_handle_get(loghandle);
/*
 * Close a catalog handle: walk every plain llog still linked on
 * chd_head, destroy the ones that are empty and marked ZAP_WHEN_EMPTY
 * (cleaning their catalog slot via llog_cat_cleanup()), close the rest,
 * detach the catalog from its context, and finally close the catalog
 * itself.
 *
 * NOTE(review): listing truncated -- rc/index declarations and some
 * braces are not visible here.
 */
297 int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle)
299 struct llog_handle *loghandle, *n;
/* _safe variant: entries are deleted while iterating */
304 list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head,
306 struct llog_log_hdr *llh = loghandle->lgh_hdr;
309 /* unlink open-not-created llogs */
310 list_del_init(&loghandle->u.phd.phd_entry);
311 llh = loghandle->lgh_hdr;
/* destroy only local, empty (llh_count == 1, header record only)
 * llogs that asked to be zapped when empty */
312 if (loghandle->lgh_obj != NULL && llh != NULL &&
313 (llh->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
314 (llh->llh_count == 1)) {
315 rc = llog_destroy(env, loghandle);
317 CERROR("%s: failure destroying log during "
318 "cleanup: rc = %d\n",
319 loghandle->lgh_ctxt->loc_obd->obd_name,
322 index = loghandle->u.phd.phd_cookie.lgc_index;
323 llog_cat_cleanup(env, cathandle, NULL, index);
325 llog_close(env, loghandle);
327 /* if handle was stored in ctxt, remove it too */
328 if (cathandle->lgh_ctxt->loc_handle == cathandle)
329 cathandle->lgh_ctxt->loc_handle = NULL;
330 rc = llog_close(env, cathandle);
333 EXPORT_SYMBOL(llog_cat_close);
336 * lockdep markers for nested struct llog_handle::lgh_lock locking.
343 /** Return the currently active log handle. If the current log handle doesn't
344 * have enough space left for the current record, start a new one.
346 * If reclen is 0, we only want to know what the currently active log is,
347 * otherwise we get a lock on this log so nobody can steal our space.
349 * Assumes caller has already pushed us into the kernel context and is locking.
351 * NOTE: loghandle is write-locked upon successful return
353 static struct llog_handle *llog_cat_current_log(struct llog_handle *cathandle,
356 struct llog_handle *loghandle = NULL;
/* fault injection: skip the fast path and force "use next log" */
360 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_LLOG_CREATE_FAILED2)) {
361 down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
362 GOTO(next, loghandle);
/* fast path: catalog read-locked, current log write-locked; nested
 * lockdep subclasses (LLOGH_CAT outer, LLOGH_LOG inner) */
365 down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
366 loghandle = cathandle->u.chd.chd_current_log;
368 struct llog_log_hdr *llh;
370 down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
371 llh = loghandle->lgh_hdr;
/* llh == NULL means not created yet, so it cannot be full */
372 if (llh == NULL || !llog_is_full(loghandle)) {
373 up_read(&cathandle->lgh_lock);
376 up_write(&loghandle->lgh_lock);
379 up_read(&cathandle->lgh_lock);
381 /* time to use next log */
383 /* first, we have to make sure the state hasn't changed */
384 down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
385 loghandle = cathandle->u.chd.chd_current_log;
387 struct llog_log_hdr *llh;
389 down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
390 llh = loghandle->lgh_hdr;
391 if (llh == NULL || !llog_is_full(loghandle))
392 GOTO(out_unlock, loghandle);
394 up_write(&loghandle->lgh_lock);
398 /* Sigh, the chd_next_log and chd_current_log is initialized
399 * in declare phase, and we do not serialize the catlog
400 * accessing, so it might be possible the llog creation
401 * thread (see llog_cat_declare_add_rec()) did not create
402 * llog successfully, then the following thread might
403 * meet this situation. */
404 if (IS_ERR_OR_NULL(cathandle->u.chd.chd_next_log)) {
405 CERROR("%s: next log does not exist!\n",
406 cathandle->lgh_ctxt->loc_obd->obd_name);
407 loghandle = ERR_PTR(-EIO);
408 if (cathandle->u.chd.chd_next_log == NULL) {
409 /* Store the error in chd_next_log, so
410 * the following process can get correct
412 cathandle->u.chd.chd_next_log = loghandle;
414 GOTO(out_unlock, loghandle);
417 CDEBUG(D_INODE, "use next log\n");
/* promote the pre-declared next log to current */
419 loghandle = cathandle->u.chd.chd_next_log;
420 cathandle->u.chd.chd_current_log = loghandle;
421 cathandle->u.chd.chd_next_log = NULL;
422 down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
425 up_write(&cathandle->lgh_lock);
/*
 * Re-read headers of the catalog and all its existing plain llogs after
 * the catalog was marked stale (lgh_stale), then clear the stale flag.
 * Runs entirely under the catalog's write lock; returns early if another
 * thread already refreshed the header.
 *
 * NOTE(review): listing truncated -- rc declaration, error handling and
 * RETURN lines are not visible here.
 */
430 static int llog_cat_update_header(const struct lu_env *env,
431 struct llog_handle *cathandle)
433 struct llog_handle *loghandle;
438 down_write(&cathandle->lgh_lock);
439 if (!cathandle->lgh_stale) {
440 up_write(&cathandle->lgh_lock);
443 list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
/* skip plain llogs whose objects were never created */
445 if (!llog_exist(loghandle))
448 rc = llog_read_header(env, loghandle, NULL);
450 up_write(&cathandle->lgh_lock);
454 rc = llog_read_header(env, cathandle, NULL);
456 cathandle->lgh_stale = 0;
457 up_write(&cathandle->lgh_lock);
464 /* Add a single record to the recovery log(s) using a catalog
465 * Returns as llog_write_record
467 * Assumes caller has already pushed us into the kernel context.
469 int llog_cat_add_rec(const struct lu_env *env, struct llog_handle *cathandle,
470 struct llog_rec_hdr *rec, struct llog_cookie *reccookie,
473 struct llog_handle *loghandle;
/* record must fit within one llog chunk */
477 LASSERT(rec->lrh_len <= cathandle->lgh_ctxt->loc_chunk_size);
480 loghandle = llog_cat_current_log(cathandle, th);
481 if (IS_ERR(loghandle))
482 RETURN(PTR_ERR(loghandle));
484 /* loghandle is already locked by llog_cat_current_log() for us */
485 if (!llog_exist(loghandle)) {
/* current log was declared but not created yet: create it now */
486 rc = llog_cat_new_log(env, cathandle, loghandle, th);
488 up_write(&loghandle->lgh_lock);
489 /* nobody should be trying to use this llog */
490 down_write(&cathandle->lgh_lock);
491 /* only reset current log if still room in catalog, to
492 * avoid unnecessarily and racy creation of new and
493 * partially initialized llog_handle
495 if ((cathandle->u.chd.chd_current_log == loghandle) &&
497 cathandle->u.chd.chd_current_log = NULL;
498 up_write(&cathandle->lgh_lock);
502 /* now let's try to add the record */
503 rc = llog_write_rec(env, loghandle, rec, reccookie, LLOG_NEXT_IDX, th);
505 CDEBUG_LIMIT(rc == -ENOSPC ? D_HA : D_ERROR,
506 "llog_write_rec %d: lh=%p\n", rc, loghandle);
507 /* -ENOSPC is returned if no empty records left
508 * and when it's lack of space on the stogage.
509 * there is no point to try again if it's the second
510 * case. many callers (like llog test) expect ENOSPC,
511 * so we preserve this error code, but look for the
512 * actual cause here */
513 if (rc == -ENOSPC && llog_is_full(loghandle))
516 up_write(&loghandle->lgh_lock);
/* log-full case: retry once on the next log; a second failure is fatal
 * (presumably -ENOBUFS is the internal "log full, retry" code --
 * TODO confirm, intermediate lines missing) */
518 if (rc == -ENOBUFS) {
521 CERROR("%s: error on 2nd llog: rc = %d\n",
522 cathandle->lgh_ctxt->loc_obd->obd_name, rc);
527 EXPORT_SYMBOL(llog_cat_add_rec);
/*
 * Declare (reserve transaction credits for) adding one record through the
 * catalog. Ensures both a "current" and a "next" plain llog handle exist
 * (opening them LLOG_OPEN_NEW under the catalog write lock), then declares
 * creation and record writes for whichever of them do not exist on disk
 * yet. For remote (cross-MDT) llog objects the creation is performed
 * synchronously here instead of being deferred to the transaction, to
 * avoid making later users dependent on this transaction's success.
 *
 * NOTE(review): listing truncated -- rc declaration, several GOTO/RETURN
 * lines, and some braces are missing from this view.
 */
529 int llog_cat_declare_add_rec(const struct lu_env *env,
530 struct llog_handle *cathandle,
531 struct llog_rec_hdr *rec, struct thandle *th)
533 struct llog_thread_info *lgi = llog_info(env);
534 struct llog_logid_rec *lirec = &lgi->lgi_logid;
535 struct llog_handle *loghandle, *next;
/* double-checked under lgh_lock: open a new current plain llog */
540 if (cathandle->u.chd.chd_current_log == NULL) {
541 /* declare new plain llog */
542 down_write(&cathandle->lgh_lock);
543 if (cathandle->u.chd.chd_current_log == NULL) {
544 rc = llog_open(env, cathandle->lgh_ctxt, &loghandle,
545 NULL, NULL, LLOG_OPEN_NEW);
547 cathandle->u.chd.chd_current_log = loghandle;
548 list_add_tail(&loghandle->u.phd.phd_entry,
549 &cathandle->u.chd.chd_head);
552 up_write(&cathandle->lgh_lock);
553 } else if (cathandle->u.chd.chd_next_log == NULL ||
554 IS_ERR(cathandle->u.chd.chd_next_log)) {
555 /* declare next plain llog */
556 down_write(&cathandle->lgh_lock);
557 if (cathandle->u.chd.chd_next_log == NULL ||
558 IS_ERR(cathandle->u.chd.chd_next_log)) {
559 rc = llog_open(env, cathandle->lgh_ctxt, &loghandle,
560 NULL, NULL, LLOG_OPEN_NEW);
562 cathandle->u.chd.chd_next_log = loghandle;
563 list_add_tail(&loghandle->u.phd.phd_entry,
564 &cathandle->u.chd.chd_head);
567 up_write(&cathandle->lgh_lock);
/* template catalog record used by the declare calls below */
572 lirec->lid_hdr.lrh_len = sizeof(*lirec);
574 if (!llog_exist(cathandle->u.chd.chd_current_log)) {
575 if (dt_object_remote(cathandle->lgh_obj)) {
576 /* For remote operation, if we put the llog object
577 * creation in the current transaction, then the
578 * llog object will not be created on the remote
579 * target until the transaction stop, if other
580 * operations start before the transaction stop,
581 * and use the same llog object, will be dependent
582 * on the success of this transaction. So let's
583 * create the llog object synchronously here to
584 * remove the dependency. */
586 down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
587 loghandle = cathandle->u.chd.chd_current_log;
588 down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
589 if (cathandle->lgh_stale) {
590 up_write(&loghandle->lgh_lock);
591 up_read(&cathandle->lgh_lock);
592 GOTO(out, rc = -EIO);
594 if (!llog_exist(loghandle)) {
595 rc = llog_cat_new_log(env, cathandle, loghandle,
/* creation failure marks the catalog stale for refresh */
598 cathandle->lgh_stale = 1;
600 up_write(&loghandle->lgh_lock);
601 up_read(&cathandle->lgh_lock);
603 rc = llog_cat_update_header(env, cathandle);
/* local object: defer creation to the transaction via declares */
611 rc = llog_declare_create(env,
612 cathandle->u.chd.chd_current_log, th);
615 llog_declare_write_rec(env, cathandle,
616 &lirec->lid_hdr, -1, th);
621 /* declare records in the llogs */
622 rc = llog_declare_write_rec(env, cathandle->u.chd.chd_current_log,
625 down_write(&cathandle->lgh_lock);
626 if (cathandle->lgh_stale) {
627 up_write(&cathandle->lgh_lock);
628 GOTO(out, rc = -EIO);
631 cathandle->lgh_stale = 1;
632 up_write(&cathandle->lgh_lock);
633 rc = llog_cat_update_header(env, cathandle);
/* same treatment for the pre-opened "next" llog */
641 next = cathandle->u.chd.chd_next_log;
642 if (!IS_ERR_OR_NULL(next)) {
643 if (!llog_exist(next)) {
644 if (dt_object_remote(cathandle->lgh_obj)) {
645 /* For remote operation, if we put the llog
646 * object creation in the current transaction,
647 * then the llog object will not be created on
648 * the remote target until the transaction stop,
649 * if other operations start before the
650 * transaction stop, and use the same llog
651 * object, will be dependent on the success of
652 * this transaction. So let's create the llog
653 * object synchronously here to remove the
655 down_write_nested(&cathandle->lgh_lock,
657 next = cathandle->u.chd.chd_next_log;
658 if (IS_ERR_OR_NULL(next)) {
659 /* Sigh, another thread just tried,
660 * let's fail as well */
661 up_write(&cathandle->lgh_lock);
669 down_write_nested(&next->lgh_lock, LLOGH_LOG);
670 if (!llog_exist(next)) {
671 rc = llog_cat_new_log(env, cathandle,
674 cathandle->u.chd.chd_next_log =
677 up_write(&next->lgh_lock);
678 up_write(&cathandle->lgh_lock);
682 rc = llog_declare_create(env, next, th);
683 llog_declare_write_rec(env, cathandle,
684 &lirec->lid_hdr, -1, th);
687 /* XXX: we hope for declarations made for existing llog
688 * this might be not correct with some backends
689 * where declarations are expected against specific
690 * object like ZFS with full debugging enabled */
691 /*llog_declare_write_rec(env, next, rec, -1, th);*/
696 EXPORT_SYMBOL(llog_cat_declare_add_rec);
/*
 * Convenience wrapper: add one record through the catalog inside a
 * self-contained local transaction (create, declare, start, add, stop).
 * Used when the caller does not already own a transaction handle.
 *
 * NOTE(review): listing truncated -- rc declaration, error checks after
 * dt_trans_create()/declare, and RETURN lines are not visible here.
 */
698 int llog_cat_add(const struct lu_env *env, struct llog_handle *cathandle,
699 struct llog_rec_hdr *rec, struct llog_cookie *reccookie)
701 struct llog_ctxt *ctxt;
702 struct dt_device *dt;
703 struct thandle *th = NULL;
706 ctxt = cathandle->lgh_ctxt;
708 LASSERT(ctxt->loc_exp);
710 LASSERT(cathandle->lgh_obj != NULL);
711 dt = lu2dt_dev(cathandle->lgh_obj->do_lu.lo_dev);
713 th = dt_trans_create(env, dt);
/* declare before start, per the OSD two-phase transaction model */
717 rc = llog_cat_declare_add_rec(env, cathandle, rec, th);
721 rc = dt_trans_start_local(env, dt, th);
724 rc = llog_cat_add_rec(env, cathandle, rec, reccookie, th);
726 dt_trans_stop(env, dt, th);
729 EXPORT_SYMBOL(llog_cat_add);
731 /* For each cookie in the cookie array, we clear the log in-use bit and either:
732 * - the log is empty, so mark it free in the catalog header and delete it
733 * - the log is not empty, just write out the log header
735 * The cookies may be in different log files, so we need to get new logs
738 * Assumes caller has already pushed us into the kernel context.
740 int llog_cat_cancel_records(const struct lu_env *env,
741 struct llog_handle *cathandle, int count,
742 struct llog_cookie *cookies)
744 int i, index, rc = 0, failed = 0;
748 for (i = 0; i < count; i++, cookies++) {
749 struct llog_handle *loghandle;
750 struct llog_logid *lgl = &cookies->lgc_lgl;
/* resolve the cookie's logid to an open plain-llog handle */
753 rc = llog_cat_id2handle(env, cathandle, &loghandle, lgl);
755 CDEBUG(D_HA, "%s: cannot find llog for handle "DFID":%x"
757 cathandle->lgh_ctxt->loc_obd->obd_name,
758 PFID(&lgl->lgl_oi.oi_fid), lgl->lgl_ogen, rc);
763 if ((cathandle->lgh_ctxt->loc_flags &
764 LLOG_CTXT_FLAG_NORMAL_FID) && !llog_exist(loghandle)) {
765 /* For update log, some of loghandles of cathandle
766 * might not exist because remote llog creation might
767 * be failed, so let's skip the record cancellation
768 * for these non-exist llogs.
771 CDEBUG(D_HA, "%s: llog "DFID":%x does not exist"
773 cathandle->lgh_ctxt->loc_obd->obd_name,
774 PFID(&lgl->lgl_oi.oi_fid), lgl->lgl_ogen, lrc);
781 lrc = llog_cancel_rec(env, loghandle, cookies->lgc_index);
782 if (lrc == LLOG_DEL_PLAIN) { /* log has been destroyed */
/* plain llog became empty and was destroyed: clear its catalog slot */
783 index = loghandle->u.phd.phd_cookie.lgc_index;
784 lrc = llog_cat_cleanup(env, cathandle, loghandle,
788 } else if (lrc == -ENOENT) {
789 if (rc == 0) /* ENOENT shouldn't rewrite any error */
791 } else if (lrc < 0) {
/* drop the reference taken by llog_cat_id2handle() */
796 llog_handle_put(loghandle);
799 CERROR("%s: fail to cancel %d of %d llog-records: rc = %d\n",
800 cathandle->lgh_ctxt->loc_obd->obd_name, failed, count,
805 EXPORT_SYMBOL(llog_cat_cancel_records);
/*
 * Shared preamble for the catalog-processing callbacks: validate that the
 * catalog record is a LLOG_LOGID_MAGIC entry, resolve it to a plain-llog
 * handle in *llhp (referenced; caller must llog_handle_put()), and destroy
 * plain llogs that are empty and marked ZAP_WHEN_EMPTY. Returns
 * LLOG_DEL_RECORD for stale/dangling catalog entries so callers can clean
 * the slot.
 *
 * NOTE(review): listing truncated -- rc declaration and several
 * RETURN/GOTO lines are not visible here.
 */
807 static int llog_cat_process_common(const struct lu_env *env,
808 struct llog_handle *cat_llh,
809 struct llog_rec_hdr *rec,
810 struct llog_handle **llhp)
812 struct llog_logid_rec *lir = container_of(rec, typeof(*lir), lid_hdr);
813 struct llog_log_hdr *hdr;
817 if (rec->lrh_type != le32_to_cpu(LLOG_LOGID_MAGIC)) {
819 CWARN("%s: invalid record in catalog "DFID":%x: rc = %d\n",
820 cat_llh->lgh_ctxt->loc_obd->obd_name,
821 PFID(&cat_llh->lgh_id.lgl_oi.oi_fid),
822 cat_llh->lgh_id.lgl_ogen, rc);
825 CDEBUG(D_HA, "processing log "DFID":%x at index %u of catalog "DFID"\n",
826 PFID(&lir->lid_id.lgl_oi.oi_fid), lir->lid_id.lgl_ogen,
827 le32_to_cpu(rec->lrh_index),
828 PFID(&cat_llh->lgh_id.lgl_oi.oi_fid));
830 rc = llog_cat_id2handle(env, cat_llh, llhp, &lir->lid_id);
832 /* After a server crash, a stub of index record in catlog could
833 * be kept, because plain log destroy + catlog index record
834 * deletion are not atomic. So we end up with an index but no
835 * actual record. Destroy the index and move on. */
836 if (rc == -ENOENT || rc == -ESTALE)
837 rc = LLOG_DEL_RECORD;
839 CWARN("%s: can't find llog handle "DFID":%x: rc = %d\n",
840 cat_llh->lgh_ctxt->loc_obd->obd_name,
841 PFID(&lir->lid_id.lgl_oi.oi_fid),
842 lir->lid_id.lgl_ogen, rc);
847 /* clean old empty llogs, do not consider current llog in use */
848 /* ignore remote (lgh_obj == NULL) llogs */
849 hdr = (*llhp)->lgh_hdr;
850 if ((hdr->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
851 hdr->llh_count == 1 && cat_llh->lgh_obj != NULL &&
852 *llhp != cat_llh->u.chd.chd_current_log) {
853 rc = llog_destroy(env, *llhp);
855 CWARN("%s: can't destroy empty log "DFID": rc = %d\n",
856 (*llhp)->lgh_ctxt->loc_obd->obd_name,
857 PFID(&lir->lid_id.lgl_oi.oi_fid), rc);
/*
 * Per-catalog-record callback for llog_cat_process_or_fork(): resolve the
 * plain llog via llog_cat_process_common(), then run the user callback
 * over its records with llog_process_or_fork(). Honors lpd_startcat
 * (skip logs before the start catalog index) and lpd_startidx (start
 * inside the first processed log, then continue from index 0). Cleans up
 * catalog slots for destroyed logs and stale entries.
 *
 * NOTE(review): listing truncated -- rc declaration and some RETURN/brace
 * lines are not visible here.
 */
864 static int llog_cat_process_cb(const struct lu_env *env,
865 struct llog_handle *cat_llh,
866 struct llog_rec_hdr *rec, void *data)
868 struct llog_process_data *d = data;
869 struct llog_handle *llh = NULL;
873 rc = llog_cat_process_common(env, cat_llh, rec, &llh);
877 if (rec->lrh_index < d->lpd_startcat) {
878 /* Skip processing of the logs until startcat */
880 } else if (d->lpd_startidx > 0) {
881 struct llog_process_cat_data cd;
883 cd.lpcd_first_idx = d->lpd_startidx;
884 cd.lpcd_last_idx = 0;
885 rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data,
887 /* Continue processing the next log from idx 0 */
890 rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data,
895 /* The empty plain log was destroyed while processing */
896 if (rc == LLOG_DEL_PLAIN) {
897 rc = llog_cat_cleanup(env, cat_llh, llh,
898 llh->u.phd.phd_cookie.lgc_index);
899 } else if (rc == LLOG_DEL_RECORD) {
900 /* clear wrong catalog entry */
901 rc = llog_cat_cleanup(env, cat_llh, NULL, rec->lrh_index);
/* drop the reference taken by llog_cat_process_common() */
905 llog_handle_put(llh);
/*
 * Iterate over all catalog records, invoking @cat_cb for each (normally
 * llog_cat_process_cb, which in turn runs @cb on every plain-llog record).
 * If the catalog has wrapped (llh_cat_idx >= lgh_last_idx with more than
 * the header record present), process in two passes: first from
 * llh_cat_idx to the bitmap end, then from 0 to lgh_last_idx.
 *
 * NOTE(review): listing truncated -- rc declaration, the llog_process_data
 * cb/data assignments, and RETURN lines are not visible here.
 */
910 int llog_cat_process_or_fork(const struct lu_env *env,
911 struct llog_handle *cat_llh, llog_cb_t cat_cb,
912 llog_cb_t cb, void *data, int startcat,
913 int startidx, bool fork)
915 struct llog_process_data d;
916 struct llog_log_hdr *llh = cat_llh->lgh_hdr;
920 LASSERT(llh->llh_flags & LLOG_F_IS_CAT);
923 d.lpd_startcat = startcat;
924 d.lpd_startidx = startidx;
926 if (llh->llh_cat_idx >= cat_llh->lgh_last_idx &&
927 llh->llh_count > 1) {
928 struct llog_process_cat_data cd;
930 CWARN("%s: catlog "DFID" crosses index zero\n",
931 cat_llh->lgh_ctxt->loc_obd->obd_name,
932 PFID(&cat_llh->lgh_id.lgl_oi.oi_fid));
/* wrapped catalog: pass 1, oldest entry to end of bitmap */
934 cd.lpcd_first_idx = llh->llh_cat_idx;
935 cd.lpcd_last_idx = 0;
936 rc = llog_process_or_fork(env, cat_llh, cat_cb,
/* pass 2: start of bitmap up to the newest entry */
941 cd.lpcd_first_idx = 0;
942 cd.lpcd_last_idx = cat_llh->lgh_last_idx;
943 rc = llog_process_or_fork(env, cat_llh, cat_cb,
/* non-wrapped catalog: single pass over the whole range */
946 rc = llog_process_or_fork(env, cat_llh, cat_cb,
/*
 * Non-forking catalog walk: run @cb over every record of every plain llog
 * referenced by @cat_llh, starting at catalog index @startcat and record
 * index @startidx. Thin wrapper over llog_cat_process_or_fork().
 */
953 int llog_cat_process(const struct lu_env *env, struct llog_handle *cat_llh,
954 llog_cb_t cb, void *data, int startcat, int startidx)
956 return llog_cat_process_or_fork(env, cat_llh, llog_cat_process_cb,
957 cb, data, startcat, startidx, false);
959 EXPORT_SYMBOL(llog_cat_process);
/*
 * Catalog callback used by llog_cat_size(): accumulate the size of each
 * plain llog into the __u64 pointed to by lpd_data, skipping (and cleaning
 * up) logs that were deleted or whose catalog entries are stale.
 *
 * NOTE(review): listing truncated -- rc/size declarations and RETURN lines
 * are not visible here.
 */
961 static int llog_cat_size_cb(const struct lu_env *env,
962 struct llog_handle *cat_llh,
963 struct llog_rec_hdr *rec, void *data)
965 struct llog_process_data *d = data;
966 struct llog_handle *llh = NULL;
967 __u64 *cum_size = d->lpd_data;
972 rc = llog_cat_process_common(env, cat_llh, rec, &llh);
974 if (rc == LLOG_DEL_PLAIN) {
975 /* empty log was deleted, don't count it */
976 rc = llog_cat_cleanup(env, cat_llh, llh,
977 llh->u.phd.phd_cookie.lgc_index);
978 } else if (rc == LLOG_DEL_RECORD) {
979 /* clear wrong catalog entry */
980 rc = llog_cat_cleanup(env, cat_llh, NULL, rec->lrh_index);
982 size = llog_size(env, llh);
985 CDEBUG(D_INFO, "Add llog entry "DFID" size=%llu, tot=%llu\n",
986 PFID(&llh->lgh_id.lgl_oi.oi_fid), size, *cum_size);
/* drop the reference taken by llog_cat_process_common() */
990 llog_handle_put(llh);
/*
 * Total on-disk size of a catalog: the catalog object itself plus every
 * plain llog it references, summed via llog_cat_size_cb().
 */
995 __u64 llog_cat_size(const struct lu_env *env, struct llog_handle *cat_llh)
997 __u64 size = llog_size(env, cat_llh);
999 llog_cat_process_or_fork(env, cat_llh, llog_cat_size_cb,
1000 NULL, &size, 0, 0, false);
1004 EXPORT_SYMBOL(llog_cat_size);
1006 /* currently returns the number of "free" entries in catalog,
1007 * ie the available entries for a new plain LLOG file creation,
1008 * even if catalog has wrapped
1010 __u32 llog_cat_free_space(struct llog_handle *cat_llh)
1012 /* simulate almost full Catalog */
1013 if (OBD_FAIL_CHECK(OBD_FAIL_CAT_FREE_RECORDS))
1014 return cfs_fail_val;
/* only the header record present: everything but slot 0 is free */
1016 if (cat_llh->lgh_hdr->llh_count == 1)
1017 return LLOG_HDR_BITMAP_SIZE(cat_llh->lgh_hdr) - 1;
/* not wrapped: free slots = total - used (last_idx - cat_idx) - header */
1019 if (cat_llh->lgh_last_idx > cat_llh->lgh_hdr->llh_cat_idx)
1020 return LLOG_HDR_BITMAP_SIZE(cat_llh->lgh_hdr) - 1 +
1021 cat_llh->lgh_hdr->llh_cat_idx - cat_llh->lgh_last_idx;
1023 /* catalog is presently wrapped */
1024 return cat_llh->lgh_hdr->llh_cat_idx - cat_llh->lgh_last_idx;
1026 EXPORT_SYMBOL(llog_cat_free_space);
/*
 * Catalog callback for llog_cat_reverse_process(): resolve each catalog
 * record to its plain llog, run the user callback over that log's records
 * in reverse order, and clean up catalog slots for destroyed logs and
 * stale entries.
 *
 * NOTE(review): listing truncated -- rc declaration and RETURN lines are
 * not visible here.
 */
1028 static int llog_cat_reverse_process_cb(const struct lu_env *env,
1029 struct llog_handle *cat_llh,
1030 struct llog_rec_hdr *rec, void *data)
1032 struct llog_process_data *d = data;
1033 struct llog_handle *llh;
1037 rc = llog_cat_process_common(env, cat_llh, rec, &llh);
1039 /* The empty plain log was destroyed while processing */
1040 if (rc == LLOG_DEL_PLAIN) {
1041 rc = llog_cat_cleanup(env, cat_llh, llh,
1042 llh->u.phd.phd_cookie.lgc_index);
1043 } else if (rc == LLOG_DEL_RECORD) {
1044 /* clear wrong catalog entry */
1045 rc = llog_cat_cleanup(env, cat_llh, NULL, rec->lrh_index);
1050 rc = llog_reverse_process(env, llh, d->lpd_cb, d->lpd_data, NULL);
1052 /* The empty plain was destroyed while processing */
1053 if (rc == LLOG_DEL_PLAIN)
1054 rc = llog_cat_cleanup(env, cat_llh, llh,
1055 llh->u.phd.phd_cookie.lgc_index);
/* drop the reference taken by llog_cat_process_common() */
1057 llog_handle_put(llh);
/*
 * Walk the catalog newest-to-oldest, running @cb over each plain llog's
 * records in reverse. Mirrors llog_cat_process_or_fork(): a wrapped
 * catalog is handled in two reverse passes (newest segment first, then
 * the segment above llh_cat_idx).
 *
 * NOTE(review): listing truncated -- rc declaration, the
 * llog_process_data assignments, and RETURN lines are not visible here.
 */
1061 int llog_cat_reverse_process(const struct lu_env *env,
1062 struct llog_handle *cat_llh,
1063 llog_cb_t cb, void *data)
1065 struct llog_process_data d;
1066 struct llog_process_cat_data cd;
1067 struct llog_log_hdr *llh = cat_llh->lgh_hdr;
1071 LASSERT(llh->llh_flags & LLOG_F_IS_CAT);
1075 if (llh->llh_cat_idx >= cat_llh->lgh_last_idx &&
1076 llh->llh_count > 1) {
1077 CWARN("%s: catalog "DFID" crosses index zero\n",
1078 cat_llh->lgh_ctxt->loc_obd->obd_name,
1079 PFID(&cat_llh->lgh_id.lgl_oi.oi_fid));
/* wrapped: reverse pass over [0, lgh_last_idx] first (newest) */
1081 cd.lpcd_first_idx = 0;
1082 cd.lpcd_last_idx = cat_llh->lgh_last_idx;
1083 rc = llog_reverse_process(env, cat_llh,
1084 llog_cat_reverse_process_cb,
/* then reverse pass over the older segment above llh_cat_idx */
1089 cd.lpcd_first_idx = le32_to_cpu(llh->llh_cat_idx);
1090 cd.lpcd_last_idx = 0;
1091 rc = llog_reverse_process(env, cat_llh,
1092 llog_cat_reverse_process_cb,
/* not wrapped: single reverse pass over the whole catalog */
1095 rc = llog_reverse_process(env, cat_llh,
1096 llog_cat_reverse_process_cb,
1102 EXPORT_SYMBOL(llog_cat_reverse_process);
/*
 * After cancelling catalog slot @idx, advance llh_cat_idx (the index just
 * before the oldest in-use slot) past any contiguous run of now-unused
 * bitmap slots, wrapping modulo the bitmap size and skipping the header
 * bit at index 0.
 *
 * NOTE(review): listing truncated -- bitmap_size declaration type, some
 * braces and the RETURN line are not visible here.
 */
1104 static int llog_cat_set_first_idx(struct llog_handle *cathandle, int idx)
1106 struct llog_log_hdr *llh = cathandle->lgh_hdr;
1111 bitmap_size = LLOG_HDR_BITMAP_SIZE(llh);
1113 * The llh_cat_idx equals to the first used index minus 1
1114 * so if we canceled the first index then llh_cat_idx
1117 if (llh->llh_cat_idx == (idx - 1)) {
1118 llh->llh_cat_idx = idx;
1120 while (idx != cathandle->lgh_last_idx) {
1121 idx = (idx + 1) % bitmap_size;
1122 if (!ext2_test_bit(idx, LLOG_HDR_BITMAP(llh))) {
1123 /* update llh_cat_idx for each unset bit,
1124 * expecting the next one is set */
1125 llh->llh_cat_idx = idx;
1126 } else if (idx == 0) {
1127 /* skip header bit */
1128 llh->llh_cat_idx = 0;
1131 /* the first index is found */
1136 CDEBUG(D_RPCTRACE, "catlog "DFID" first idx %u, last_idx %u\n",
1137 PFID(&cathandle->lgh_id.lgl_oi.oi_fid),
1138 llh->llh_cat_idx, cathandle->lgh_last_idx);
1144 /* Cleanup deleted plain llog traces from catalog */
/*
 * Remove all traces of a destroyed plain llog from its catalog: unlink
 * @loghandle (if given) from the catalog's list and chd_current_log, close
 * it, then advance the catalog's first-index marker and cancel the catalog
 * record at @index. On-disk cleanup is skipped for client-side catalogs
 * (lgh_obj == NULL).
 *
 * NOTE(review): the listing ends mid-function here -- the tail (final
 * error handling / RETURN) is not visible.
 */
1145 int llog_cat_cleanup(const struct lu_env *env, struct llog_handle *cathandle,
1146 struct llog_handle *loghandle, int index)
1151 if (loghandle != NULL) {
1152 /* remove destroyed llog from catalog list and
1153 * chd_current_log variable */
1154 down_write(&cathandle->lgh_lock);
1155 if (cathandle->u.chd.chd_current_log == loghandle)
1156 cathandle->u.chd.chd_current_log = NULL;
1157 list_del_init(&loghandle->u.phd.phd_entry);
1158 up_write(&cathandle->lgh_lock);
1159 LASSERT(index == loghandle->u.phd.phd_cookie.lgc_index);
1160 /* llog was opened and keep in a list, close it now */
1161 llog_close(env, loghandle);
1164 /* do not attempt to cleanup on-disk llog if on client side */
1165 if (cathandle->lgh_obj == NULL)
1168 /* remove plain llog entry from catalog by index */
1169 llog_cat_set_first_idx(cathandle, index);
1170 rc = llog_cancel_rec(env, cathandle, index);
1172 CDEBUG(D_HA, "cancel plain log at index %u of catalog "DFID"\n",
1173 index, PFID(&cathandle->lgh_id.lgl_oi.oi_fid));