4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2012, 2015, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/obdclass/llog_cat.c
38 * OST<->MDS recovery logging infrastructure.
40 * Invariants in implementation:
41 * - we do not share logs among different OST<->MDS connections, so that
42 * if an OST or MDS fails it need only look at log(s) relevant to itself
44 * Author: Andreas Dilger <adilger@clusterfs.com>
45 * Author: Alexey Zhuravlev <alexey.zhuravlev@intel.com>
46 * Author: Mikhail Pershin <mike.pershin@intel.com>
49 #define DEBUG_SUBSYSTEM S_LOG
52 #include <obd_class.h>
54 #include "llog_internal.h"
56 /* Create a new log handle and add it to the open list.
57 * This log handle will be closed when all of the records in it are removed.
59 * Assumes caller has already pushed us into the kernel context and is locking.
 *
 * NOTE(review): this copy of the file is sampled — several statements
 * (parameter-list tail, error labels, RETURNs) are not visible here, so the
 * comments below only describe the logic that IS visible.
61 static int llog_cat_new_log(const struct lu_env *env,
62 struct llog_handle *cathandle,
63 struct llog_handle *loghandle,
66 struct llog_thread_info *lgi = llog_info(env);
67 struct llog_logid_rec *rec = &lgi->lgi_logid;
68 struct thandle *handle = NULL;
69 struct dt_device *dt = NULL;
70 struct llog_log_hdr *llh = cathandle->lgh_hdr;
 /* Candidate catalog slot for the new plain llog; under the
  * OBD_FAIL_CAT_RECORDS fault injection the effective bitmap size is
  * shrunk to (cfs_fail_val + 1) to exercise wrap-around in testing. */
75 index = (cathandle->lgh_last_idx + 1) %
76 (OBD_FAIL_PRECHECK(OBD_FAIL_CAT_RECORDS) ? (cfs_fail_val + 1) :
77 LLOG_HDR_BITMAP_SIZE(llh));
79 /* check that new llog index will not overlap with the first one.
80 * - llh_cat_idx is the index just before the first/oldest still in-use
82 * - lgh_last_idx is the last/newest used index in catalog
84 * When catalog is not wrapped yet then lgh_last_idx is always larger
85 * than llh_cat_idx. After the wrap around lgh_last_idx re-starts
86 * from 0 and llh_cat_idx becomes the upper limit for it
88 * Check if catalog has already wrapped around or not by comparing
89 * last_idx and cat_idx */
90 if ((index == llh->llh_cat_idx + 1 && llh->llh_count > 1) ||
91 (index == 0 && llh->llh_cat_idx == 0)) {
92 CWARN("%s: there are no more free slots in catalog\n",
93 loghandle->lgh_ctxt->loc_obd->obd_name);
97 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_LLOG_CREATE_FAILED))
100 if (loghandle->lgh_hdr != NULL) {
101 /* If llog object is remote and creation failed, lgh_hdr
102 * might be left over here, free it first */
103 LASSERT(!llog_exist(loghandle));
104 OBD_FREE_PTR(loghandle->lgh_hdr);
105 loghandle->lgh_hdr = NULL;
 /* Start a local transaction on the catalog's backing dt device for
  * creating the plain llog and writing its catalog record. */
109 dt = lu2dt_dev(cathandle->lgh_obj->do_lu.lo_dev);
111 handle = dt_trans_create(env, dt);
113 RETURN(PTR_ERR(handle));
115 /* Create update llog object synchronously, which
116 * happens during the initialization process, see
117 * lod_sub_prep_llog(), to make sure the update
118 * llog object is created before cross-MDT writing
119 * updates into the llog object */
120 if (cathandle->lgh_ctxt->loc_flags & LLOG_CTXT_FLAG_NORMAL_FID)
123 handle->th_wait_submit = 1;
125 rc = llog_declare_create(env, loghandle, handle);
 /* Declare the catalog record (llog_logid_rec) pointing at the new log;
  * index -1 means "next free index". */
129 rec->lid_hdr.lrh_len = sizeof(*rec);
130 rec->lid_hdr.lrh_type = LLOG_LOGID_MAGIC;
131 rec->lid_id = loghandle->lgh_id;
132 rc = llog_declare_write_rec(env, cathandle, &rec->lid_hdr, -1,
137 rc = dt_trans_start_local(env, dt, handle);
144 rc = llog_create(env, loghandle, th);
145 /* if llog is already created, no need to initialize it */
148 } else if (rc != 0) {
149 CERROR("%s: can't create new plain llog in catalog: rc = %d\n",
150 loghandle->lgh_ctxt->loc_obd->obd_name, rc);
154 rc = llog_init_handle(env, loghandle,
155 LLOG_F_IS_PLAIN | LLOG_F_ZAP_WHEN_EMPTY,
156 &cathandle->lgh_hdr->llh_tgtuuid);
160 /* build the record for this log in the catalog */
161 rec->lid_hdr.lrh_len = sizeof(*rec);
162 rec->lid_hdr.lrh_type = LLOG_LOGID_MAGIC;
163 rec->lid_id = loghandle->lgh_id;
165 /* append the new record into catalog. The new index will be
166 * assigned to the record and updated in rec header */
167 rc = llog_write_rec(env, cathandle, &rec->lid_hdr,
168 &loghandle->u.phd.phd_cookie, LLOG_NEXT_IDX, th);
170 GOTO(out_destroy, rc);
172 CDEBUG(D_OTHER, "new plain log "DOSTID":%x for index %u of catalog"
173 DOSTID"\n", POSTID(&loghandle->lgh_id.lgl_oi),
174 loghandle->lgh_id.lgl_ogen, rec->lid_hdr.lrh_index,
175 POSTID(&cathandle->lgh_id.lgl_oi));
 /* Remember which catalog slot points at this plain llog so that
  * llog_cat_cleanup() can later cancel the right catalog record. */
177 loghandle->lgh_hdr->llh_cat_idx = rec->lid_hdr.lrh_index;
180 dt_trans_stop(env, dt, handle);
 /* out_destroy error path (visible tail): */
185 /* to signal llog_cat_close() it shouldn't try to destroy the llog,
186 * we want to destroy it in this transaction, otherwise the object
187 * becomes an orphan */
188 loghandle->lgh_hdr->llh_flags &= ~LLOG_F_ZAP_WHEN_EMPTY;
189 /* this is to mimic full log, so another llog_cat_current_log()
190 * can skip it and ask for another one */
191 loghandle->lgh_last_idx = LLOG_HDR_BITMAP_SIZE(llh) + 1;
192 llog_trans_destroy(env, loghandle, th);
196 /* Open an existing log handle and add it to the open list.
197 * This log handle will be closed when all of the records in it are removed.
199 * Assumes caller has already pushed us into the kernel context and is locking.
200 * We return a lock on the handle to ensure nobody yanks it from us.
202 * This takes an extra reference on llog_handle via llog_handle_get() and
203 * requires this reference to be put by caller using llog_handle_put()
 *
 * NOTE(review): sampled source — some lines (e.g. early returns, list head
 * name in list_for_each_entry, flags argument of llog_open) are elided here.
205 int llog_cat_id2handle(const struct lu_env *env, struct llog_handle *cathandle,
206 struct llog_handle **res, struct llog_logid *logid)
208 struct llog_handle *loghandle;
214 if (cathandle == NULL)
 /* Preserve extended-format flags of the catalog for the plain llog. */
217 fmt = cathandle->lgh_hdr->llh_flags & LLOG_F_EXT_MASK;
 /* First look for an already-open handle with a matching logid. */
218 down_write(&cathandle->lgh_lock);
219 list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
221 struct llog_logid *cgl = &loghandle->lgh_id;
223 if (ostid_id(&cgl->lgl_oi) == ostid_id(&logid->lgl_oi) &&
224 ostid_seq(&cgl->lgl_oi) == ostid_seq(&logid->lgl_oi)) {
225 if (cgl->lgl_ogen != logid->lgl_ogen) {
226 CERROR("%s: log "DOSTID" generation %x != %x\n",
227 loghandle->lgh_ctxt->loc_obd->obd_name,
228 POSTID(&logid->lgl_oi), cgl->lgl_ogen,
232 loghandle->u.phd.phd_cat_handle = cathandle;
233 up_write(&cathandle->lgh_lock);
237 up_write(&cathandle->lgh_lock);
 /* Not cached: open and initialize the plain llog by its logid. */
239 rc = llog_open(env, cathandle->lgh_ctxt, &loghandle, logid, NULL,
242 CERROR("%s: error opening log id "DOSTID":%x: rc = %d\n",
243 cathandle->lgh_ctxt->loc_obd->obd_name,
244 POSTID(&logid->lgl_oi), logid->lgl_ogen, rc);
248 rc = llog_init_handle(env, loghandle, LLOG_F_IS_PLAIN | fmt, NULL);
250 llog_close(env, loghandle);
 /* Publish the freshly opened handle on the catalog's open list and
  * link it back to its catalog slot. */
255 down_write(&cathandle->lgh_lock);
256 list_add_tail(&loghandle->u.phd.phd_entry, &cathandle->u.chd.chd_head);
257 up_write(&cathandle->lgh_lock);
259 loghandle->u.phd.phd_cat_handle = cathandle;
260 loghandle->u.phd.phd_cookie.lgc_lgl = cathandle->lgh_id;
261 loghandle->u.phd.phd_cookie.lgc_index =
262 loghandle->lgh_hdr->llh_cat_idx;
 /* Caller must drop this reference with llog_handle_put(). */
265 llog_handle_get(loghandle);
/* Close a catalog handle: walk every open plain llog on its list, destroying
 * the ones that are empty and flagged LLOG_F_ZAP_WHEN_EMPTY (and cancelling
 * their catalog records), then close the catalog itself.
 * NOTE(review): sampled source — loop braces/labels are partly elided. */
270 int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle)
272 struct llog_handle *loghandle, *n;
 /* _safe iteration: entries are unlinked/closed while walking. */
277 list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head,
279 struct llog_log_hdr *llh = loghandle->lgh_hdr;
282 /* unlink open-not-created llogs */
283 list_del_init(&loghandle->u.phd.phd_entry);
284 llh = loghandle->lgh_hdr;
 /* llh_count == 1 means only the header record remains, i.e. the
  * plain llog is empty and may be zapped. */
285 if (loghandle->lgh_obj != NULL && llh != NULL &&
286 (llh->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
287 (llh->llh_count == 1)) {
288 rc = llog_destroy(env, loghandle);
290 CERROR("%s: failure destroying log during "
291 "cleanup: rc = %d\n",
292 loghandle->lgh_ctxt->loc_obd->obd_name,
 /* Cancel the catalog record that pointed at the destroyed log. */
295 index = loghandle->u.phd.phd_cookie.lgc_index;
296 llog_cat_cleanup(env, cathandle, NULL, index);
298 llog_close(env, loghandle);
300 /* if handle was stored in ctxt, remove it too */
301 if (cathandle->lgh_ctxt->loc_handle == cathandle)
302 cathandle->lgh_ctxt->loc_handle = NULL;
303 rc = llog_close(env, cathandle);
306 EXPORT_SYMBOL(llog_cat_close);
309 * lockdep markers for nested struct llog_handle::lgh_lock locking.
316 /** Return the currently active log handle. If the current log handle doesn't
317 * have enough space left for the current record, start a new one.
319 * If reclen is 0, we only want to know what the currently active log is,
320 * otherwise we get a lock on this log so nobody can steal our space.
322 * Assumes caller has already pushed us into the kernel context and is locking.
324 * NOTE: loghandle is write-locked upon successful return
/* Return the currently active plain llog, write-locked, switching to the
 * prepared chd_next_log when the current one is full.
 * Locking: catalog lock is taken first (LLOGH_CAT nesting level), then the
 * plain llog lock (LLOGH_LOG) — the down_*_nested() calls are lockdep
 * annotations for this two-level hierarchy.
 * NOTE(review): sampled source — some unlock/RETURN/retry lines are elided,
 * so the fast path vs. slow path structure is only partially visible. */
326 static struct llog_handle *llog_cat_current_log(struct llog_handle *cathandle,
329 struct llog_handle *loghandle = NULL;
 /* Fault injection: force taking the "next log" slow path. */
333 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_LLOG_CREATE_FAILED2)) {
334 down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
335 GOTO(next, loghandle);
 /* Fast path: read-lock the catalog and reuse the current log if it
  * still has room. */
338 down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
339 loghandle = cathandle->u.chd.chd_current_log;
341 struct llog_log_hdr *llh;
343 down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
344 llh = loghandle->lgh_hdr;
 /* llh == NULL means the log object is not created yet — still usable. */
345 if (llh == NULL || !llog_is_full(loghandle)) {
346 up_read(&cathandle->lgh_lock);
349 up_write(&loghandle->lgh_lock);
352 up_read(&cathandle->lgh_lock);
354 /* time to use next log */
356 /* first, we have to make sure the state hasn't changed
 * (another thread may have swapped in a new current log while we
 * dropped the read lock above) */
357 down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
358 loghandle = cathandle->u.chd.chd_current_log;
360 struct llog_log_hdr *llh;
362 down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
363 llh = loghandle->lgh_hdr;
365 if (!llog_is_full(loghandle)) {
366 up_write(&cathandle->lgh_lock);
369 up_write(&loghandle->lgh_lock);
 /* Promote the pre-declared next log to current. */
374 CDEBUG(D_INODE, "use next log\n");
376 loghandle = cathandle->u.chd.chd_next_log;
377 cathandle->u.chd.chd_current_log = loghandle;
378 cathandle->u.chd.chd_next_log = NULL;
379 down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
380 up_write(&cathandle->lgh_lock);
385 /* Add a single record to the recovery log(s) using a catalog
386 * Returns as llog_write_record
388 * Assumes caller has already pushed us into the kernel context.
/* Add a single record via the catalog: pick the current plain llog (creating
 * it inside the caller's transaction if needed) and append the record.
 * NOTE(review): sampled source — the retry path after a full log and the
 * final RETURN are elided here. */
390 int llog_cat_add_rec(const struct lu_env *env, struct llog_handle *cathandle,
391 struct llog_rec_hdr *rec, struct llog_cookie *reccookie,
394 struct llog_handle *loghandle;
 /* Record must fit into one llog chunk. */
398 LASSERT(rec->lrh_len <= cathandle->lgh_ctxt->loc_chunk_size);
401 loghandle = llog_cat_current_log(cathandle, th);
402 LASSERT(!IS_ERR(loghandle));
404 /* loghandle is already locked by llog_cat_current_log() for us */
405 if (!llog_exist(loghandle)) {
406 rc = llog_cat_new_log(env, cathandle, loghandle, th);
 /* Creation failed: drop our claim on this handle so it is not
  * reused as the current log by anyone else. */
408 up_write(&loghandle->lgh_lock);
409 /* nobody should be trying to use this llog */
410 down_write(&cathandle->lgh_lock);
411 if (cathandle->u.chd.chd_current_log == loghandle)
412 cathandle->u.chd.chd_current_log = NULL;
413 up_write(&cathandle->lgh_lock);
417 /* now let's try to add the record */
418 rc = llog_write_rec(env, loghandle, rec, reccookie, LLOG_NEXT_IDX, th);
420 CDEBUG_LIMIT(rc == -ENOSPC ? D_HA : D_ERROR,
421 "llog_write_rec %d: lh=%p\n", rc, loghandle);
422 /* -ENOSPC is returned if no empty records are left
423 * and when there is a lack of space on the storage.
424 * there is no point to try again if it's the second
425 * case. many callers (like llog test) expect ENOSPC,
426 * so we preserve this error code, but look for the
427 * actual cause here */
428 if (rc == -ENOSPC && llog_is_full(loghandle))
431 up_write(&loghandle->lgh_lock);
 /* -ENOBUFS: the log filled up mid-write; a retry on the next log is
  * attempted (retry body elided in this sampled copy). */
433 if (rc == -ENOBUFS) {
436 CERROR("%s: error on 2nd llog: rc = %d\n",
437 cathandle->lgh_ctxt->loc_obd->obd_name, rc);
442 EXPORT_SYMBOL(llog_cat_add_rec);
/* Declare (reserve transaction credits for) a future llog_cat_add_rec():
 * ensure a current and a next plain llog handle exist, declare their creation
 * and the catalog-record writes, and declare the record write itself.
 * NOTE(review): sampled source — error checks after llog_open(), several
 * braces and the final RETURN are elided. */
444 int llog_cat_declare_add_rec(const struct lu_env *env,
445 struct llog_handle *cathandle,
446 struct llog_rec_hdr *rec, struct thandle *th)
448 struct llog_thread_info *lgi = llog_info(env);
449 struct llog_logid_rec *lirec = &lgi->lgi_logid;
450 struct llog_handle *loghandle, *next;
 /* Double-checked under lgh_lock: lazily open the current plain llog. */
455 if (cathandle->u.chd.chd_current_log == NULL) {
456 /* declare new plain llog */
457 down_write(&cathandle->lgh_lock);
458 if (cathandle->u.chd.chd_current_log == NULL) {
459 rc = llog_open(env, cathandle->lgh_ctxt, &loghandle,
460 NULL, NULL, LLOG_OPEN_NEW);
462 cathandle->u.chd.chd_current_log = loghandle;
463 list_add_tail(&loghandle->u.phd.phd_entry,
464 &cathandle->u.chd.chd_head);
467 up_write(&cathandle->lgh_lock);
468 } else if (cathandle->u.chd.chd_next_log == NULL) {
469 /* declare next plain llog */
470 down_write(&cathandle->lgh_lock);
471 if (cathandle->u.chd.chd_next_log == NULL) {
472 rc = llog_open(env, cathandle->lgh_ctxt, &loghandle,
473 NULL, NULL, LLOG_OPEN_NEW);
475 cathandle->u.chd.chd_next_log = loghandle;
476 list_add_tail(&loghandle->u.phd.phd_entry,
477 &cathandle->u.chd.chd_head);
480 up_write(&cathandle->lgh_lock);
 /* Size of the catalog record that would point at a new plain llog. */
485 lirec->lid_hdr.lrh_len = sizeof(*lirec);
487 if (!llog_exist(cathandle->u.chd.chd_current_log)) {
488 if (dt_object_remote(cathandle->lgh_obj)) {
489 /* For remote operation, if we put the llog object
490 * creation in the current transaction, then the
491 * llog object will not be created on the remote
492 * target until the transaction stop, if other
493 * operations start before the transaction stop,
494 * and use the same llog object, will be dependent
495 * on the success of this transaction. So let's
496 * create the llog object synchronously here to
497 * remove the dependency. */
498 down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
499 loghandle = cathandle->u.chd.chd_current_log;
500 down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
501 if (!llog_exist(loghandle))
502 rc = llog_cat_new_log(env, cathandle, loghandle,
504 up_write(&loghandle->lgh_lock);
505 up_read(&cathandle->lgh_lock);
 /* Local llog: defer creation into this transaction; declare both
  * the create and the catalog record write. */
510 rc = llog_declare_create(env,
511 cathandle->u.chd.chd_current_log, th);
514 llog_declare_write_rec(env, cathandle,
515 &lirec->lid_hdr, -1, th);
518 /* declare records in the llogs */
519 rc = llog_declare_write_rec(env, cathandle->u.chd.chd_current_log,
 /* Same treatment for the pre-opened "next" log, so a mid-transaction
  * switch in llog_cat_add_rec() is already paid for. */
524 next = cathandle->u.chd.chd_next_log;
526 if (!llog_exist(next)) {
527 if (dt_object_remote(cathandle->lgh_obj)) {
528 /* For remote operation, if we put the llog
529 * object creation in the current transaction,
530 * then the llog object will not be created on
531 * the remote target until the transaction stop,
532 * if other operations start before the
533 * transaction stop, and use the same llog
534 * object, will be dependent on the success of
535 * this transaction. So let's create the llog
536 * object synchronously here to remove the
538 down_read_nested(&cathandle->lgh_lock,
540 next = cathandle->u.chd.chd_next_log;
541 down_write_nested(&next->lgh_lock, LLOGH_LOG);
542 if (!llog_exist(next))
543 rc = llog_cat_new_log(env, cathandle,
545 up_write(&next->lgh_lock);
546 up_read(&cathandle->lgh_lock);
550 rc = llog_declare_create(env, next, th);
551 llog_declare_write_rec(env, cathandle,
552 &lirec->lid_hdr, -1, th);
555 /* XXX: we hope for declarations made for existing llog
556 * this might be not correct with some backends
557 * where declarations are expected against specific
558 * object like ZFS with full debugging enabled */
559 /*llog_declare_write_rec(env, next, rec, -1, th);*/
564 EXPORT_SYMBOL(llog_cat_declare_add_rec);
/* Convenience wrapper: run declare + add of one catalog record inside its own
 * local transaction on the catalog's backing dt device.
 * NOTE(review): sampled source — error checks between steps are elided. */
566 int llog_cat_add(const struct lu_env *env, struct llog_handle *cathandle,
567 struct llog_rec_hdr *rec, struct llog_cookie *reccookie)
569 struct llog_ctxt *ctxt;
570 struct dt_device *dt;
571 struct thandle *th = NULL;
574 ctxt = cathandle->lgh_ctxt;
576 LASSERT(ctxt->loc_exp);
578 LASSERT(cathandle->lgh_obj != NULL);
579 dt = lu2dt_dev(cathandle->lgh_obj->do_lu.lo_dev);
 /* create -> declare -> start -> write -> stop transaction lifecycle */
581 th = dt_trans_create(env, dt);
585 rc = llog_cat_declare_add_rec(env, cathandle, rec, th);
589 rc = dt_trans_start_local(env, dt, th);
592 rc = llog_cat_add_rec(env, cathandle, rec, reccookie, th);
594 dt_trans_stop(env, dt, th);
597 EXPORT_SYMBOL(llog_cat_add);
599 /* For each cookie in the cookie array, we clear the log in-use bit and either:
600 * - the log is empty, so mark it free in the catalog header and delete it
601 * - the log is not empty, just write out the log header
603 * The cookies may be in different log files, so we need to get new logs
606 * Assumes caller has already pushed us into the kernel context.
/* Cancel `count` records identified by cookies; when a plain llog becomes
 * empty and is destroyed (LLOG_DEL_PLAIN), its catalog record is cleaned up
 * too. Failures are counted but the loop continues over remaining cookies.
 * NOTE(review): sampled source — `failed++`/`continue` lines and lrc
 * declaration are elided. */
608 int llog_cat_cancel_records(const struct lu_env *env,
609 struct llog_handle *cathandle, int count,
610 struct llog_cookie *cookies)
612 int i, index, rc = 0, failed = 0;
616 for (i = 0; i < count; i++, cookies++) {
617 struct llog_handle *loghandle;
618 struct llog_logid *lgl = &cookies->lgc_lgl;
 /* Resolve the plain llog this cookie points into (takes a ref). */
621 rc = llog_cat_id2handle(env, cathandle, &loghandle, lgl);
623 CERROR("%s: cannot find handle for llog "DOSTID": %d\n",
624 cathandle->lgh_ctxt->loc_obd->obd_name,
625 POSTID(&lgl->lgl_oi), rc);
630 lrc = llog_cancel_rec(env, loghandle, cookies->lgc_index);
631 if (lrc == LLOG_DEL_PLAIN) { /* log has been destroyed */
632 index = loghandle->u.phd.phd_cookie.lgc_index;
633 rc = llog_cat_cleanup(env, cathandle, loghandle,
635 } else if (lrc == -ENOENT) {
636 if (rc == 0) /* ENOENT shouldn't rewrite any error */
638 } else if (lrc < 0) {
 /* Drop the reference taken by llog_cat_id2handle(). */
642 llog_handle_put(loghandle);
645 CERROR("%s: fail to cancel %d of %d llog-records: rc = %d\n",
646 cathandle->lgh_ctxt->loc_obd->obd_name, failed, count,
651 EXPORT_SYMBOL(llog_cat_cancel_records);
/* Per-catalog-record callback for llog_cat_process_or_fork(): open the plain
 * llog named by the record, garbage-collect it if empty, otherwise process
 * its records with the user callback (honoring startcat/startidx).
 * NOTE(review): sampled source — RETURN/GOTO targets and `hdr = llh->lgh_hdr`
 * style assignments are elided. */
653 static int llog_cat_process_cb(const struct lu_env *env,
654 struct llog_handle *cat_llh,
655 struct llog_rec_hdr *rec, void *data)
657 struct llog_process_data *d = data;
658 struct llog_logid_rec *lir = (struct llog_logid_rec *)rec;
659 struct llog_handle *llh;
660 struct llog_log_hdr *hdr;
 /* Catalog records must be llog_logid_rec entries. */
664 if (rec->lrh_type != LLOG_LOGID_MAGIC) {
665 CERROR("invalid record in catalog\n");
668 CDEBUG(D_HA, "processing log "DOSTID":%x at index %u of catalog "
669 DOSTID"\n", POSTID(&lir->lid_id.lgl_oi), lir->lid_id.lgl_ogen,
670 rec->lrh_index, POSTID(&cat_llh->lgh_id.lgl_oi));
672 rc = llog_cat_id2handle(env, cat_llh, &llh, &lir->lid_id);
674 CERROR("%s: cannot find handle for llog "DOSTID": %d\n",
675 cat_llh->lgh_ctxt->loc_obd->obd_name,
676 POSTID(&lir->lid_id.lgl_oi), rc);
677 if (rc == -ENOENT || rc == -ESTALE) {
678 /* After a server crash, a stub of index
679 * record in catlog could be kept, because
680 * plain log destroy + catlog index record
681 * deletion are not atomic. So we end up with
682 * an index but no actual record. Destroy the
683 * index and move on. */
684 rc = llog_cat_cleanup(env, cat_llh, NULL,
691 /* clean old empty llogs, do not consider current llog in use */
692 /* ignore remote (lgh_obj=NULL) llogs */
694 if ((hdr->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
695 hdr->llh_count == 1 && cat_llh->lgh_obj != NULL &&
696 llh != cat_llh->u.chd.chd_current_log) {
697 rc = llog_destroy(env, llh);
699 CERROR("%s: fail to destroy empty log: rc = %d\n",
700 llh->lgh_ctxt->loc_obd->obd_name, rc);
701 GOTO(out, rc = LLOG_DEL_PLAIN);
704 if (rec->lrh_index < d->lpd_startcat) {
705 /* Skip processing of the logs until startcat */
707 } else if (d->lpd_startidx > 0) {
 /* First log at/after startcat: honor the record start index. */
708 struct llog_process_cat_data cd;
710 cd.lpcd_first_idx = d->lpd_startidx;
711 cd.lpcd_last_idx = 0;
712 rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data,
714 /* Continue processing the next log from idx 0 */
717 rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data,
722 /* The empty plain log was destroyed while processing */
723 if (rc == LLOG_DEL_PLAIN)
724 rc = llog_cat_cleanup(env, cat_llh, llh,
725 llh->u.phd.phd_cookie.lgc_index);
 /* Drop the reference taken by llog_cat_id2handle(). */
726 llog_handle_put(llh);
/* Iterate over every plain llog referenced by a catalog, invoking `cb` on
 * each plain-log record via llog_cat_process_cb. Handles the wrapped-catalog
 * case by processing [llh_cat_idx .. end] and then [0 .. lgh_last_idx] as two
 * passes. `fork` selects processing in a separate thread.
 * NOTE(review): sampled source — d.lpd_cb/lpd_data assignments and RETURNs
 * are elided. */
731 int llog_cat_process_or_fork(const struct lu_env *env,
732 struct llog_handle *cat_llh,
733 llog_cb_t cb, void *data, int startcat,
734 int startidx, bool fork)
736 struct llog_process_data d;
737 struct llog_log_hdr *llh = cat_llh->lgh_hdr;
741 LASSERT(llh->llh_flags & LLOG_F_IS_CAT);
744 d.lpd_startcat = startcat;
745 d.lpd_startidx = startidx;
 /* cat_idx >= last_idx with records present means the catalog has
  * wrapped past index zero. */
747 if (llh->llh_cat_idx >= cat_llh->lgh_last_idx &&
748 llh->llh_count > 1) {
749 struct llog_process_cat_data cd;
751 CWARN("catlog "DOSTID" crosses index zero\n",
752 POSTID(&cat_llh->lgh_id.lgl_oi));
 /* Pass 1: from first in-use index to the end of the bitmap. */
754 cd.lpcd_first_idx = llh->llh_cat_idx;
755 cd.lpcd_last_idx = 0;
756 rc = llog_process_or_fork(env, cat_llh, llog_cat_process_cb,
 /* Pass 2: from index zero up to the last used index. */
761 cd.lpcd_first_idx = 0;
762 cd.lpcd_last_idx = cat_llh->lgh_last_idx;
763 rc = llog_process_or_fork(env, cat_llh, llog_cat_process_cb,
 /* Unwrapped catalog: single pass over the whole range. */
766 rc = llog_process_or_fork(env, cat_llh, llog_cat_process_cb,
/* Synchronous (non-forking) variant of llog_cat_process_or_fork(). */
773 int llog_cat_process(const struct lu_env *env, struct llog_handle *cat_llh,
774 llog_cb_t cb, void *data, int startcat, int startidx)
776 return llog_cat_process_or_fork(env, cat_llh, cb, data, startcat,
779 EXPORT_SYMBOL(llog_cat_process);
/* Reverse-order counterpart of llog_cat_process_cb(): open the plain llog
 * named by a catalog record, garbage-collect it if empty, otherwise process
 * its records newest-first with the user callback.
 * Note the le32_to_cpu() on rec fields here — unlike the forward callback,
 * this path apparently sees on-disk (little-endian) record headers.
 * NOTE(review): sampled source — RETURNs and `hdr` assignment are elided. */
781 static int llog_cat_reverse_process_cb(const struct lu_env *env,
782 struct llog_handle *cat_llh,
783 struct llog_rec_hdr *rec, void *data)
785 struct llog_process_data *d = data;
786 struct llog_logid_rec *lir = (struct llog_logid_rec *)rec;
787 struct llog_handle *llh;
788 struct llog_log_hdr *hdr;
791 if (le32_to_cpu(rec->lrh_type) != LLOG_LOGID_MAGIC) {
792 CERROR("invalid record in catalog\n");
795 CDEBUG(D_HA, "processing log "DOSTID":%x at index %u of catalog "
796 DOSTID"\n", POSTID(&lir->lid_id.lgl_oi), lir->lid_id.lgl_ogen,
797 le32_to_cpu(rec->lrh_index), POSTID(&cat_llh->lgh_id.lgl_oi));
799 rc = llog_cat_id2handle(env, cat_llh, &llh, &lir->lid_id);
801 CERROR("%s: cannot find handle for llog "DOSTID": %d\n",
802 cat_llh->lgh_ctxt->loc_obd->obd_name,
803 POSTID(&lir->lid_id.lgl_oi), rc);
804 if (rc == -ENOENT || rc == -ESTALE) {
805 /* After a server crash, a stub of index
806 * record in catlog could be kept, because
807 * plain log destroy + catlog index record
808 * deletion are not atomic. So we end up with
809 * an index but no actual record. Destroy the
810 * index and move on. */
811 rc = llog_cat_cleanup(env, cat_llh, NULL,
818 /* clean old empty llogs, do not consider current llog in use */
820 if ((hdr->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
821 hdr->llh_count == 1 &&
822 llh != cat_llh->u.chd.chd_current_log) {
823 rc = llog_destroy(env, llh);
825 CERROR("%s: fail to destroy empty log: rc = %d\n",
826 llh->lgh_ctxt->loc_obd->obd_name, rc);
827 GOTO(out, rc = LLOG_DEL_PLAIN);
830 rc = llog_reverse_process(env, llh, d->lpd_cb, d->lpd_data, NULL);
833 /* The empty plain log was destroyed while processing */
834 if (rc == LLOG_DEL_PLAIN)
835 rc = llog_cat_cleanup(env, cat_llh, llh,
836 llh->u.phd.phd_cookie.lgc_index);
 /* Drop the reference taken by llog_cat_id2handle(). */
838 llog_handle_put(llh);
/* Process a catalog's plain llogs in reverse (newest-first) order. For a
 * wrapped catalog the two index ranges are visited in the opposite order to
 * llog_cat_process_or_fork(): [lgh_last_idx .. 0] first, then the tail range
 * starting at llh_cat_idx.
 * NOTE(review): sampled source — d.lpd_* assignments and RETURNs elided. */
842 int llog_cat_reverse_process(const struct lu_env *env,
843 struct llog_handle *cat_llh,
844 llog_cb_t cb, void *data)
846 struct llog_process_data d;
847 struct llog_process_cat_data cd;
848 struct llog_log_hdr *llh = cat_llh->lgh_hdr;
852 LASSERT(llh->llh_flags & LLOG_F_IS_CAT);
 /* Wrapped catalog: newest records live in [0 .. lgh_last_idx]. */
856 if (llh->llh_cat_idx >= cat_llh->lgh_last_idx &&
857 llh->llh_count > 1) {
858 CWARN("catalog "DOSTID" crosses index zero\n",
859 POSTID(&cat_llh->lgh_id.lgl_oi));
861 cd.lpcd_first_idx = 0;
862 cd.lpcd_last_idx = cat_llh->lgh_last_idx;
863 rc = llog_reverse_process(env, cat_llh,
864 llog_cat_reverse_process_cb,
 /* Then the older range from the first in-use index to the end. */
869 cd.lpcd_first_idx = le32_to_cpu(llh->llh_cat_idx);
870 cd.lpcd_last_idx = 0;
871 rc = llog_reverse_process(env, cat_llh,
872 llog_cat_reverse_process_cb,
 /* Unwrapped catalog: single reverse pass. */
875 rc = llog_reverse_process(env, cat_llh,
876 llog_cat_reverse_process_cb,
882 EXPORT_SYMBOL(llog_cat_reverse_process);
/* Advance llh_cat_idx (the marker just before the first still-in-use catalog
 * slot) after a record at `idx` was cancelled. Only acts when the cancelled
 * record was the oldest one; then it walks forward over cleared bitmap bits
 * (wrapping modulo the bitmap size, and skipping the header bit at 0) until
 * the next set bit — the new first in-use index — is reached.
 * NOTE(review): sampled source — braces/`break`s of the while body and the
 * bitmap_size declaration are elided. */
884 static int llog_cat_set_first_idx(struct llog_handle *cathandle, int idx)
886 struct llog_log_hdr *llh = cathandle->lgh_hdr;
891 bitmap_size = LLOG_HDR_BITMAP_SIZE(llh);
893 * The llh_cat_idx equals the first used index minus 1
894 * so if we cancelled the first index then llh_cat_idx
897 if (llh->llh_cat_idx == (idx - 1)) {
898 llh->llh_cat_idx = idx;
900 while (idx != cathandle->lgh_last_idx) {
901 idx = (idx + 1) % bitmap_size;
902 if (!ext2_test_bit(idx, LLOG_HDR_BITMAP(llh))) {
903 /* update llh_cat_idx for each unset bit,
904 * expecting the next one is set */
905 llh->llh_cat_idx = idx;
906 } else if (idx == 0) {
907 /* skip header bit */
908 llh->llh_cat_idx = 0;
910 /* the first index is found */
916 CDEBUG(D_RPCTRACE, "Set catlog "DOSTID" first idx %u,"
917 " (last_idx %u)\n", POSTID(&cathandle->lgh_id.lgl_oi),
918 llh->llh_cat_idx, cathandle->lgh_last_idx);
924 /* Cleanup deleted plain llog traces from catalog: unlink and close the
 * in-memory handle (if given), then advance the first-index marker and
 * cancel the catalog record at `index`.
 * NOTE(review): sampled source — RETURN lines are elided. */
925 int llog_cat_cleanup(const struct lu_env *env, struct llog_handle *cathandle,
926 struct llog_handle *loghandle, int index)
931 if (loghandle != NULL) {
932 /* remove destroyed llog from catalog list and
933 * chd_current_log variable */
934 down_write(&cathandle->lgh_lock);
935 if (cathandle->u.chd.chd_current_log == loghandle)
936 cathandle->u.chd.chd_current_log = NULL;
937 list_del_init(&loghandle->u.phd.phd_entry);
938 up_write(&cathandle->lgh_lock);
 /* Caller-supplied index must match the slot recorded on open. */
939 LASSERT(index == loghandle->u.phd.phd_cookie.lgc_index);
940 /* llog was opened and kept in a list, close it now */
941 llog_close(env, loghandle);
944 /* do not attempt to cleanup on-disk llog if on client side */
945 if (cathandle->lgh_obj == NULL)
948 /* remove plain llog entry from catalog by index */
949 llog_cat_set_first_idx(cathandle, index);
950 rc = llog_cancel_rec(env, cathandle, index);
952 CDEBUG(D_HA, "cancel plain log at index"
953 " %u of catalog "DOSTID"\n",
954 index, POSTID(&cathandle->lgh_id.lgl_oi));
958 /* helper to initialize catalog llog and process it to cancel
 * stale entries; only the llog_init_handle() step is visible in this
 * sampled copy — NOTE(review): the processing call and RETURN are elided. */
959 int llog_cat_init_and_process(const struct lu_env *env,
960 struct llog_handle *llh)
964 rc = llog_init_handle(env, llh, LLOG_F_IS_CAT, NULL);
970 EXPORT_SYMBOL(llog_cat_init_and_process);