4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2012, 2014, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/obdclass/llog_cat.c
38 * OST<->MDS recovery logging infrastructure.
40 * Invariants in implementation:
41 * - we do not share logs among different OST<->MDS connections, so that
42 * if an OST or MDS fails it need only look at log(s) relevant to itself
44 * Author: Andreas Dilger <adilger@clusterfs.com>
45 * Author: Alexey Zhuravlev <alexey.zhuravlev@intel.com>
46 * Author: Mikhail Pershin <mike.pershin@intel.com>
49 #define DEBUG_SUBSYSTEM S_LOG
52 #include <obd_class.h>
54 #include "llog_internal.h"
56 /* Create a new log handle and add it to the open list.
57 * This log handle will be closed when all of the records in it are removed.
59 * Assumes caller has already pushed us into the kernel context and is locking.
/*
 * Create the plain llog object backing @loghandle and append a
 * llog_logid_rec pointing at it into the catalog @cathandle.
 * Runs inside the caller-supplied transaction when one is given, otherwise
 * creates and runs a local transaction on the catalog's dt device.
 * NOTE(review): several lines of this function (guards/RETURN paths) are not
 * visible in this chunk; comments below describe only what is shown.
 */
61 static int llog_cat_new_log(const struct lu_env *env,
62 struct llog_handle *cathandle,
63 struct llog_handle *loghandle,
66 struct llog_thread_info *lgi = llog_info(env);
67 struct llog_logid_rec *rec = &lgi->lgi_logid;
69 struct thandle *handle = NULL;
70 struct dt_device *dt = NULL;
/* fault-injection hook used by tests to simulate llog creation failure */
74 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_LLOG_CREATE_FAILED))
77 if (loghandle->lgh_hdr != NULL) {
78 /* If llog object is remote and creation is failed, lgh_hdr
79 * might be left over here, free it first */
80 LASSERT(!llog_exist(loghandle));
81 OBD_FREE_PTR(loghandle->lgh_hdr);
82 loghandle->lgh_hdr = NULL;
/* NOTE(review): local-transaction setup — presumably only reached when the
 * caller passed no transaction (the guard is on a line not shown here) */
86 dt = lu2dt_dev(cathandle->lgh_obj->do_lu.lo_dev);
88 handle = dt_trans_create(env, dt);
90 RETURN(PTR_ERR(handle));
92 /* Create update llog object synchronously, which
93 * happens during initialization process see
94 * lod_sub_prep_llog(), to make sure the update
95 * llog object is created before cross-MDT writing
96 * updates into the llog object */
97 if (cathandle->lgh_ctxt->loc_flags & LLOG_CTXT_FLAG_NORMAL_FID)
100 handle->th_wait_submit = 1;
102 rc = llog_declare_create(env, loghandle, handle);
/* declare the catalog record that will point at the new plain llog */
106 rec->lid_hdr.lrh_len = sizeof(*rec);
107 rec->lid_hdr.lrh_type = LLOG_LOGID_MAGIC;
108 rec->lid_id = loghandle->lgh_id;
109 rc = llog_declare_write_rec(env, cathandle, &rec->lid_hdr, -1,
114 rc = dt_trans_start_local(env, dt, handle);
121 rc = llog_create(env, loghandle, th);
122 /* if llog is already created, no need to initialize it */
125 } else if (rc != 0) {
126 CERROR("%s: can't create new plain llog in catalog: rc = %d\n",
127 loghandle->lgh_ctxt->loc_obd->obd_name, rc);
/* new plain llog is destroyed automatically once it becomes empty */
131 rc = llog_init_handle(env, loghandle,
132 LLOG_F_IS_PLAIN | LLOG_F_ZAP_WHEN_EMPTY,
133 &cathandle->lgh_hdr->llh_tgtuuid);
137 /* build the record for this log in the catalog */
138 rec->lid_hdr.lrh_len = sizeof(*rec);
139 rec->lid_hdr.lrh_type = LLOG_LOGID_MAGIC;
140 rec->lid_id = loghandle->lgh_id;
142 /* append the new record into catalog. The new index will be
143 * assigned to the record and updated in rec header */
144 rc = llog_write_rec(env, cathandle, &rec->lid_hdr,
145 &loghandle->u.phd.phd_cookie, LLOG_NEXT_IDX, th);
149 CDEBUG(D_OTHER, "new recovery log "DOSTID":%x for index %u of catalog"
150 DOSTID"\n", POSTID(&loghandle->lgh_id.lgl_oi),
151 loghandle->lgh_id.lgl_ogen, rec->lid_hdr.lrh_index,
152 POSTID(&cathandle->lgh_id.lgl_oi));
/* remember, inside the plain llog header, which catalog slot points at it */
154 loghandle->lgh_hdr->llh_cat_idx = rec->lid_hdr.lrh_index;
/* stop the locally-created transaction (skipped when caller owned th —
 * guard not visible in this chunk) */
157 dt_trans_stop(env, dt, handle);
162 /* Open an existent log handle and add it to the open list.
163 * This log handle will be closed when all of the records in it are removed.
165 * Assumes caller has already pushed us into the kernel context and is locking.
166 * We return a lock on the handle to ensure nobody yanks it from us.
168 * This takes extra reference on llog_handle via llog_handle_get() and require
169 * this reference to be put by caller using llog_handle_put()
171 int llog_cat_id2handle(const struct lu_env *env, struct llog_handle *cathandle,
172 struct llog_handle **res, struct llog_logid *logid)
174 struct llog_handle *loghandle;
180 if (cathandle == NULL)
/* propagate the catalog's extended-record format flags to the plain llog */
183 fmt = cathandle->lgh_hdr->llh_flags & LLOG_F_EXT_MASK;
/* fast path: the plain llog may already be on the catalog's open list */
184 down_write(&cathandle->lgh_lock);
185 list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
187 struct llog_logid *cgl = &loghandle->lgh_id;
/* match on object id + sequence; generation is checked separately below */
189 if (ostid_id(&cgl->lgl_oi) == ostid_id(&logid->lgl_oi) &&
190 ostid_seq(&cgl->lgl_oi) == ostid_seq(&logid->lgl_oi)) {
191 if (cgl->lgl_ogen != logid->lgl_ogen) {
192 CERROR("%s: log "DOSTID" generation %x != %x\n",
193 loghandle->lgh_ctxt->loc_obd->obd_name,
194 POSTID(&logid->lgl_oi), cgl->lgl_ogen,
198 loghandle->u.phd.phd_cat_handle = cathandle;
199 up_write(&cathandle->lgh_lock);
203 up_write(&cathandle->lgh_lock);
/* not cached: open and initialize the plain llog by its logid */
205 rc = llog_open(env, cathandle->lgh_ctxt, &loghandle, logid, NULL,
208 CERROR("%s: error opening log id "DOSTID":%x: rc = %d\n",
209 cathandle->lgh_ctxt->loc_obd->obd_name,
210 POSTID(&logid->lgl_oi), logid->lgl_ogen, rc);
214 rc = llog_init_handle(env, loghandle, LLOG_F_IS_PLAIN | fmt, NULL);
216 llog_close(env, loghandle);
/* publish the freshly-opened handle on the catalog's open list */
221 down_write(&cathandle->lgh_lock);
222 list_add_tail(&loghandle->u.phd.phd_entry, &cathandle->u.chd.chd_head);
223 up_write(&cathandle->lgh_lock);
/* record where in the catalog this plain llog lives */
225 loghandle->u.phd.phd_cat_handle = cathandle;
226 loghandle->u.phd.phd_cookie.lgc_lgl = cathandle->lgh_id;
227 loghandle->u.phd.phd_cookie.lgc_index =
228 loghandle->lgh_hdr->llh_cat_idx;
/* extra reference for the caller; must be dropped via llog_handle_put() */
231 llog_handle_get(loghandle);
/*
 * Close a catalog llog together with every plain llog still on its open
 * list.  Plain llogs flagged LLOG_F_ZAP_WHEN_EMPTY that hold only their own
 * header record (llh_count == 1) are destroyed and their catalog entry is
 * removed before closing.
 */
236 int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle)
238 struct llog_handle *loghandle, *n;
/* _safe iteration: entries are unlinked/closed while walking the list */
243 list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head,
245 struct llog_log_hdr *llh = loghandle->lgh_hdr;
248 /* unlink open-not-created llogs */
249 list_del_init(&loghandle->u.phd.phd_entry);
250 llh = loghandle->lgh_hdr;
251 if (loghandle->lgh_obj != NULL && llh != NULL &&
252 (llh->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
253 (llh->llh_count == 1)) {
254 rc = llog_destroy(env, loghandle);
256 CERROR("%s: failure destroying log during "
257 "cleanup: rc = %d\n",
258 loghandle->lgh_ctxt->loc_obd->obd_name,
/* drop the now-stale catalog entry pointing at the destroyed llog */
261 index = loghandle->u.phd.phd_cookie.lgc_index;
262 llog_cat_cleanup(env, cathandle, NULL, index);
264 llog_close(env, loghandle);
266 /* if handle was stored in ctxt, remove it too */
267 if (cathandle->lgh_ctxt->loc_handle == cathandle)
268 cathandle->lgh_ctxt->loc_handle = NULL;
269 rc = llog_close(env, cathandle);
272 EXPORT_SYMBOL(llog_cat_close);
275 * lockdep markers for nested struct llog_handle::lgh_lock locking.
282 /** Return the currently active log handle. If the current log handle doesn't
283 * have enough space left for the current record, start a new one.
285 * If reclen is 0, we only want to know what the currently active log is,
286 * otherwise we get a lock on this log so nobody can steal our space.
288 * Assumes caller has already pushed us into the kernel context and is locking.
290 * NOTE: loghandle is write-locked upon successful return
292 static struct llog_handle *llog_cat_current_log(struct llog_handle *cathandle,
295 struct llog_handle *loghandle = NULL;
/* optimistic pass: shared catalog lock, exclusive lock on the plain llog */
298 down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
299 loghandle = cathandle->u.chd.chd_current_log;
301 struct llog_log_hdr *llh;
303 down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
304 llh = loghandle->lgh_hdr;
/* room left in the bitmap: keep using the current plain llog */
306 loghandle->lgh_last_idx < LLOG_HDR_BITMAP_SIZE(llh) - 1) {
307 up_read(&cathandle->lgh_lock);
310 up_write(&loghandle->lgh_lock);
313 up_read(&cathandle->lgh_lock);
315 /* time to use next log */
317 /* first, we have to make sure the state hasn't changed */
/* re-check under the exclusive catalog lock: another thread may have
 * switched chd_current_log after we dropped the read lock above */
318 down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
319 loghandle = cathandle->u.chd.chd_current_log;
321 struct llog_log_hdr *llh;
323 down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
324 llh = loghandle->lgh_hdr;
326 if (loghandle->lgh_last_idx < LLOG_HDR_BITMAP_SIZE(llh) - 1) {
327 up_write(&cathandle->lgh_lock);
330 up_write(&loghandle->lgh_lock);
334 CDEBUG(D_INODE, "use next log\n");
/* promote the pre-declared "next" llog to be the current one */
336 loghandle = cathandle->u.chd.chd_next_log;
337 cathandle->u.chd.chd_current_log = loghandle;
338 cathandle->u.chd.chd_next_log = NULL;
339 down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
340 up_write(&cathandle->lgh_lock);
345 /* Add a single record to the recovery log(s) using a catalog
346 * Returns as llog_write_record
348 * Assumes caller has already pushed us into the kernel context.
350 int llog_cat_add_rec(const struct lu_env *env, struct llog_handle *cathandle,
351 struct llog_rec_hdr *rec, struct llog_cookie *reccookie,
354 struct llog_handle *loghandle;
/* a record must fit in one llog chunk */
358 LASSERT(rec->lrh_len <= cathandle->lgh_ctxt->loc_chunk_size);
359 loghandle = llog_cat_current_log(cathandle, th);
360 LASSERT(!IS_ERR(loghandle));
362 /* loghandle is already locked by llog_cat_current_log() for us */
363 if (!llog_exist(loghandle)) {
364 rc = llog_cat_new_log(env, cathandle, loghandle, th);
366 up_write(&loghandle->lgh_lock);
370 /* now let's try to add the record */
371 rc = llog_write_rec(env, loghandle, rec, reccookie, LLOG_NEXT_IDX, th);
/* -ENOSPC means the plain llog filled up — expected, so log it at a
 * lower severity and retry once below with the next llog */
373 CDEBUG_LIMIT(rc == -ENOSPC ? D_HA : D_ERROR,
374 "llog_write_rec %d: lh=%p\n", rc, loghandle);
375 up_write(&loghandle->lgh_lock);
377 /* try to use next log */
378 loghandle = llog_cat_current_log(cathandle, th);
379 LASSERT(!IS_ERR(loghandle));
380 /* new llog can be created concurrently */
381 if (!llog_exist(loghandle)) {
382 rc = llog_cat_new_log(env, cathandle, loghandle, th);
384 up_write(&loghandle->lgh_lock);
388 /* now let's try to add the record */
389 rc = llog_write_rec(env, loghandle, rec, reccookie,
392 CERROR("llog_write_rec %d: lh=%p\n", rc, loghandle);
393 up_write(&loghandle->lgh_lock);
398 EXPORT_SYMBOL(llog_cat_add_rec);
/*
 * Declare (reserve transaction credits for) adding @rec through the catalog:
 * make sure a "current" and a "next" plain llog exist (opening them here if
 * needed) and declare both the record write and the catalog bookkeeping
 * that may be required if the current llog fills up inside @th.
 */
400 int llog_cat_declare_add_rec(const struct lu_env *env,
401 struct llog_handle *cathandle,
402 struct llog_rec_hdr *rec, struct thandle *th)
404 struct llog_thread_info *lgi = llog_info(env);
405 struct llog_logid_rec *lirec = &lgi->lgi_logid;
406 struct llog_handle *loghandle, *next;
411 if (cathandle->u.chd.chd_current_log == NULL) {
412 /* declare new plain llog */
413 down_write(&cathandle->lgh_lock);
/* re-check under lock: another thread may have opened it meanwhile */
414 if (cathandle->u.chd.chd_current_log == NULL) {
415 rc = llog_open(env, cathandle->lgh_ctxt, &loghandle,
416 NULL, NULL, LLOG_OPEN_NEW);
418 cathandle->u.chd.chd_current_log = loghandle;
419 list_add_tail(&loghandle->u.phd.phd_entry,
420 &cathandle->u.chd.chd_head);
423 up_write(&cathandle->lgh_lock);
424 } else if (cathandle->u.chd.chd_next_log == NULL) {
425 /* declare next plain llog */
426 down_write(&cathandle->lgh_lock);
/* same double-check under the exclusive lock as above */
427 if (cathandle->u.chd.chd_next_log == NULL) {
428 rc = llog_open(env, cathandle->lgh_ctxt, &loghandle,
429 NULL, NULL, LLOG_OPEN_NEW);
431 cathandle->u.chd.chd_next_log = loghandle;
432 list_add_tail(&loghandle->u.phd.phd_entry,
433 &cathandle->u.chd.chd_head);
436 up_write(&cathandle->lgh_lock);
441 lirec->lid_hdr.lrh_len = sizeof(*lirec);
443 if (!llog_exist(cathandle->u.chd.chd_current_log)) {
444 if (dt_object_remote(cathandle->lgh_obj)) {
445 /* If it is remote cat-llog here, let's create the
446 * remote llog object synchronously, so other threads
447 * can use it correctly. */
448 rc = llog_cat_new_log(env, cathandle,
449 cathandle->u.chd.chd_current_log, NULL);
451 rc = llog_declare_create(env,
452 cathandle->u.chd.chd_current_log, th);
455 llog_declare_write_rec(env, cathandle,
456 &lirec->lid_hdr, -1, th);
459 /* declare records in the llogs */
460 rc = llog_declare_write_rec(env, cathandle->u.chd.chd_current_log,
465 next = cathandle->u.chd.chd_next_log;
467 if (!llog_exist(next)) {
468 if (dt_object_remote(cathandle->lgh_obj)) {
469 /* If it is remote cat-llog here, let's create
470 * the remote llog object synchronously,
471 * so other threads can use it correctly. */
472 rc = llog_cat_new_log(env, cathandle, next,
475 rc = llog_declare_create(env, next, th);
476 llog_declare_write_rec(env, cathandle,
477 &lirec->lid_hdr, -1, th);
480 /* XXX: we hope for declarations made for existing llog
481 * this might be not correct with some backends
482 * where declarations are expected against specific
483 * object like ZFS with full debugging enabled */
484 /*llog_declare_write_rec(env, next, rec, -1, th);*/
489 EXPORT_SYMBOL(llog_cat_declare_add_rec);
/*
 * Add a record through the catalog in a self-contained local transaction:
 * declare, start, write, stop.  Convenience wrapper combining
 * llog_cat_declare_add_rec() and llog_cat_add_rec().
 */
491 int llog_cat_add(const struct lu_env *env, struct llog_handle *cathandle,
492 struct llog_rec_hdr *rec, struct llog_cookie *reccookie)
494 struct llog_ctxt *ctxt;
495 struct dt_device *dt;
496 struct thandle *th = NULL;
499 ctxt = cathandle->lgh_ctxt;
501 LASSERT(ctxt->loc_exp);
503 LASSERT(cathandle->lgh_obj != NULL);
504 dt = lu2dt_dev(cathandle->lgh_obj->do_lu.lo_dev);
506 th = dt_trans_create(env, dt);
510 rc = llog_cat_declare_add_rec(env, cathandle, rec, th);
514 rc = dt_trans_start_local(env, dt, th);
517 rc = llog_cat_add_rec(env, cathandle, rec, reccookie, th);
519 dt_trans_stop(env, dt, th);
522 EXPORT_SYMBOL(llog_cat_add);
524 /* For each cookie in the cookie array, we clear the log in-use bit and either:
525 * - the log is empty, so mark it free in the catalog header and delete it
526 * - the log is not empty, just write out the log header
528 * The cookies may be in different log files, so we need to get new logs
531 * Assumes caller has already pushed us into the kernel context.
533 int llog_cat_cancel_records(const struct lu_env *env,
534 struct llog_handle *cathandle, int count,
535 struct llog_cookie *cookies)
537 int i, index, rc = 0, failed = 0;
541 for (i = 0; i < count; i++, cookies++) {
542 struct llog_handle *loghandle;
543 struct llog_logid *lgl = &cookies->lgc_lgl;
/* resolve the cookie's logid to an open plain llog handle */
546 rc = llog_cat_id2handle(env, cathandle, &loghandle, lgl);
548 CERROR("%s: cannot find handle for llog "DOSTID": %d\n",
549 cathandle->lgh_ctxt->loc_obd->obd_name,
550 POSTID(&lgl->lgl_oi), rc);
555 lrc = llog_cancel_rec(env, loghandle, cookies->lgc_index);
556 if (lrc == LLOG_DEL_PLAIN) { /* log has been destroyed */
/* plain llog became empty and was destroyed: scrub its catalog slot */
557 index = loghandle->u.phd.phd_cookie.lgc_index;
558 rc = llog_cat_cleanup(env, cathandle, loghandle,
560 } else if (lrc == -ENOENT) {
561 if (rc == 0) /* ENOENT shouldn't rewrite any error */
563 } else if (lrc < 0) {
/* drop the reference taken by llog_cat_id2handle() */
567 llog_handle_put(loghandle);
570 CERROR("%s: fail to cancel %d of %d llog-records: rc = %d\n",
571 cathandle->lgh_ctxt->loc_obd->obd_name, failed, count,
576 EXPORT_SYMBOL(llog_cat_cancel_records);
/*
 * Catalog-iteration callback: for each LLOG_LOGID_MAGIC record, open the
 * referenced plain llog and run the user's callback (d->lpd_cb) over its
 * records, honouring the lpd_startcat/lpd_startidx resume points.
 * Stale catalog entries (plain llog already gone) and old empty llogs are
 * cleaned up along the way.
 */
578 static int llog_cat_process_cb(const struct lu_env *env,
579 struct llog_handle *cat_llh,
580 struct llog_rec_hdr *rec, void *data)
582 struct llog_process_data *d = data;
583 struct llog_logid_rec *lir = (struct llog_logid_rec *)rec;
584 struct llog_handle *llh;
585 struct llog_log_hdr *hdr;
589 if (rec->lrh_type != LLOG_LOGID_MAGIC) {
590 CERROR("invalid record in catalog\n");
593 CDEBUG(D_HA, "processing log "DOSTID":%x at index %u of catalog "
594 DOSTID"\n", POSTID(&lir->lid_id.lgl_oi), lir->lid_id.lgl_ogen,
595 rec->lrh_index, POSTID(&cat_llh->lgh_id.lgl_oi));
597 rc = llog_cat_id2handle(env, cat_llh, &llh, &lir->lid_id);
599 CERROR("%s: cannot find handle for llog "DOSTID": %d\n",
600 cat_llh->lgh_ctxt->loc_obd->obd_name,
601 POSTID(&lir->lid_id.lgl_oi), rc);
602 if (rc == -ENOENT || rc == -ESTALE) {
603 /* After a server crash, a stub of index
604 * record in catlog could be kept, because
605 * plain log destroy + catlog index record
606 * deletion are not atomic. So we end up with
607 * an index but no actual record. Destroy the
608 * index and move on. */
609 rc = llog_cat_cleanup(env, cat_llh, NULL,
616 /* clean old empty llogs, do not consider current llog in use */
617 /* ignore remote (lgh_obj=NULL) llogs */
619 if ((hdr->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
620 hdr->llh_count == 1 && cat_llh->lgh_obj != NULL &&
621 llh != cat_llh->u.chd.chd_current_log) {
622 rc = llog_destroy(env, llh);
624 CERROR("%s: fail to destroy empty log: rc = %d\n",
625 llh->lgh_ctxt->loc_obd->obd_name, rc);
626 GOTO(out, rc = LLOG_DEL_PLAIN);
629 if (rec->lrh_index < d->lpd_startcat) {
630 /* Skip processing of the logs until startcat */
632 } else if (d->lpd_startidx > 0) {
/* first (resume) log: start from lpd_startidx rather than the top */
633 struct llog_process_cat_data cd;
635 cd.lpcd_first_idx = d->lpd_startidx;
636 cd.lpcd_last_idx = 0;
637 rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data,
639 /* Continue processing the next log from idx 0 */
642 rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data,
647 /* The empty plain log was destroyed while processing */
648 if (rc == LLOG_DEL_PLAIN)
649 rc = llog_cat_cleanup(env, cat_llh, llh,
650 llh->u.phd.phd_cookie.lgc_index);
/* balance the reference taken by llog_cat_id2handle() */
651 llog_handle_put(llh);
/*
 * Walk the catalog @cat_llh and invoke @cb on every record of every plain
 * llog it references (via llog_cat_process_cb).  @startcat/@startidx give a
 * resume point; @fork selects threaded processing in llog_process_or_fork().
 * A catalog whose first used index is above its last index has wrapped past
 * zero and is processed in two ranges.
 */
656 int llog_cat_process_or_fork(const struct lu_env *env,
657 struct llog_handle *cat_llh,
658 llog_cb_t cb, void *data, int startcat,
659 int startidx, bool fork)
661 struct llog_process_data d;
662 struct llog_log_hdr *llh = cat_llh->lgh_hdr;
666 LASSERT(llh->llh_flags & LLOG_F_IS_CAT);
669 d.lpd_startcat = startcat;
670 d.lpd_startidx = startidx;
/* wrapped catalog: [llh_cat_idx, end] first, then [0, lgh_last_idx] */
672 if (llh->llh_cat_idx > cat_llh->lgh_last_idx) {
673 struct llog_process_cat_data cd;
675 CWARN("catlog "DOSTID" crosses index zero\n",
676 POSTID(&cat_llh->lgh_id.lgl_oi));
678 cd.lpcd_first_idx = llh->llh_cat_idx;
679 cd.lpcd_last_idx = 0;
680 rc = llog_process_or_fork(env, cat_llh, llog_cat_process_cb,
685 cd.lpcd_first_idx = 0;
686 cd.lpcd_last_idx = cat_llh->lgh_last_idx;
687 rc = llog_process_or_fork(env, cat_llh, llog_cat_process_cb,
690 rc = llog_process_or_fork(env, cat_llh, llog_cat_process_cb,
/* Non-forking convenience wrapper around llog_cat_process_or_fork(). */
697 int llog_cat_process(const struct lu_env *env, struct llog_handle *cat_llh,
698 llog_cb_t cb, void *data, int startcat, int startidx)
700 return llog_cat_process_or_fork(env, cat_llh, cb, data, startcat,
703 EXPORT_SYMBOL(llog_cat_process);
/*
 * Reverse-iteration counterpart of llog_cat_process_cb(): open the plain
 * llog named by each catalog record and walk its records backwards via
 * llog_reverse_process().  Fields are read with le32_to_cpu() here —
 * presumably records may arrive in on-disk (little-endian) order on this
 * path; confirm against llog_reverse_process() callers.
 */
705 static int llog_cat_reverse_process_cb(const struct lu_env *env,
706 struct llog_handle *cat_llh,
707 struct llog_rec_hdr *rec, void *data)
709 struct llog_process_data *d = data;
710 struct llog_logid_rec *lir = (struct llog_logid_rec *)rec;
711 struct llog_handle *llh;
712 struct llog_log_hdr *hdr;
715 if (le32_to_cpu(rec->lrh_type) != LLOG_LOGID_MAGIC) {
716 CERROR("invalid record in catalog\n");
719 CDEBUG(D_HA, "processing log "DOSTID":%x at index %u of catalog "
720 DOSTID"\n", POSTID(&lir->lid_id.lgl_oi), lir->lid_id.lgl_ogen,
721 le32_to_cpu(rec->lrh_index), POSTID(&cat_llh->lgh_id.lgl_oi));
723 rc = llog_cat_id2handle(env, cat_llh, &llh, &lir->lid_id);
725 CERROR("%s: cannot find handle for llog "DOSTID": %d\n",
726 cat_llh->lgh_ctxt->loc_obd->obd_name,
727 POSTID(&lir->lid_id.lgl_oi), rc);
728 if (rc == -ENOENT || rc == -ESTALE) {
729 /* After a server crash, a stub of index
730 * record in catlog could be kept, because
731 * plain log destroy + catlog index record
732 * deletion are not atomic. So we end up with
733 * an index but no actual record. Destroy the
734 * index and move on. */
735 rc = llog_cat_cleanup(env, cat_llh, NULL,
742 /* clean old empty llogs, do not consider current llog in use */
744 if ((hdr->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
745 hdr->llh_count == 1 &&
746 llh != cat_llh->u.chd.chd_current_log) {
747 rc = llog_destroy(env, llh);
749 CERROR("%s: fail to destroy empty log: rc = %d\n",
750 llh->lgh_ctxt->loc_obd->obd_name, rc);
751 GOTO(out, rc = LLOG_DEL_PLAIN);
754 rc = llog_reverse_process(env, llh, d->lpd_cb, d->lpd_data, NULL);
757 /* The empty plain was destroyed while processing */
758 if (rc == LLOG_DEL_PLAIN)
759 rc = llog_cat_cleanup(env, cat_llh, llh,
760 llh->u.phd.phd_cookie.lgc_index);
/* balance the reference taken by llog_cat_id2handle() */
762 llog_handle_put(llh);
/*
 * Walk the catalog backwards, invoking @cb on records of each referenced
 * plain llog in reverse order (via llog_cat_reverse_process_cb).  A wrapped
 * catalog (first used index above last index) is handled as two ranges,
 * mirroring llog_cat_process_or_fork() but in the opposite direction.
 */
766 int llog_cat_reverse_process(const struct lu_env *env,
767 struct llog_handle *cat_llh,
768 llog_cb_t cb, void *data)
770 struct llog_process_data d;
771 struct llog_process_cat_data cd;
772 struct llog_log_hdr *llh = cat_llh->lgh_hdr;
776 LASSERT(llh->llh_flags & LLOG_F_IS_CAT);
/* wrapped catalog: newest range [0, lgh_last_idx] first when reversed */
780 if (llh->llh_cat_idx > cat_llh->lgh_last_idx) {
781 CWARN("catalog "DOSTID" crosses index zero\n",
782 POSTID(&cat_llh->lgh_id.lgl_oi));
784 cd.lpcd_first_idx = 0;
785 cd.lpcd_last_idx = cat_llh->lgh_last_idx;
786 rc = llog_reverse_process(env, cat_llh,
787 llog_cat_reverse_process_cb,
792 cd.lpcd_first_idx = le32_to_cpu(llh->llh_cat_idx);
793 cd.lpcd_last_idx = 0;
794 rc = llog_reverse_process(env, cat_llh,
795 llog_cat_reverse_process_cb,
798 rc = llog_reverse_process(env, cat_llh,
799 llog_cat_reverse_process_cb,
805 EXPORT_SYMBOL(llog_cat_reverse_process);
/*
 * Advance the catalog's llh_cat_idx ("first used index minus one") after
 * the record at @idx was cancelled, scanning the header bitmap forward past
 * any other already-cleared slots until the next set bit (or lgh_last_idx)
 * is reached.
 */
807 static int llog_cat_set_first_idx(struct llog_handle *cathandle, int idx)
809 struct llog_log_hdr *llh = cathandle->lgh_hdr;
814 bitmap_size = LLOG_HDR_BITMAP_SIZE(llh);
816 * The llh_cat_idx equals to the first used index minus 1
817 * so if we canceled the first index then llh_cat_idx
820 if (llh->llh_cat_idx == (idx - 1)) {
821 llh->llh_cat_idx = idx;
/* skip over any further already-cancelled slots, wrapping around the
 * bitmap until lgh_last_idx is reached */
823 while (idx != cathandle->lgh_last_idx) {
824 idx = (idx + 1) % bitmap_size;
825 if (!ext2_test_bit(idx, LLOG_HDR_BITMAP(llh))) {
826 /* update llh_cat_idx for each unset bit,
827 * expecting the next one is set */
828 llh->llh_cat_idx = idx;
829 } else if (idx == 0) {
830 /* skip header bit */
831 llh->llh_cat_idx = 0;
834 /* the first index is found */
839 CDEBUG(D_RPCTRACE, "Set catlog "DOSTID" first idx %u,"
840 " (last_idx %u)\n", POSTID(&cathandle->lgh_id.lgl_oi),
841 llh->llh_cat_idx, cathandle->lgh_last_idx);
847 /* Cleanup deleted plain llog traces from catalog */
/*
 * @loghandle may be NULL when only the catalog entry at @index is stale
 * (the plain llog itself is already gone); otherwise the handle is also
 * unlinked from the catalog's open list and closed.
 */
848 int llog_cat_cleanup(const struct lu_env *env, struct llog_handle *cathandle,
849 struct llog_handle *loghandle, int index)
854 if (loghandle != NULL) {
855 /* remove destroyed llog from catalog list and
856 * chd_current_log variable */
857 down_write(&cathandle->lgh_lock);
858 if (cathandle->u.chd.chd_current_log == loghandle)
859 cathandle->u.chd.chd_current_log = NULL;
860 list_del_init(&loghandle->u.phd.phd_entry);
861 up_write(&cathandle->lgh_lock);
/* sanity: the slot we are about to cancel must match the handle */
862 LASSERT(index == loghandle->u.phd.phd_cookie.lgc_index);
863 /* llog was opened and keep in a list, close it now */
864 llog_close(env, loghandle);
867 /* do not attempt to cleanup on-disk llog if on client side */
868 if (cathandle->lgh_obj == NULL)
871 /* remove plain llog entry from catalog by index */
872 llog_cat_set_first_idx(cathandle, index);
873 rc = llog_cancel_rec(env, cathandle, index);
875 CDEBUG(D_HA, "cancel plain log at index"
876 " %u of catalog "DOSTID"\n",
877 index, POSTID(&cathandle->lgh_id.lgl_oi));
881 /* helper to initialize catalog llog and process it to cancel */
/* NOTE(review): only the llog_init_handle() call is visible in this chunk;
 * the processing step presumably follows on lines not shown here. */
882 int llog_cat_init_and_process(const struct lu_env *env,
883 struct llog_handle *llh)
887 rc = llog_init_handle(env, llh, LLOG_F_IS_CAT, NULL);
893 EXPORT_SYMBOL(llog_cat_init_and_process);