*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*/
/*
* lustre/obdclass/llog_osd.c
#define DEBUG_SUBSYSTEM S_LOG
+#include <linux/delay.h>
+
#include <dt_object.h>
#include <llog_swab.h>
#include <lustre_fid.h>
static int llog_osd_exist(struct llog_handle *handle)
{
LASSERT(handle->lgh_obj);
- return dt_object_exists(handle->lgh_obj) &&
- !lu_object_is_dying(handle->lgh_obj->do_lu.lo_header);
+ return dt_object_exists(handle->lgh_obj) && !handle->lgh_destroyed;
}
static void *rec_tail(struct llog_rec_hdr *rec)
lgi = llog_info(env);
+ dt_read_lock(env, o, 0);
+
rc = dt_attr_get(env, o, &lgi->lgi_attr);
if (rc)
- RETURN(rc);
+ GOTO(unlock, rc);
LASSERT(lgi->lgi_attr.la_valid & LA_SIZE);
if (lgi->lgi_attr.la_size == 0) {
CDEBUG(D_HA, "not reading header from 0-byte log\n");
- RETURN(LLOG_EEMPTY);
+ GOTO(unlock, rc = LLOG_EEMPTY);
}
flags = handle->lgh_hdr->llh_flags;
if (rc >= 0)
rc = -EFAULT;
- RETURN(rc);
+ GOTO(unlock, rc);
}
if (LLOG_REC_HDR_NEEDS_SWABBING(llh_hdr))
handle->lgh_name ? handle->lgh_name : "",
PFID(lu_object_fid(&o->do_lu)),
llh_hdr->lrh_type, LLOG_HDR_MAGIC);
- RETURN(-EIO);
+ GOTO(unlock, rc = -EIO);
} else if (llh_hdr->lrh_len < LLOG_MIN_CHUNK_SIZE ||
llh_hdr->lrh_len > handle->lgh_hdr_size) {
CERROR("%s: incorrectly sized log %s "DFID" header: "
handle->lgh_name ? handle->lgh_name : "",
PFID(lu_object_fid(&o->do_lu)),
llh_hdr->lrh_len, LLOG_MIN_CHUNK_SIZE);
- RETURN(-EIO);
+ GOTO(unlock, rc = -EIO);
} else if (LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_index >
LLOG_HDR_BITMAP_SIZE(handle->lgh_hdr) ||
LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_len !=
handle->lgh_name ? handle->lgh_name : "",
PFID(lu_object_fid(&o->do_lu)),
LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_len, -EIO);
- RETURN(-EIO);
+ GOTO(unlock, rc = -EIO);
}
handle->lgh_hdr->llh_flags |= (flags & LLOG_F_EXT_MASK);
handle->lgh_last_idx = LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_index;
+ rc = 0;
- RETURN(0);
+unlock:
+ dt_read_unlock(env, o);
+ RETURN(rc);
}
/**
struct llog_cookie *reccookie,
int idx, struct thandle *th)
{
- struct llog_thread_info *lgi = llog_info(env);
- struct llog_log_hdr *llh;
- int reclen = rec->lrh_len;
- int index, rc;
- struct llog_rec_tail *lrt;
- struct dt_object *o;
- __u32 chunk_size;
- size_t left;
- __u32 orig_last_idx;
+ struct llog_thread_info *lgi = llog_info(env);
+ struct llog_log_hdr *llh;
+ int reclen = rec->lrh_len;
+ int index, rc;
+ struct llog_rec_tail *lrt;
+ struct dt_object *o;
+ __u32 chunk_size;
+ size_t left;
+ __u32 orig_last_idx;
+ bool pad = false;
ENTRY;
llh = loghandle->lgh_hdr;
LASSERT(llh->llh_size == reclen);
}
+ /* return error if osp object is stale */
+ if (idx != LLOG_HEADER_IDX && dt_object_stale(o))
+ RETURN(-ESTALE);
rc = dt_attr_get(env, o, &lgi->lgi_attr);
if (rc)
RETURN(rc);
/* llog can be empty only when first record is being written */
LASSERT(ergo(idx > 0, lgi->lgi_attr.la_size > 0));
- if (!ext2_test_bit(idx, LLOG_HDR_BITMAP(llh))) {
+ if (!test_bit_le(idx, LLOG_HDR_BITMAP(llh))) {
CERROR("%s: modify unset record %u\n",
o->do_lu.lo_dev->ld_obd->obd_name, idx);
RETURN(-ENOENT);
RETURN(rc);
loghandle->lgh_last_idx++; /* for pad rec */
+ pad = true;
}
/* if it's the last idx in log file, then return -ENOSPC
* or wrap around if a catalog */
RETURN(-ENOSPC);
}
+ down_write(&loghandle->lgh_last_sem);
/* increment the last_idx along with llh_tail index, they should
* be equal for a llog lifetime */
loghandle->lgh_last_idx++;
/* the lgh_hdr_mutex protects llog header data from concurrent
* update/cancel, the llh_count and llh_bitmap are protected */
mutex_lock(&loghandle->lgh_hdr_mutex);
- if (ext2_set_bit(index, LLOG_HDR_BITMAP(llh))) {
- CERROR("%s: index %u already set in log bitmap\n",
- o->do_lu.lo_dev->ld_obd->obd_name, index);
+ if (__test_and_set_bit_le(index, LLOG_HDR_BITMAP(llh))) {
+ CERROR("%s: index %u already set in llog bitmap "DFID"\n",
+ o->do_lu.lo_dev->ld_obd->obd_name, index,
+ PFID(lu_object_fid(&o->do_lu)));
mutex_unlock(&loghandle->lgh_hdr_mutex);
LBUG(); /* should never happen */
}
llh->llh_size = reclen;
}
+ /*
+ * readers (e.g. llog_osd_read_header()) must not find
+ * llog updated partially (bitmap/counter claims record,
+ * but a record hasn't been added yet) as this results
+ * in EIO.
+ */
+ dt_write_lock(env, o, 0);
+
if (lgi->lgi_attr.la_size == 0) {
lgi->lgi_off = 0;
lgi->lgi_buf.lb_len = llh->llh_hdr.lrh_len;
if (rc != 0)
GOTO(out_unlock, rc);
}
+ if (OBD_FAIL_PRECHECK(OBD_FAIL_LLOG_PAUSE_AFTER_PAD) && pad) {
+ /* a window for concurrent llog reader, see LU-12577 */
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LLOG_PAUSE_AFTER_PAD,
+ cfs_fail_val ?: 1);
+ }
out_unlock:
/* unlock here for remote object */
mutex_unlock(&loghandle->lgh_hdr_mutex);
- if (rc)
+ if (rc) {
+ dt_write_unlock(env, o);
GOTO(out, rc);
+ }
+ if (OBD_FAIL_PRECHECK(OBD_FAIL_LLOG_PROCESS_TIMEOUT) &&
+ cfs_fail_val == (unsigned int)(loghandle->lgh_id.lgl_oi.oi.oi_id &
+ 0xFFFFFFFF)) {
+ OBD_RACE(OBD_FAIL_LLOG_PROCESS_TIMEOUT);
+ msleep(1 * MSEC_PER_SEC);
+ }
/* computed index can be used to determine offset for fixed-size
* records. This also allows to handle Catalog wrap around case */
if (llh->llh_flags & LLOG_F_IS_FIXSIZE) {
lgi->lgi_off = llh->llh_hdr.lrh_len + (index - 1) * reclen;
} else {
rc = dt_attr_get(env, o, &lgi->lgi_attr);
- if (rc)
+ if (rc) {
+ dt_write_unlock(env, o);
GOTO(out, rc);
+ }
LASSERT(lgi->lgi_attr.la_valid & LA_SIZE);
lgi->lgi_off = max_t(__u64, lgi->lgi_attr.la_size,
lgi->lgi_buf.lb_len = reclen;
lgi->lgi_buf.lb_buf = rec;
rc = dt_record_write(env, o, &lgi->lgi_buf, &lgi->lgi_off, th);
+
+ dt_write_unlock(env, o);
if (rc < 0)
GOTO(out, rc);
+ up_write(&loghandle->lgh_last_sem);
+
CDEBUG(D_HA, "added record "DFID".%u, %u off%llu\n",
PFID(lu_object_fid(&o->do_lu)), index, rec->lrh_len,
lgi->lgi_off);
out:
/* cleanup llog for error case */
mutex_lock(&loghandle->lgh_hdr_mutex);
- ext2_clear_bit(index, LLOG_HDR_BITMAP(llh));
+ clear_bit_le(index, LLOG_HDR_BITMAP(llh));
llh->llh_count--;
mutex_unlock(&loghandle->lgh_hdr_mutex);
}
LLOG_HDR_TAIL(llh)->lrt_index = loghandle->lgh_last_idx;
+ up_write(&loghandle->lgh_last_sem);
RETURN(rc);
}
extra_flags;
}
+ if (unlikely(hdr->lrh_len == 0)) {
+ /* This is a corruption case: we cannot determine the next record,
+ * so jump directly to the last one to avoid an endless loop. */
+ LCONSOLE(D_WARNING, "Hit invalid llog record: "
+ "idx %u, type %u, id %u\n",
+ hdr->lrh_index, hdr->lrh_type, hdr->lrh_id);
+ hdr = llog_rec_hdr_next(last_hdr);
+ if (unlikely(hdr == last_hdr))
+ LCONSOLE(D_WARNING, "The last record crashed: "
+ "idx %u, type %u, id %u\n",
+ hdr->lrh_index, hdr->lrh_type,
+ hdr->lrh_id);
+ break;
+ }
+
changelog_remap_rec(rec, rec->cr_flags & flags, xflag);
hdr = llog_rec_hdr_next(hdr);
+ /* Yield CPU to avoid soft-lockup if there are too many records
+ * to be handled. */
+ cond_resched();
} while ((char *)hdr <= (char *)last_hdr);
}
LASSERT(loghandle);
LASSERT(loghandle->lgh_ctxt);
+ if (OBD_FAIL_PRECHECK(OBD_FAIL_MDS_CHANGELOG_DEL) &&
+ cfs_fail_val == ((unsigned long)loghandle & 0xFFFFFFFF)) {
+ OBD_RACE(OBD_FAIL_MDS_CHANGELOG_DEL);
+ msleep(MSEC_PER_SEC >> 2);
+ }
+
o = loghandle->lgh_obj;
LASSERT(o);
- LASSERT(dt_object_exists(o));
+ dt_read_lock(env, o, 0);
+ if (!llog_osd_exist(loghandle))
+ GOTO(out, rc = -ESTALE); /* object was destroyed */
+
dt = lu2dt_dev(o->do_lu.lo_dev);
LASSERT(dt);
if (rc)
GOTO(out, rc);
- CDEBUG(D_OTHER, "looking for log index %u (cur idx %u off"
- "%llu), size %llu\n", next_idx, *cur_idx,
+ CDEBUG(D_OTHER,
+ "looking for log index %u (cur idx %u off %llu), size %llu\n",
+ next_idx, *cur_idx,
*cur_offset, lgi->lgi_attr.la_size);
while (*cur_offset < lgi->lgi_attr.la_size) {
rec = buf;
if (LLOG_REC_HDR_NEEDS_SWABBING(rec))
lustre_swab_llog_rec(rec);
-
tail = (struct llog_rec_tail *)((char *)buf + rc -
sizeof(struct llog_rec_tail));
+
+ if (llog_verify_record(loghandle, rec)) {
+ /*
+ * the block seems corrupted. make a pad record so the
+ * caller can skip the block and try with the next one
+ */
+ rec->lrh_len = rc;
+ rec->lrh_index = next_idx;
+ rec->lrh_type = LLOG_PAD_MAGIC;
+
+ tail = rec_tail(rec);
+ tail->lrt_len = rc;
+ tail->lrt_index = next_idx;
+
+ GOTO(out, rc = 0);
+ }
+
/* get the last record in block */
last_rec = (struct llog_rec_hdr *)((char *)buf + rc -
tail->lrt_len);
lustre_swab_llog_rec(last_rec);
if (last_rec->lrh_index != tail->lrt_index) {
- CERROR("%s: invalid llog tail at log id "DFID":%x "
- "offset %llu last_rec idx %u tail idx %u"
- "lrt len %u read_size %d\n",
+ CERROR("%s: invalid llog tail at log id "DFID":%x offset %llu last_rec idx %u tail idx %u lrt len %u read_size %d\n",
o->do_lu.lo_dev->ld_obd->obd_name,
PFID(&loghandle->lgh_id.lgl_oi.oi_fid),
loghandle->lgh_id.lgl_ogen, *cur_offset,
/* sanity check that the start of the new buffer is no farther
* than the record that we wanted. This shouldn't happen. */
- if (rec->lrh_index > next_idx) {
+ if (next_idx && rec->lrh_index > next_idx) {
if (!force_mini_rec && next_idx > last_idx)
goto retry;
}
GOTO(out, rc = -EIO);
out:
+ dt_read_unlock(env, o);
return rc;
}
o = loghandle->lgh_obj;
LASSERT(o);
- LASSERT(dt_object_exists(o));
+ dt_read_lock(env, o, 0);
+ if (!llog_osd_exist(loghandle))
+ GOTO(out, rc = -ESTALE);
+
dt = lu2dt_dev(o->do_lu.lo_dev);
LASSERT(dt);
}
GOTO(out, rc = -EIO);
out:
+ dt_read_unlock(env, o);
return rc;
}
} else {
/* If logid == NULL, then it means the caller needs
* to allocate new FID (llog_cat_declare_add_rec()). */
- rc = obd_fid_alloc(env, ctxt->loc_exp,
- &lgi->lgi_fid, NULL);
+ rc = dt_fid_alloc(env, dt, &lgi->lgi_fid, NULL, NULL);
if (rc < 0)
RETURN(rc);
rc = 0;
GOTO(out, rc);
new_id = true;
}
-
+ if (OBD_FAIL_PRECHECK(OBD_FAIL_MDS_LLOG_UMOUNT_RACE) &&
+ cfs_fail_val == 1) {
+ cfs_fail_val = 2;
+ OBD_RACE(OBD_FAIL_MDS_LLOG_UMOUNT_RACE);
+ msleep(MSEC_PER_SEC);
+ }
o = ls_locate(env, ls, &lgi->lgi_fid, NULL);
if (IS_ERR(o))
GOTO(out_name, rc = PTR_ERR(o));
(struct dt_key *)name, th);
} else {
rc = dt_insert(env, dir, (struct dt_rec *)rec,
- (struct dt_key *)name, th, 1);
+ (struct dt_key *)name, th);
}
dt_write_unlock(env, dir);
rec->rec_type = S_IFREG;
dt_read_lock(env, llog_dir, 0);
rc = dt_insert(env, llog_dir, (struct dt_rec *)rec,
- (struct dt_key *)res->lgh_name,
- th, 1);
+ (struct dt_key *)res->lgh_name, th);
dt_read_unlock(env, llog_dir);
dt_object_put(env, llog_dir);
if (rc)
LASSERT(o != NULL);
dt_write_lock(env, o, 0);
- if (!dt_object_exists(o))
+ if (!llog_osd_exist(loghandle))
GOTO(out_unlock, rc = 0);
if (loghandle->lgh_name) {
if (rc < 0)
GOTO(out_unlock, rc);
+ loghandle->lgh_destroyed = true;
if (loghandle->lgh_ctxt->loc_flags & LLOG_CTXT_FLAG_NORMAL_FID) {
rc = llog_osd_regular_fid_del_name_entry(env, o, th, false);
if (rc < 0)
return 0;
}
-struct llog_operations llog_osd_ops = {
+const struct llog_operations llog_osd_ops = {
.lop_next_block = llog_osd_next_block,
.lop_prev_block = llog_osd_prev_block,
.lop_read_header = llog_osd_read_header,
};
EXPORT_SYMBOL(llog_osd_ops);
-struct llog_operations llog_common_cat_ops = {
+const struct llog_operations llog_common_cat_ops = {
.lop_next_block = llog_osd_next_block,
.lop_prev_block = llog_osd_prev_block,
.lop_read_header = llog_osd_read_header,
if (IS_ERR(th))
GOTO(out, rc = PTR_ERR(th));
- lgi->lgi_attr.la_valid = LA_MODE;
+ lgi->lgi_attr.la_valid = LA_MODE | LA_TYPE;
lgi->lgi_attr.la_mode = S_IFREG | S_IRUGO | S_IWUSR;
lgi->lgi_dof.dof_type = dt_mode_to_dft(S_IFREG);