* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2014 Intel Corporation.
+ * Copyright (c) 2012, 2015, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
}
/**
+ * Implementation of the llog_operations::lop_exist
+ *
+ * This function checks that llog exists on storage.
+ *
+ * \param[in] handle llog handle of the current llog
+
+ *
+ * \retval true if llog object exists and is not just destroyed
+ * \retval false if llog doesn't exist or just destroyed
+ */
+static int llog_osd_exist(struct llog_handle *handle)
+{
+ LASSERT(handle->lgh_obj);
+ /* a dying object (destroy in progress) is reported as non-existent
+ * even though it may still be present on storage */
+ return dt_object_exists(handle->lgh_obj) &&
+ !lu_object_is_dying(handle->lgh_obj->do_lu.lo_header);
+}
+
+/**
* Write a padding record to the llog
*
* This function writes a padding record to the end of llog. That may
* be needed if llog contains records of variable size, e.g. config logs
* or changelogs.
- * The padding record just aligns llog to the LLOG_CHUNK_SIZE boundary if
+ * The padding record just aligns llog to the llog chunk_size boundary if
* the current record doesn't fit in the remaining space.
*
* It allocates full length to avoid two separate writes for header and tail.
ENTRY;
- LASSERT(sizeof(*handle->lgh_hdr) == LLOG_CHUNK_SIZE);
-
o = handle->lgh_obj;
LASSERT(o);
lgi = llog_info(env);
- rc = dt_attr_get(env, o, &lgi->lgi_attr, NULL);
+ rc = dt_attr_get(env, o, &lgi->lgi_attr);
if (rc)
RETURN(rc);
lgi->lgi_off = 0;
lgi->lgi_buf.lb_buf = handle->lgh_hdr;
- lgi->lgi_buf.lb_len = LLOG_CHUNK_SIZE;
-
- rc = dt_record_read(env, o, &lgi->lgi_buf, &lgi->lgi_off);
- if (rc) {
- CERROR("%s: error reading log header from "DFID": rc = %d\n",
+ lgi->lgi_buf.lb_len = handle->lgh_hdr_size;
+ rc = dt_read(env, o, &lgi->lgi_buf, &lgi->lgi_off);
+ llh_hdr = &handle->lgh_hdr->llh_hdr;
+ if (rc < sizeof(*llh_hdr) || rc < llh_hdr->lrh_len) {
+ CERROR("%s: error reading "DFID" log header size %d: rc = %d\n",
o->do_lu.lo_dev->ld_obd->obd_name,
- PFID(lu_object_fid(&o->do_lu)), rc);
+ PFID(lu_object_fid(&o->do_lu)), rc < 0 ? 0 : rc,
+ -EFAULT);
+
+ if (rc >= 0)
+ rc = -EFAULT;
+
RETURN(rc);
}
- llh_hdr = &handle->lgh_hdr->llh_hdr;
if (LLOG_REC_HDR_NEEDS_SWABBING(llh_hdr))
lustre_swab_llog_hdr(handle->lgh_hdr);
PFID(lu_object_fid(&o->do_lu)),
llh_hdr->lrh_type, LLOG_HDR_MAGIC);
RETURN(-EIO);
- } else if (llh_hdr->lrh_len != LLOG_CHUNK_SIZE) {
+ } else if (llh_hdr->lrh_len < LLOG_MIN_CHUNK_SIZE ||
+ llh_hdr->lrh_len > handle->lgh_hdr_size) {
CERROR("%s: incorrectly sized log %s "DFID" header: "
- "%#x (expected %#x)\n"
+ "%#x (expected at least %#x)\n"
"you may need to re-run lconf --write_conf.\n",
o->do_lu.lo_dev->ld_obd->obd_name,
handle->lgh_name ? handle->lgh_name : "",
PFID(lu_object_fid(&o->do_lu)),
- llh_hdr->lrh_len, LLOG_CHUNK_SIZE);
+ llh_hdr->lrh_len, LLOG_MIN_CHUNK_SIZE);
+ RETURN(-EIO);
+ } else if (LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_index >
+ LLOG_HDR_BITMAP_SIZE(handle->lgh_hdr) ||
+ LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_len !=
+ llh_hdr->lrh_len) {
+ CERROR("%s: incorrectly sized log %s "DFID" tailer: "
+ "%#x : rc = %d\n",
+ o->do_lu.lo_dev->ld_obd->obd_name,
+ handle->lgh_name ? handle->lgh_name : "",
+ PFID(lu_object_fid(&o->do_lu)),
+ LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_len, -EIO);
RETURN(-EIO);
}
handle->lgh_hdr->llh_flags |= (flags & LLOG_F_EXT_MASK);
- handle->lgh_last_idx = handle->lgh_hdr->llh_tail.lrt_index;
+ handle->lgh_last_idx = LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_index;
RETURN(0);
}
int idx, struct thandle *th)
{
struct llog_thread_info *lgi = llog_info(env);
+ __u32 chunk_size;
struct dt_object *o;
int rc;
LASSERT(th);
LASSERT(loghandle);
LASSERT(rec);
- LASSERT(rec->lrh_len <= LLOG_CHUNK_SIZE);
+ LASSERT(rec->lrh_len <= loghandle->lgh_ctxt->loc_chunk_size);
o = loghandle->lgh_obj;
LASSERT(o);
- lgi->lgi_buf.lb_len = sizeof(struct llog_log_hdr);
+ chunk_size = loghandle->lgh_ctxt->loc_chunk_size;
+ lgi->lgi_buf.lb_len = chunk_size;
lgi->lgi_buf.lb_buf = NULL;
/* each time we update header */
rc = dt_declare_record_write(env, o, &lgi->lgi_buf, 0,
* the pad record can be inserted so take into account double
* record size
*/
- lgi->lgi_buf.lb_len = rec->lrh_len * 2;
+ lgi->lgi_buf.lb_len = chunk_size * 2;
lgi->lgi_buf.lb_buf = NULL;
/* XXX: implement declared window or multi-chunks approach */
rc = dt_declare_record_write(env, o, &lgi->lgi_buf, -1, th);
int index, rc;
struct llog_rec_tail *lrt;
struct dt_object *o;
+ __u32 chunk_size;
size_t left;
- bool header_is_updated = false;
ENTRY;
LASSERT(o);
LASSERT(th);
+ chunk_size = llh->llh_hdr.lrh_len;
CDEBUG(D_OTHER, "new record %x to "DFID"\n",
rec->lrh_type, PFID(lu_object_fid(&o->do_lu)));
- /* record length should not bigger than LLOG_CHUNK_SIZE */
- if (reclen > LLOG_CHUNK_SIZE)
+ if (!llog_osd_exist(loghandle))
+ RETURN(-ENOENT);
+
+ /* record length should not be bigger than the llog chunk size */
+ if (reclen > loghandle->lgh_hdr->llh_hdr.lrh_len)
RETURN(-E2BIG);
- rc = dt_attr_get(env, o, &lgi->lgi_attr, NULL);
+ /* sanity check for fixed-records llog */
+ if (idx != LLOG_HEADER_IDX && (llh->llh_flags & LLOG_F_IS_FIXSIZE)) {
+ LASSERT(llh->llh_size != 0);
+ LASSERT(llh->llh_size == reclen);
+ }
+
+ rc = dt_attr_get(env, o, &lgi->lgi_attr);
if (rc)
RETURN(rc);
/* llog can be empty only when first record is being written */
LASSERT(ergo(idx > 0, lgi->lgi_attr.la_size > 0));
- if (!ext2_test_bit(idx, llh->llh_bitmap)) {
+ if (!ext2_test_bit(idx, LLOG_HDR_BITMAP(llh))) {
CERROR("%s: modify unset record %u\n",
o->do_lu.lo_dev->ld_obd->obd_name, idx);
RETURN(-ENOENT);
if (idx == LLOG_HEADER_IDX) {
/* llog header update */
- LASSERT(reclen == sizeof(struct llog_log_hdr));
- LASSERT(rec == &llh->llh_hdr);
+ __u32 *bitmap = LLOG_HDR_BITMAP(llh);
lgi->lgi_off = 0;
- lgi->lgi_buf.lb_len = reclen;
- lgi->lgi_buf.lb_buf = rec;
+
+ /* If the bitmap index is not indicated
+ * (reccookie == NULL), then update the
+ * whole llog header. Otherwise only the
+ * header and the needed bitmap bits are
+ * updated, which in DNE cases significantly
+ * shrinks the RPC size.
+ * see distribute_txn_cancel_records()*/
+ if (reccookie == NULL) {
+ lgi->lgi_buf.lb_len = reclen;
+ lgi->lgi_buf.lb_buf = rec;
+ rc = dt_record_write(env, o, &lgi->lgi_buf,
+ &lgi->lgi_off, th);
+ RETURN(rc);
+ }
+
+ /* update the header */
+ lgi->lgi_buf.lb_len = llh->llh_bitmap_offset;
+ lgi->lgi_buf.lb_buf = llh;
rc = dt_record_write(env, o, &lgi->lgi_buf,
&lgi->lgi_off, th);
+ if (rc != 0)
+ RETURN(rc);
+
+ /* update the bitmap */
+ index = reccookie->lgc_index;
+ lgi->lgi_off = llh->llh_bitmap_offset +
+ (index / (sizeof(*bitmap) * 8)) *
+ sizeof(*bitmap);
+ lgi->lgi_buf.lb_len = sizeof(*bitmap);
+ lgi->lgi_buf.lb_buf =
+ &bitmap[index/(sizeof(*bitmap)*8)];
+ rc = dt_record_write(env, o, &lgi->lgi_buf,
+ &lgi->lgi_off, th);
+
RETURN(rc);
} else if (loghandle->lgh_cur_idx > 0) {
/**
"len:%u offset %llu\n",
POSTID(&loghandle->lgh_id.lgl_oi), idx,
rec->lrh_len, (long long)lgi->lgi_off);
- } else if (llh->llh_size > 0) {
- if (llh->llh_size != rec->lrh_len) {
- CERROR("%s: wrong record size, llh_size is %u"
- " but record size is %u\n",
- o->do_lu.lo_dev->ld_obd->obd_name,
- llh->llh_size, rec->lrh_len);
- RETURN(-EINVAL);
- }
- lgi->lgi_off = sizeof(*llh) + (idx - 1) * reclen;
+ } else if (llh->llh_flags & LLOG_F_IS_FIXSIZE) {
+ lgi->lgi_off = llh->llh_hdr.lrh_len +
+ (idx - 1) * reclen;
} else {
/* This can be result of lgh_cur_idx is not set during
* llog processing or llh_size is not set to proper
* process them page-at-a-time if needed. If it will cross a chunk
* boundary, write in a fake (but referenced) entry to pad the chunk.
*/
+
+
+ /* simulate ENOSPC when new plain llog is being added to the
+ * catalog */
+ if (OBD_FAIL_CHECK(OBD_FAIL_MDS_LLOG_CREATE_FAILED2) &&
+ llh->llh_flags & LLOG_F_IS_CAT)
+ RETURN(-ENOSPC);
+
LASSERT(lgi->lgi_attr.la_valid & LA_SIZE);
lgi->lgi_off = lgi->lgi_attr.la_size;
- left = LLOG_CHUNK_SIZE - (lgi->lgi_off & (LLOG_CHUNK_SIZE - 1));
+ left = chunk_size - (lgi->lgi_off & (chunk_size - 1));
/* NOTE: padding is a record, but no bit is set */
if (left != 0 && left != reclen &&
left < (reclen + LLOG_MIN_REC_SIZE)) {
RETURN(rc);
loghandle->lgh_last_idx++; /* for pad rec */
}
- /* if it's the last idx in log file, then return -ENOSPC */
- if (loghandle->lgh_last_idx >= LLOG_BITMAP_SIZE(llh) - 1)
- RETURN(-ENOSPC);
+ /* if it's the last idx in log file, then return -ENOSPC
+ * or wrap around if a catalog */
+ if (llog_is_full(loghandle) ||
+ unlikely(llh->llh_flags & LLOG_F_IS_CAT &&
+ OBD_FAIL_PRECHECK(OBD_FAIL_CAT_RECORDS) &&
+ loghandle->lgh_last_idx >= cfs_fail_val)) {
+ if (llh->llh_flags & LLOG_F_IS_CAT)
+ loghandle->lgh_last_idx = 0;
+ else
+ RETURN(-ENOSPC);
+ }
/* increment the last_idx along with llh_tail index, they should
* be equal for a llog lifetime */
loghandle->lgh_last_idx++;
index = loghandle->lgh_last_idx;
- llh->llh_tail.lrt_index = index;
+ LLOG_HDR_TAIL(llh)->lrt_index = index;
/**
* NB: the caller should make sure only 1 process access
* the lgh_last_idx, e.g. append should be exclusive.
* Otherwise it might hit the assert.
*/
- LASSERT(index < LLOG_BITMAP_SIZE(llh));
+ LASSERT(index < LLOG_HDR_BITMAP_SIZE(llh));
rec->lrh_index = index;
lrt = rec_tail(rec);
lrt->lrt_len = rec->lrh_len;
lrt->lrt_index = rec->lrh_index;
- /* the lgh_hdr_lock protects llog header data from concurrent
+ /* the lgh_hdr_mutex protects llog header data from concurrent
* update/cancel, the llh_count and llh_bitmap are protected */
- spin_lock(&loghandle->lgh_hdr_lock);
- if (ext2_set_bit(index, llh->llh_bitmap)) {
+ mutex_lock(&loghandle->lgh_hdr_mutex);
+ if (ext2_set_bit(index, LLOG_HDR_BITMAP(llh))) {
CERROR("%s: index %u already set in log bitmap\n",
o->do_lu.lo_dev->ld_obd->obd_name, index);
- spin_unlock(&loghandle->lgh_hdr_lock);
+ mutex_unlock(&loghandle->lgh_hdr_mutex);
LBUG(); /* should never happen */
}
llh->llh_count++;
- spin_unlock(&loghandle->lgh_hdr_lock);
- lgi->lgi_off = 0;
- lgi->lgi_buf.lb_len = llh->llh_hdr.lrh_len;
- lgi->lgi_buf.lb_buf = &llh->llh_hdr;
- rc = dt_record_write(env, o, &lgi->lgi_buf, &lgi->lgi_off, th);
- if (rc)
- GOTO(out, rc);
+ if (!(llh->llh_flags & LLOG_F_IS_FIXSIZE)) {
+ /* Update the minimum size of the llog record */
+ if (llh->llh_size == 0)
+ llh->llh_size = reclen;
+ else if (reclen < llh->llh_size)
+ llh->llh_size = reclen;
+ }
+
+ if (lgi->lgi_attr.la_size == 0) {
+ lgi->lgi_off = 0;
+ lgi->lgi_buf.lb_len = llh->llh_hdr.lrh_len;
+ lgi->lgi_buf.lb_buf = &llh->llh_hdr;
+ rc = dt_record_write(env, o, &lgi->lgi_buf, &lgi->lgi_off, th);
+ if (rc != 0)
+ GOTO(out_unlock, rc);
+ } else {
+ __u32 *bitmap = LLOG_HDR_BITMAP(llh);
+
+ /* Note: If this is not initialization (size == 0), then do not
+ * write the whole header (8k bytes), only update header/tail
+ * and bits needs to be updated. Because this update might be
+ * part of cross-MDT operation, which needs to write these
+ * updates into the update log(32KB limit) and also pack inside
+ * the RPC (1MB limit), if we write 8K for each operation, which
+ * will cost a lot space, and keep us adding more updates to one
+ * update log.*/
+ lgi->lgi_off = 0;
+ lgi->lgi_buf.lb_len = llh->llh_bitmap_offset;
+ lgi->lgi_buf.lb_buf = &llh->llh_hdr;
+ rc = dt_record_write(env, o, &lgi->lgi_buf, &lgi->lgi_off, th);
+ if (rc != 0)
+ GOTO(out_unlock, rc);
+
+ lgi->lgi_off = llh->llh_bitmap_offset +
+ (index / (sizeof(*bitmap) * 8)) * sizeof(*bitmap);
+ lgi->lgi_buf.lb_len = sizeof(*bitmap);
+ lgi->lgi_buf.lb_buf = &bitmap[index/(sizeof(*bitmap)*8)];
+ rc = dt_record_write(env, o, &lgi->lgi_buf, &lgi->lgi_off, th);
+ if (rc != 0)
+ GOTO(out_unlock, rc);
+
+ lgi->lgi_off = (unsigned long)LLOG_HDR_TAIL(llh) -
+ (unsigned long)llh;
+ lgi->lgi_buf.lb_len = sizeof(llh->llh_tail);
+ lgi->lgi_buf.lb_buf = LLOG_HDR_TAIL(llh);
+ rc = dt_record_write(env, o, &lgi->lgi_buf, &lgi->lgi_off, th);
+ if (rc != 0)
+ GOTO(out_unlock, rc);
+ }
- header_is_updated = true;
- rc = dt_attr_get(env, o, &lgi->lgi_attr, NULL);
+out_unlock:
+ /* unlock here for remote object */
+ mutex_unlock(&loghandle->lgh_hdr_mutex);
if (rc)
GOTO(out, rc);
- LASSERT(lgi->lgi_attr.la_valid & LA_SIZE);
- lgi->lgi_off = lgi->lgi_attr.la_size;
+ /* computed index can be used to determine offset for fixed-size
+ * records. This also allows to handle Catalog wrap around case */
+ if (llh->llh_flags & LLOG_F_IS_FIXSIZE) {
+ lgi->lgi_off = llh->llh_hdr.lrh_len + (index - 1) * reclen;
+ } else {
+ rc = dt_attr_get(env, o, &lgi->lgi_attr);
+ if (rc)
+ GOTO(out, rc);
+
+ LASSERT(lgi->lgi_attr.la_valid & LA_SIZE);
+ lgi->lgi_off = max_t(__u64, lgi->lgi_attr.la_size,
+ lgi->lgi_off);
+ }
+
lgi->lgi_buf.lb_len = reclen;
lgi->lgi_buf.lb_buf = rec;
rc = dt_record_write(env, o, &lgi->lgi_buf, &lgi->lgi_off, th);
if (rc < 0)
GOTO(out, rc);
- CDEBUG(D_OTHER, "added record "DOSTID": idx: %u, %u\n",
- POSTID(&loghandle->lgh_id.lgl_oi), index, rec->lrh_len);
+ CDEBUG(D_OTHER, "added record "DOSTID": idx: %u, %u off"LPU64"\n",
+ POSTID(&loghandle->lgh_id.lgl_oi), index, rec->lrh_len,
+ lgi->lgi_off);
if (reccookie != NULL) {
reccookie->lgc_lgl = loghandle->lgh_id;
reccookie->lgc_index = index;
RETURN(rc);
out:
/* cleanup llog for error case */
- spin_lock(&loghandle->lgh_hdr_lock);
- ext2_clear_bit(index, llh->llh_bitmap);
+ mutex_lock(&loghandle->lgh_hdr_mutex);
+ ext2_clear_bit(index, LLOG_HDR_BITMAP(llh));
llh->llh_count--;
- spin_unlock(&loghandle->lgh_hdr_lock);
+ mutex_unlock(&loghandle->lgh_hdr_mutex);
/* restore llog last_idx */
- loghandle->lgh_last_idx--;
- llh->llh_tail.lrt_index = loghandle->lgh_last_idx;
-
- /* restore the header on disk if it was written */
- if (header_is_updated) {
- lgi->lgi_off = 0;
- lgi->lgi_buf.lb_len = llh->llh_hdr.lrh_len;
- lgi->lgi_buf.lb_buf = &llh->llh_hdr;
- dt_record_write(env, o, &lgi->lgi_buf, &lgi->lgi_off, th);
+ if (--loghandle->lgh_last_idx == 0 &&
+ (llh->llh_flags & LLOG_F_IS_CAT) && llh->llh_cat_idx != 0) {
+ /* catalog had just wrap-around case */
+ loghandle->lgh_last_idx = LLOG_HDR_BITMAP_SIZE(llh) - 1;
}
+ LLOG_HDR_TAIL(llh)->lrt_index = loghandle->lgh_last_idx;
RETURN(rc);
}
* that we are not far enough along the log (because the
* actual records are larger than minimum size) we just skip
* some more records.
+ *
+ * Note: in llog_process_thread, it will use bitmap offset as
+ * the index to locate the record, which also includes some pad
+ * records, whose record size is very small, and it also does not
+ * consider pad records when recording the minimum record size
+ * (otherwise min_rec_size might be too small), so in some rare cases
+ * it might skip too many records for @goal, see llog_osd_next_block().
+ *
+ * When force_mini_rec is true, it means we have to use LLOG_MIN_REC_SIZE
+ * as the min record size to skip over, usually because in the previous
+ * try it skipped too many records, see llog_osd_next(prev)_block().
*/
-static inline void llog_skip_over(__u64 *off, int curr, int goal)
+static inline void llog_skip_over(struct llog_handle *lgh, __u64 *off,
+ int curr, int goal, __u32 chunk_size,
+ bool force_mini_rec)
{
- if (goal <= curr)
- return;
- *off = (*off + (goal - curr - 1) * LLOG_MIN_REC_SIZE) &
- ~(LLOG_CHUNK_SIZE - 1);
+ struct llog_log_hdr *llh = lgh->lgh_hdr;
+
+ /* goal should not be bigger than the record count */
+ if (goal > lgh->lgh_last_idx)
+ goal = lgh->lgh_last_idx;
+
+ if (goal > curr) {
+ if (llh->llh_flags & LLOG_F_IS_FIXSIZE) {
+ /* fixed-size records: the exact offset of @goal can
+ * be computed directly, past the header chunk */
+ *off = chunk_size + (goal - 1) * llh->llh_size;
+ } else {
+ __u64 min_rec_size = LLOG_MIN_REC_SIZE;
+
+ /* use the llog's recorded minimum record size as the
+ * skip step unless the caller forces the absolute
+ * minimum after a previous over-skip (see retry in
+ * llog_osd_next_block()) */
+ if (llh->llh_size > 0 && !force_mini_rec)
+ min_rec_size = llh->llh_size;
+
+ *off = *off + (goal - curr - 1) * min_rec_size;
+ }
+ }
+ /* always align to the lower chunk boundary */
+ *off &= ~(chunk_size - 1);
}
/**
* \param[in,out] cur_offset furtherst point read in the file
* \param[in] buf pointer to data buffer to fill
* \param[in] len required len to read, it is
- * LLOG_CHUNK_SIZE usually.
+ * usually llog chunk_size.
*
* \retval 0 on successful buffer read
* \retval negative value on error
struct dt_object *o;
struct dt_device *dt;
int rc;
+ __u32 chunk_size;
+ int last_idx = *cur_idx;
+ __u64 last_offset = *cur_offset;
+ bool force_mini_rec = false;
ENTRY;
LASSERT(env);
LASSERT(lgi);
- if (len == 0 || len & (LLOG_CHUNK_SIZE - 1))
+ chunk_size = loghandle->lgh_hdr->llh_hdr.lrh_len;
+ if (len == 0 || len & (chunk_size - 1))
RETURN(-EINVAL);
CDEBUG(D_OTHER, "looking for log index %u (cur idx %u off "LPU64")\n",
dt = lu2dt_dev(o->do_lu.lo_dev);
LASSERT(dt);
- rc = dt_attr_get(env, o, &lgi->lgi_attr, BYPASS_CAPA);
+ rc = dt_attr_get(env, o, &lgi->lgi_attr);
if (rc)
GOTO(out, rc);
struct llog_rec_hdr *rec, *last_rec;
struct llog_rec_tail *tail;
- llog_skip_over(cur_offset, *cur_idx, next_idx);
+ llog_skip_over(loghandle, cur_offset, *cur_idx,
+ next_idx, chunk_size, force_mini_rec);
- /* read up to next LLOG_CHUNK_SIZE block */
- lgi->lgi_buf.lb_len = LLOG_CHUNK_SIZE -
- (*cur_offset & (LLOG_CHUNK_SIZE - 1));
+ /* read up to next llog chunk_size block */
+ lgi->lgi_buf.lb_len = chunk_size -
+ (*cur_offset & (chunk_size - 1));
lgi->lgi_buf.lb_buf = buf;
rc = dt_read(env, o, &lgi->lgi_buf, cur_offset);
if (rc < 0) {
+ if (rc == -EBADR && !force_mini_rec)
+ goto retry;
+
CERROR("%s: can't read llog block from log "DFID
" offset "LPU64": rc = %d\n",
o->do_lu.lo_dev->ld_obd->obd_name,
memset(buf + rc, 0, len - rc);
}
- if (rc == 0) /* end of file, nothing to do */
+ if (rc == 0) { /* end of file, nothing to do */
+ if (!force_mini_rec)
+ goto retry;
GOTO(out, rc);
+ }
if (rc < sizeof(*tail)) {
+ if (!force_mini_rec)
+ goto retry;
+
CERROR("%s: invalid llog block at log id "DOSTID"/%u "
"offset "LPU64"\n",
o->do_lu.lo_dev->ld_obd->obd_name,
sizeof(struct llog_rec_tail));
/* get the last record in block */
last_rec = (struct llog_rec_hdr *)((char *)buf + rc -
- le32_to_cpu(tail->lrt_len));
+ tail->lrt_len);
if (LLOG_REC_HDR_NEEDS_SWABBING(last_rec))
lustre_swab_llog_rec(last_rec);
/* this shouldn't happen */
if (tail->lrt_index == 0) {
CERROR("%s: invalid llog tail at log id "DOSTID"/%u "
- "offset "LPU64"\n",
+ "offset "LPU64" bytes %d\n",
o->do_lu.lo_dev->ld_obd->obd_name,
POSTID(&loghandle->lgh_id.lgl_oi),
- loghandle->lgh_id.lgl_ogen, *cur_offset);
+ loghandle->lgh_id.lgl_ogen, *cur_offset, rc);
GOTO(out, rc = -EINVAL);
}
- if (tail->lrt_index < next_idx)
+ if (tail->lrt_index < next_idx) {
+ last_idx = *cur_idx;
+ last_offset = *cur_offset;
continue;
+ }
/* sanity check that the start of the new buffer is no farther
* than the record that we wanted. This shouldn't happen. */
if (rec->lrh_index > next_idx) {
+ if (!force_mini_rec && next_idx > last_idx)
+ goto retry;
+
CERROR("%s: missed desired record? %u > %u\n",
o->do_lu.lo_dev->ld_obd->obd_name,
rec->lrh_index, next_idx);
CLF_VERSION | CLF_RENAME);
GOTO(out, rc = 0);
+
+retry:
+ /* Note: because there are some pad records in the
+ * llog, llog_skip_over() might skip too many
+ * records; let's try skipping again with the minimum
+ * record size */
+ force_mini_rec = true;
+ *cur_offset = last_offset;
+ *cur_idx = last_idx;
}
GOTO(out, rc = -EIO);
out:
* \param[in] loghandle llog handle of the current llog
* \param[in] prev_idx target index to find
* \param[in] buf pointer to data buffer to fill
- * \param[in] len required len to read, it is LLOG_CHUNK_SIZE usually.
+ * \param[in] len required len to read, it is llog_chunk_size usually.
*
* \retval 0 on successful buffer read
* \retval negative value on error
struct dt_object *o;
struct dt_device *dt;
loff_t cur_offset;
+ __u32 chunk_size;
int rc;
ENTRY;
- if (len == 0 || len & (LLOG_CHUNK_SIZE - 1))
+ chunk_size = loghandle->lgh_hdr->llh_hdr.lrh_len;
+ if (len == 0 || len & (chunk_size - 1))
RETURN(-EINVAL);
CDEBUG(D_OTHER, "looking for log index %u\n", prev_idx);
dt = lu2dt_dev(o->do_lu.lo_dev);
LASSERT(dt);
- cur_offset = LLOG_CHUNK_SIZE;
- llog_skip_over(&cur_offset, 0, prev_idx);
+ /* Let's only use mini record size for previous block read
+ * for now XXX */
+ cur_offset = chunk_size;
+ llog_skip_over(loghandle, &cur_offset, 0, prev_idx,
+ chunk_size, true);
- rc = dt_attr_get(env, o, &lgi->lgi_attr, BYPASS_CAPA);
+ rc = dt_attr_get(env, o, &lgi->lgi_attr);
if (rc)
GOTO(out, rc);
* \retval dt_object of llog directory
* \retval ERR_PTR of negative value on error
*/
-struct dt_object *llog_osd_dir_get(const struct lu_env *env,
- struct llog_ctxt *ctxt)
+static struct dt_object *llog_osd_dir_get(const struct lu_env *env,
+ struct llog_ctxt *ctxt)
{
struct dt_device *dt;
struct dt_thread_info *dti = dt_info(env);
struct dt_object *o;
struct dt_device *dt;
struct ls_device *ls;
- struct local_oid_storage *los;
+ struct local_oid_storage *los = NULL;
int rc = 0;
ENTRY;
LASSERT(ctxt->loc_exp->exp_obd);
dt = ctxt->loc_exp->exp_obd->obd_lvfs_ctxt.dt;
LASSERT(dt);
+ if (ctxt->loc_flags & LLOG_CTXT_FLAG_NORMAL_FID) {
+ struct lu_object_conf conf = { 0 };
+ if (logid != NULL) {
+ logid_to_fid(logid, &lgi->lgi_fid);
+ } else {
+ /* If logid == NULL, then it means the caller needs
+ * to allocate new FID (llog_cat_declare_add_rec()). */
+ rc = obd_fid_alloc(env, ctxt->loc_exp,
+ &lgi->lgi_fid, NULL);
+ if (rc < 0)
+ RETURN(rc);
+ rc = 0;
+ conf.loc_flags = LOC_F_NEW;
+ }
+
+ o = dt_locate_at(env, dt, &lgi->lgi_fid,
+ dt->dd_lu_dev.ld_site->ls_top_dev, &conf);
+ if (IS_ERR(o))
+ RETURN(PTR_ERR(o));
+
+ goto after_open;
+ }
ls = ls_device_get(dt);
if (IS_ERR(ls))
if (IS_ERR(o))
GOTO(out_name, rc = PTR_ERR(o));
+after_open:
/* No new llog is expected but doesn't exist */
if (open_param != LLOG_OPEN_NEW && !dt_object_exists(o))
GOTO(out_put, rc = -ENOENT);
if (handle->lgh_name != NULL)
OBD_FREE(handle->lgh_name, strlen(name) + 1);
out:
- dt_los_put(los);
+ if (los != NULL)
+ dt_los_put(los);
RETURN(rc);
}
/**
- * Implementation of the llog_operations::lop_exist
+ * Get dir for regular fid log object
*
- * This function checks that llog exists on storage.
+ * Get directory for regular fid log object, and these regular fid log
+ * object will be inserted under this directory, to satisfy the FS
+ * consistency check, e2fsck etc.
*
- * \param[in] handle llog handle of the current llog
+ * \param [in] env execution environment
+ * \param [in] dto llog object
*
- * \retval true if llog object exists and is not just destroyed
- * \retval false if llog doesn't exist or just destroyed
+ * \retval pointer to the directory if it is found.
+ * \retval ERR_PTR(negative errno) if it fails.
*/
-static int llog_osd_exist(struct llog_handle *handle)
+struct dt_object *llog_osd_get_regular_fid_dir(const struct lu_env *env,
+ struct dt_object *dto)
+{
+ struct llog_thread_info *lgi = llog_info(env);
+ struct seq_server_site *ss = dto->do_lu.lo_dev->ld_site->ld_seq_site;
+ struct lu_seq_range *range = &lgi->lgi_range;
+ struct lu_fid *dir_fid = &lgi->lgi_fid;
+ struct dt_object *dir;
+ int rc;
+ ENTRY;
+
+ /* look up which server index owns the llog object's FID sequence */
+ fld_range_set_any(range);
+ LASSERT(ss != NULL);
+ rc = ss->ss_server_fld->lsf_seq_lookup(env, ss->ss_server_fld,
+ fid_seq(lu_object_fid(&dto->do_lu)), range);
+ if (rc < 0)
+ RETURN(ERR_PTR(rc));
+
+ /* build the update_log_dir FID for that index and locate the dir */
+ lu_update_log_dir_fid(dir_fid, range->lsr_index);
+ dir = dt_locate(env, lu2dt_dev(dto->do_lu.lo_dev), dir_fid);
+ if (IS_ERR(dir))
+ RETURN(dir);
+
+ if (!dt_try_as_dir(env, dir)) {
+ lu_object_put(env, &dir->do_lu);
+ RETURN(ERR_PTR(-ENOTDIR));
+ }
+
+ RETURN(dir);
+}
+
+/**
+ * Add llog object with regular FID to name entry
+ *
+ * Add llog object with regular FID to name space, and each llog
+ * object on each MDT will be /update_log_dir/[seq:oid:ver],
+ * so to satisfy the namespace consistency check, e2fsck etc.
+ *
+ * \param [in] env execution environment
+ * \param [in] dto llog object
+ * \param [in] th thandle
+ * \param [in] declare if it is declare or execution
+ *
+ * \retval 0 if insertion succeeds.
+ * \retval negative errno if insertion fails.
+ */
+static int
+llog_osd_regular_fid_add_name_entry(const struct lu_env *env,
+ struct dt_object *dto,
+ struct thandle *th, bool declare)
+{
+ struct llog_thread_info *lgi = llog_info(env);
+ const struct lu_fid *fid = lu_object_fid(&dto->do_lu);
+ struct dt_insert_rec *rec = &lgi->lgi_dt_rec;
+ struct dt_object *dir;
+ char *name = lgi->lgi_name;
+ int rc;
+ ENTRY;
+
+ /* only llogs with regular (normal) FIDs get a name-space entry */
+ if (!fid_is_norm(fid))
+ RETURN(0);
+
+ dir = llog_osd_get_regular_fid_dir(env, dto);
+ if (IS_ERR(dir))
+ RETURN(PTR_ERR(dir));
+
+ /* the directory entry name is the string form of the FID itself */
+ rec->rec_fid = fid;
+ rec->rec_type = S_IFREG;
+ snprintf(name, sizeof(lgi->lgi_name), DFID, PFID(fid));
+ dt_write_lock(env, dir, 0);
+ if (declare) {
+ rc = dt_declare_insert(env, dir, (struct dt_rec *)rec,
+ (struct dt_key *)name, th);
+ } else {
+ rc = dt_insert(env, dir, (struct dt_rec *)rec,
+ (struct dt_key *)name, th, 1);
+ }
+ dt_write_unlock(env, dir);
+
+ lu_object_put(env, &dir->do_lu);
+ RETURN(rc);
}
+
/**
* Implementation of the llog_operations::lop_declare_create
*
if (dt_object_exists(o))
RETURN(0);
+ if (res->lgh_ctxt->loc_flags & LLOG_CTXT_FLAG_NORMAL_FID) {
+ struct llog_thread_info *lgi = llog_info(env);
+
+ lgi->lgi_attr.la_valid = LA_MODE | LA_SIZE;
+ lgi->lgi_attr.la_size = 0;
+ lgi->lgi_attr.la_mode = S_IFREG | S_IRUGO | S_IWUSR;
+ lgi->lgi_dof.dof_type = dt_mode_to_dft(S_IFREG);
+
+ rc = dt_declare_create(env, o, &lgi->lgi_attr, NULL,
+ &lgi->lgi_dof, th);
+ if (rc < 0)
+ RETURN(rc);
+
+
+ rc = llog_osd_regular_fid_add_name_entry(env, o, th, true);
+
+ RETURN(rc);
+ }
los = res->private_data;
LASSERT(los);
if (dt_object_exists(o))
RETURN(-EEXIST);
+ if (res->lgh_ctxt->loc_flags & LLOG_CTXT_FLAG_NORMAL_FID) {
+ struct llog_thread_info *lgi = llog_info(env);
+
+ lgi->lgi_attr.la_valid = LA_MODE | LA_SIZE | LA_TYPE;
+ lgi->lgi_attr.la_size = 0;
+ lgi->lgi_attr.la_mode = S_IFREG | S_IRUGO | S_IWUSR;
+ lgi->lgi_dof.dof_type = dt_mode_to_dft(S_IFREG);
+
+ dt_write_lock(env, o, 0);
+ rc = dt_create(env, o, &lgi->lgi_attr, NULL,
+ &lgi->lgi_dof, th);
+ dt_write_unlock(env, o);
+ if (rc < 0)
+ RETURN(rc);
+
+ rc = llog_osd_regular_fid_add_name_entry(env, o, th, false);
+
+ RETURN(rc);
+ }
+
los = res->private_data;
LASSERT(los);
dt_read_lock(env, llog_dir, 0);
rc = dt_insert(env, llog_dir, (struct dt_rec *)rec,
(struct dt_key *)res->lgh_name,
- th, BYPASS_CAPA, 1);
+ th, 1);
dt_read_unlock(env, llog_dir);
lu_object_put(env, &llog_dir->do_lu);
if (rc)
LASSERT(handle->lgh_obj);
- lu_object_put(env, &handle->lgh_obj->do_lu);
-
+ if (handle->lgh_ctxt->loc_flags & LLOG_CTXT_FLAG_NORMAL_FID) {
+ /* Remove the object from the cache, otherwise it may
+ * hold LOD being released during cleanup process */
+ lu_object_put_nocache(env, &handle->lgh_obj->do_lu);
+ LASSERT(handle->private_data == NULL);
+ RETURN(rc);
+ } else {
+ lu_object_put(env, &handle->lgh_obj->do_lu);
+ }
los = handle->private_data;
LASSERT(los);
dt_los_put(los);
}
/**
- * Implementation of the llog_operations::lop_destroy
+ * delete llog object name entry
*
- * This function destroys the llog and deletes also entry in the
+ * Delete llog object (with regular FID) from name space (under
+ * update_log_dir).
+ *
+ * \param [in] env execution environment
+ * \param [in] dto llog object
+ * \param [in] th thandle
+ * \param [in] declare if it is declare or execution
+ *
+ * \retval 0 if deletion succeeds.
+ * \retval negative errno if deletion fails.
+ */
+static int
+llog_osd_regular_fid_del_name_entry(const struct lu_env *env,
+ struct dt_object *dto,
+ struct thandle *th, bool declare)
+{
+ struct llog_thread_info *lgi = llog_info(env);
+ const struct lu_fid *fid = lu_object_fid(&dto->do_lu);
+ struct dt_object *dir;
+ char *name = lgi->lgi_name;
+ int rc;
+ ENTRY;
+
+ /* only llogs with regular (normal) FIDs have a name-space entry */
+ if (!fid_is_norm(fid))
+ RETURN(0);
+
+ dir = llog_osd_get_regular_fid_dir(env, dto);
+ if (IS_ERR(dir))
+ RETURN(PTR_ERR(dir));
+
+ /* the directory entry name is the string form of the FID itself,
+ * matching llog_osd_regular_fid_add_name_entry() */
+ snprintf(name, sizeof(lgi->lgi_name), DFID, PFID(fid));
+ dt_write_lock(env, dir, 0);
+ if (declare) {
+ rc = dt_declare_delete(env, dir, (struct dt_key *)name,
+ th);
+ } else {
+ rc = dt_delete(env, dir, (struct dt_key *)name, th);
+ }
+ dt_write_unlock(env, dir);
+
+ lu_object_put(env, &dir->do_lu);
+ RETURN(rc);
+}
+
+/**
+ * Implementation of the llog_operations::lop_declare_destroy
+ *
+ * This function declare destroys the llog and deletes also entry in the
* llog directory in case of named llog. Llog should be opened prior that.
- * Destroy method is not part of external transaction and does everything
- * inside.
*
* \param[in] env execution environment
* \param[in] loghandle llog handle of the current llog
* \retval 0 on successful destroy
* \retval negative value on error
*/
-static int llog_osd_destroy(const struct lu_env *env,
- struct llog_handle *loghandle)
+static int llog_osd_declare_destroy(const struct lu_env *env,
+ struct llog_handle *loghandle,
+ struct thandle *th)
{
struct llog_ctxt *ctxt;
struct dt_object *o, *llog_dir = NULL;
- struct dt_device *d;
- struct thandle *th;
- char *name = NULL;
int rc;
ENTRY;
o = loghandle->lgh_obj;
LASSERT(o);
- d = lu2dt_dev(o->do_lu.lo_dev);
- LASSERT(d);
- LASSERT(d == ctxt->loc_exp->exp_obd->obd_lvfs_ctxt.dt);
-
- th = dt_trans_create(env, d);
- if (IS_ERR(th))
- RETURN(PTR_ERR(th));
-
if (loghandle->lgh_name) {
llog_dir = llog_osd_dir_get(env, ctxt);
if (IS_ERR(llog_dir))
- GOTO(out_trans, rc = PTR_ERR(llog_dir));
+ RETURN(PTR_ERR(llog_dir));
- name = loghandle->lgh_name;
rc = dt_declare_delete(env, llog_dir,
- (struct dt_key *)name, th);
- if (rc)
- GOTO(out_trans, rc);
+ (struct dt_key *)loghandle->lgh_name,
+ th);
+ if (rc < 0)
+ GOTO(out_put, rc);
}
- dt_declare_ref_del(env, o, th);
+ rc = dt_declare_ref_del(env, o, th);
+ if (rc < 0)
+ GOTO(out_put, rc);
rc = dt_declare_destroy(env, o, th);
- if (rc)
- GOTO(out_trans, rc);
+ if (rc < 0)
+ GOTO(out_put, rc);
- rc = dt_trans_start_local(env, d, th);
- if (rc)
- GOTO(out_trans, rc);
+ if (loghandle->lgh_ctxt->loc_flags & LLOG_CTXT_FLAG_NORMAL_FID) {
+ rc = llog_osd_regular_fid_del_name_entry(env, o, th, true);
+ if (rc < 0)
+ GOTO(out_put, rc);
+ }
+
+out_put:
+ if (!(IS_ERR_OR_NULL(llog_dir)))
+ lu_object_put(env, &llog_dir->do_lu);
+
+ RETURN(rc);
+}
+
+
+/**
+ * Implementation of the llog_operations::lop_destroy
+ *
+ * This function destroys the llog and deletes also entry in the
+ * llog directory in case of named llog. Llog should be opened prior that.
+ * Destroy method is not part of external transaction and does everything
+ * inside.
+ *
+ * \param[in] env execution environment
+ * \param[in] loghandle llog handle of the current llog
+ *
+ * \retval 0 on successful destroy
+ * \retval negative value on error
+ */
+static int llog_osd_destroy(const struct lu_env *env,
+ struct llog_handle *loghandle, struct thandle *th)
+{
+ struct llog_ctxt *ctxt;
+ struct dt_object *o, *llog_dir = NULL;
+ int rc;
+
+ ENTRY;
+
+ ctxt = loghandle->lgh_ctxt;
+ LASSERT(ctxt != NULL);
+
+ o = loghandle->lgh_obj;
+ LASSERT(o != NULL);
dt_write_lock(env, o, 0);
- if (dt_object_exists(o)) {
- if (name) {
- dt_read_lock(env, llog_dir, 0);
- rc = dt_delete(env, llog_dir,
- (struct dt_key *) name,
- th, BYPASS_CAPA);
- dt_read_unlock(env, llog_dir);
- if (rc) {
- CERROR("%s: can't remove llog %s: rc = %d\n",
- o->do_lu.lo_dev->ld_obd->obd_name,
- name, rc);
- GOTO(out_unlock, rc);
- }
+ if (!dt_object_exists(o))
+ GOTO(out_unlock, rc = 0);
+
+ if (loghandle->lgh_name) {
+ llog_dir = llog_osd_dir_get(env, ctxt);
+ if (IS_ERR(llog_dir))
+ GOTO(out_unlock, rc = PTR_ERR(llog_dir));
+
+ dt_read_lock(env, llog_dir, 0);
+ rc = dt_delete(env, llog_dir,
+ (struct dt_key *)loghandle->lgh_name,
+ th);
+ dt_read_unlock(env, llog_dir);
+ if (rc) {
+ CERROR("%s: can't remove llog %s: rc = %d\n",
+ o->do_lu.lo_dev->ld_obd->obd_name,
+ loghandle->lgh_name, rc);
+ GOTO(out_unlock, rc);
}
- dt_ref_del(env, o, th);
- rc = dt_destroy(env, o, th);
- if (rc)
+ }
+
+ dt_ref_del(env, o, th);
+ rc = dt_destroy(env, o, th);
+ if (rc < 0)
+ GOTO(out_unlock, rc);
+
+ if (loghandle->lgh_ctxt->loc_flags & LLOG_CTXT_FLAG_NORMAL_FID) {
+ rc = llog_osd_regular_fid_del_name_entry(env, o, th, false);
+ if (rc < 0)
GOTO(out_unlock, rc);
}
+
out_unlock:
dt_write_unlock(env, o);
-out_trans:
- dt_trans_stop(env, d, th);
- if (llog_dir != NULL)
+ if (!(IS_ERR_OR_NULL(llog_dir)))
lu_object_put(env, &llog_dir->do_lu);
RETURN(rc);
}
ctxt = llog_ctxt_get(olg->olg_ctxts[ctxt_idx]);
LASSERT(ctxt);
+ if (disk_obd == NULL)
+ GOTO(out, rc = 0);
+
/* initialize data allowing to generate new fids,
* literally we need a sequece */
lgi->lgi_fid.f_seq = FID_SEQ_LLOG;
.lop_next_block = llog_osd_next_block,
.lop_prev_block = llog_osd_prev_block,
.lop_read_header = llog_osd_read_header,
+ .lop_declare_destroy = llog_osd_declare_destroy,
.lop_destroy = llog_osd_destroy,
.lop_setup = llog_osd_setup,
.lop_cleanup = llog_osd_cleanup,
};
EXPORT_SYMBOL(llog_osd_ops);
+/* llog operations for catalog llogs: the same OSD backend methods as
+ * llog_osd_ops, except that records are added/declared through the
+ * catalog layer (llog_cat_add_rec / llog_cat_declare_add_rec) */
+struct llog_operations llog_common_cat_ops = {
+ .lop_next_block = llog_osd_next_block,
+ .lop_prev_block = llog_osd_prev_block,
+ .lop_read_header = llog_osd_read_header,
+ .lop_declare_destroy = llog_osd_declare_destroy,
+ .lop_destroy = llog_osd_destroy,
+ .lop_setup = llog_osd_setup,
+ .lop_cleanup = llog_osd_cleanup,
+ .lop_open = llog_osd_open,
+ .lop_exist = llog_osd_exist,
+ .lop_declare_create = llog_osd_declare_create,
+ .lop_create = llog_osd_create,
+ .lop_declare_write_rec = llog_osd_declare_write_rec,
+ .lop_write_rec = llog_osd_write_rec,
+ .lop_close = llog_osd_close,
+ .lop_add = llog_cat_add_rec,
+ .lop_declare_add = llog_cat_declare_add_rec,
+};
+EXPORT_SYMBOL(llog_common_cat_ops);
+
/**
* Read the special file which contains the list of llog catalogs IDs
*
lgi->lgi_attr.la_mode = S_IFREG | S_IRUGO | S_IWUSR;
lgi->lgi_dof.dof_type = dt_mode_to_dft(S_IFREG);
+ th->th_wait_submit = 1;
+ /* Make the llog object creation synchronization, so
+ * it will be reliable to the reference, especially
+ * for remote reference */
+ th->th_sync = 1;
+
rc = dt_declare_create(env, o, &lgi->lgi_attr, NULL,
&lgi->lgi_dof, th);
if (rc)
GOTO(out, rc);
}
- rc = dt_attr_get(env, o, &lgi->lgi_attr, BYPASS_CAPA);
+ rc = dt_attr_get(env, o, &lgi->lgi_attr);
if (rc)
GOTO(out, rc);
lgi->lgi_buf.lb_buf = idarray;
lgi->lgi_buf.lb_len = size;
rc = dt_record_read(env, o, &lgi->lgi_buf, &lgi->lgi_off);
- if (rc) {
+ /* -EFAULT means the llog is a sparse file. This is not an error
+ * after arbitrary OST index is supported. */
+ if (rc < 0 && rc != -EFAULT) {
CERROR("%s: error reading CATALOGS: rc = %d\n",
o->do_lu.lo_dev->ld_obd->obd_name, rc);
GOTO(out, rc);
if (!dt_object_exists(o))
GOTO(out, rc = -ENOENT);
- rc = dt_attr_get(env, o, &lgi->lgi_attr, BYPASS_CAPA);
+ rc = dt_attr_get(env, o, &lgi->lgi_attr);
if (rc)
GOTO(out, rc);
lgi->lgi_buf.lb_buf = idarray;
rc = dt_declare_record_write(env, o, &lgi->lgi_buf, lgi->lgi_off, th);
if (rc)
- GOTO(out, rc);
+ GOTO(out_trans, rc);
+
+ /* For update log, this happens during initialization,
+ * see lod_sub_prep_llog(), and we need make sure catlog
+ * file ID is written to catlist file(committed) before
+ * cross-MDT operation write update records to catlog FILE,
+ * otherwise, during failover these update records might
+ * be missing */
+ if (fid_is_update_log(fid))
+ th->th_sync = 1;
rc = dt_trans_start_local(env, d, th);
if (rc)
GOTO(out_trans, rc);
+ th->th_wait_submit = 1;
+
rc = dt_record_write(env, o, &lgi->lgi_buf, &lgi->lgi_off, th);
if (rc)
CDEBUG(D_INODE, "can't write CATALOGS at index %d: rc = %d\n",