X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fobdclass%2Fllog.c;h=43bf3965458ede127289cc4963de981d2f1a5d99;hp=d3b1e04d774986385751d5a20ddaf38bf6da5b5e;hb=0670d5aed457196121c843fd24877d3f2670d478;hpb=3442db6faf685fbdbd092bdfdc8d273e4990a141 diff --git a/lustre/obdclass/llog.c b/lustre/obdclass/llog.c index d3b1e04..43bf396 100644 --- a/lustre/obdclass/llog.c +++ b/lustre/obdclass/llog.c @@ -23,7 +23,7 @@ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2012, 2016, Intel Corporation. + * Copyright (c) 2012, 2017, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -47,6 +47,7 @@ #include #include #include +#include #include #include "llog_internal.h" /* @@ -63,6 +64,7 @@ static struct llog_handle *llog_alloc_handle(void) init_rwsem(&loghandle->lgh_lock); mutex_init(&loghandle->lgh_hdr_mutex); + init_rwsem(&loghandle->lgh_last_sem); INIT_LIST_HEAD(&loghandle->u.phd.phd_entry); atomic_set(&loghandle->lgh_refcount, 1); @@ -169,6 +171,9 @@ int llog_destroy(const struct lu_env *env, struct llog_handle *handle) dt = lu2dt_dev(handle->lgh_obj->do_lu.lo_dev); + if (unlikely(unlikely(dt->dd_rdonly))) + RETURN(-EROFS); + th = dt_trans_create(env, dt); if (IS_ERR(th)) RETURN(PTR_ERR(th)); @@ -191,38 +196,39 @@ out_trans: EXPORT_SYMBOL(llog_destroy); /* returns negative on error; 0 if success; 1 if success & log destroyed */ -int llog_cancel_rec(const struct lu_env *env, struct llog_handle *loghandle, - int index) +int llog_cancel_arr_rec(const struct lu_env *env, struct llog_handle *loghandle, + int num, int *index) { struct llog_thread_info *lgi = llog_info(env); struct dt_device *dt; - struct llog_log_hdr *llh = loghandle->lgh_hdr; + struct llog_log_hdr *llh; struct thandle *th; - int rc; + __u32 tmp_lgc_index; + int rc, i = 0; int rc1; bool subtract_count = false; ENTRY; - CDEBUG(D_RPCTRACE, "Canceling %d in log "DFID"\n", index, - PFID(&loghandle->lgh_id.lgl_oi.oi_fid)); - - if (index == 0) { - CERROR("Can't cancel index 0 which is header\n"); - RETURN(-EINVAL); - } - LASSERT(loghandle != NULL); LASSERT(loghandle->lgh_ctxt != NULL); LASSERT(loghandle->lgh_obj != NULL); + llh = loghandle->lgh_hdr; + + CDEBUG(D_RPCTRACE, "Canceling %d records, first %d in log "DFID"\n", + num, index[0], PFID(&loghandle->lgh_id.lgl_oi.oi_fid)); + dt = lu2dt_dev(loghandle->lgh_obj->do_lu.lo_dev); + if (unlikely(unlikely(dt->dd_rdonly))) + RETURN(0); + th = dt_trans_create(env, dt); if (IS_ERR(th)) RETURN(PTR_ERR(th)); - rc = llog_declare_write_rec(env, loghandle, &llh->llh_hdr, index, th); + rc = llog_declare_write_rec(env, loghandle, &llh->llh_hdr, 0, th); if (rc < 0) GOTO(out_trans, rc); @@ -240,19 +246,32 @@ int llog_cancel_rec(const struct lu_env *env, struct llog_handle *loghandle, down_write(&loghandle->lgh_lock); /* clear bitmap */ mutex_lock(&loghandle->lgh_hdr_mutex); - if (!ext2_clear_bit(index, LLOG_HDR_BITMAP(llh))) { - CDEBUG(D_RPCTRACE, "Catalog index %u already clear?\n", index); - GOTO(out_unlock, rc); + for (i = 0; i < num; ++i) { + if (index[i] == 0) { + CERROR("Can't cancel index 0 which is header\n"); + GOTO(out_unlock, rc = -EINVAL); + } + if (!ext2_clear_bit(index[i], LLOG_HDR_BITMAP(llh))) { + CDEBUG(D_RPCTRACE, "Catalog index %u already clear?\n", + index[i]); + GOTO(out_unlock, rc = -ENOENT); + } } - - loghandle->lgh_hdr->llh_count--; + loghandle->lgh_hdr->llh_count -= num; subtract_count = true; + + /* Since llog_process_thread use 
lgi_cookie, it`s better to save them + * and restore after using + */ + tmp_lgc_index = lgi->lgi_cookie.lgc_index; /* Pass this index to llog_osd_write_rec(), which will use the index * to only update the necesary bitmap. */ - lgi->lgi_cookie.lgc_index = index; + lgi->lgi_cookie.lgc_index = index[0]; /* update header */ - rc = llog_write_rec(env, loghandle, &llh->llh_hdr, &lgi->lgi_cookie, - LLOG_HEADER_IDX, th); + rc = llog_write_rec(env, loghandle, &llh->llh_hdr, (num != 1 ? NULL : + &lgi->lgi_cookie), LLOG_HEADER_IDX, th); + lgi->lgi_cookie.lgc_index = tmp_lgc_index; + if (rc != 0) GOTO(out_unlock, rc); @@ -285,15 +304,23 @@ out_trans: rc1 = dt_trans_stop(env, dt, th); if (rc == 0) rc = rc1; - if (rc < 0 && subtract_count) { + if (rc < 0) { mutex_lock(&loghandle->lgh_hdr_mutex); - loghandle->lgh_hdr->llh_count++; - ext2_set_bit(index, LLOG_HDR_BITMAP(llh)); + if (subtract_count) + loghandle->lgh_hdr->llh_count += num; + for (i = i - 1; i >= 0; i--) + ext2_set_bit(index[i], LLOG_HDR_BITMAP(llh)); mutex_unlock(&loghandle->lgh_hdr_mutex); } RETURN(rc); } +int llog_cancel_rec(const struct lu_env *env, struct llog_handle *loghandle, + int index) +{ + return llog_cancel_arr_rec(env, loghandle, 1, &index); +} + int llog_read_header(const struct lu_env *env, struct llog_handle *handle, const struct obd_uuid *uuid) { @@ -419,6 +446,7 @@ static int llog_process_thread(void *arg) struct llog_handle *loghandle = lpi->lpi_loghandle; struct llog_log_hdr *llh = loghandle->lgh_hdr; struct llog_process_cat_data *cd = lpi->lpi_catdata; + struct llog_thread_info *lti; char *buf; size_t chunk_size; __u64 cur_offset; @@ -432,6 +460,8 @@ static int llog_process_thread(void *arg) if (llh == NULL) RETURN(-EINVAL); + lti = lpi->lpi_env == NULL ? NULL : llog_info(lpi->lpi_env); + cur_offset = chunk_size = llh->llh_hdr.lrh_len; /* expect chunk_size to be power of two */ LASSERT(is_power_of_2(chunk_size)); @@ -457,6 +487,7 @@ static int llog_process_thread(void *arg) unsigned int buf_offset = 0; bool partial_chunk; int lh_last_idx; + int synced_idx = 0; /* skip records not set in bitmap */ while (index <= last_index && @@ -474,7 +505,8 @@ repeat: /* get the buf with our target record; avoid old garbage */ memset(buf, 0, chunk_size); /* the record index for outdated chunk data */ - lh_last_idx = loghandle->lgh_last_idx + 1; + /* it is safe to process buffer until saved lgh_last_idx */ + lh_last_idx = LLOG_HDR_TAIL(llh)->lrt_index; rc = llog_next_block(lpi->lpi_env, loghandle, &saved_index, index, &cur_offset, buf, chunk_size); if (repeated && rc) @@ -518,47 +550,49 @@ repeat: CDEBUG(D_OTHER, "after swabbing, type=%#x idx=%d\n", rec->lrh_type, rec->lrh_index); + if (index == (synced_idx + 1) && + synced_idx == LLOG_HDR_TAIL(llh)->lrt_index) + GOTO(out, rc = 0); + + if (OBD_FAIL_PRECHECK(OBD_FAIL_LLOG_PROCESS_TIMEOUT) && + cfs_fail_val == (unsigned int) + (loghandle->lgh_id.lgl_oi.oi.oi_id & + 0xFFFFFFFF)) { + OBD_RACE(OBD_FAIL_LLOG_PROCESS_TIMEOUT); + } + /* the bitmap could be changed during processing * records from the chunk. For wrapped catalog * it means we can read deleted record and try to - * process it. Check this case and reread the chunk. */ - - /* for partial chunk the end of it is zeroed, check - * for index 0 to distinguish it. 
*/
-		if ((partial_chunk && rec->lrh_index == 0) ||
-		     (index == lh_last_idx &&
-		      lh_last_idx != (loghandle->lgh_last_idx + 1))) {
-			/* concurrent llog_add() might add new records
-			 * while llog_processing, check this is not
-			 * the case and re-read the current chunk
-			 * otherwise. */
-			int records;
-			/* lgh_last_idx could be less then index
-			 * for catalog, if catalog is wrapped */
-			if ((index > loghandle->lgh_last_idx &&
-			    !(loghandle->lgh_hdr->llh_flags &
-			      LLOG_F_IS_CAT)) || repeated ||
-			    (loghandle->lgh_obj != NULL &&
-			     dt_object_remote(loghandle->lgh_obj)))
-				GOTO(out, rc = 0);
-			/* <2 records means no more records
-			 * if the last record we processed was
-			 * the final one, then the underlying
-			 * object might have been destroyed yet.
-			 * we better don't access that.. */
-			mutex_lock(&loghandle->lgh_hdr_mutex);
-			records = loghandle->lgh_hdr->llh_count;
-			mutex_unlock(&loghandle->lgh_hdr_mutex);
-			if (records <= 1)
-				GOTO(out, rc = 0);
-			CDEBUG(D_OTHER, "Re-read last llog buffer for "
-			       "new records, index %u, last %u\n",
-			       index, loghandle->lgh_last_idx);
+			 * process it. Check this case and reread the chunk.
+			 * It is safe to process to lh_last_idx, including
+			 * lh_last_idx if it was synced. We can not do <=
+			 * comparison, cause for wrapped catalog lgh_last_idx
+			 * could be less than index. So we detect last index
+			 * for processing as index == lh_last_idx+1. But when
+			 * catalog is wrapped and full lgh_last_idx=llh_cat_idx,
+			 * the first processing index is llh_cat_idx+1.
+			 */
+
+			if ((index == lh_last_idx && synced_idx != index) ||
+			    (index == (lh_last_idx + 1) &&
+			     !(index == (llh->llh_cat_idx + 1) &&
+			       (llh->llh_flags & LLOG_F_IS_CAT))) ||
+			    (rec->lrh_index == 0 && !repeated)) {
+			/* save offset inside buffer for the re-read */
 			buf_offset = (char *)rec - (char *)buf;
 			cur_offset = chunk_offset;
 			repeated = true;
+			/* We need to be sure lgh_last_idx
+			 * record was saved to disk
+			 */
+			down_read(&loghandle->lgh_last_sem);
+			synced_idx = LLOG_HDR_TAIL(llh)->lrt_index;
+			up_read(&loghandle->lgh_last_sem);
+			CDEBUG(D_OTHER, "synced_idx: %d\n", synced_idx);
 			goto repeat;
+
 		}
 
 		repeated = false;
@@ -594,15 +628,42 @@ repeat:
 			       rec->lrh_index, rec->lrh_len,
 			       (int)(buf + chunk_size - (char *)rec));
 
-		loghandle->lgh_cur_idx = rec->lrh_index;
+			/* lgh_cur_offset is used only at llog_test_3 */
 			loghandle->lgh_cur_offset = (char *)rec - (char *)buf +
 						    chunk_offset;
 
 			/* if set, process the callback on this record */
 			if (ext2_test_bit(index, LLOG_HDR_BITMAP(llh))) {
+				struct llog_cookie *lgc;
+				__u64 tmp_off;
+				int tmp_idx;
+
+				CDEBUG(D_OTHER, "index: %d, lh_last_idx: %d "
+				       "synced_idx: %d lgh_last_idx: %d\n",
+				       index, lh_last_idx, synced_idx,
+				       loghandle->lgh_last_idx);
+
+				if (lti != NULL) {
+					lgc = &lti->lgi_cookie;
+					/* store lu_env for recursive calls */
+					tmp_off = lgc->lgc_offset;
+					tmp_idx = lgc->lgc_index;
+
+					lgc->lgc_offset = (char *)rec -
+						(char *)buf + chunk_offset;
+					lgc->lgc_index = rec->lrh_index;
+				}
+				/* using lu_env for passing record offset to
+				 * llog_write through various callbacks */
 				rc = lpi->lpi_cb(lpi->lpi_env, loghandle, rec,
 						 lpi->lpi_cbdata);
 				last_called_index = index;
+
+				if (lti != NULL) {
+					lgc->lgc_offset = tmp_off;
+					lgc->lgc_index = tmp_idx;
+				}
+
 				if (rc == LLOG_PROC_BREAK) {
 					GOTO(out, rc);
 				} else if (rc == LLOG_DEL_RECORD) {
@@ -1073,6 +1134,9 @@ int llog_open_create(const struct lu_env *env, struct llog_ctxt *ctxt,
 
 	d = lu2dt_dev((*res)->lgh_obj->do_lu.lo_dev);
 
+	if (unlikely(unlikely(d->dd_rdonly)))
+		RETURN(-EROFS);
+
 	th = dt_trans_create(env, d);
 	if (IS_ERR(th))
GOTO(out, rc = PTR_ERR(th));
@@ -1140,7 +1204,8 @@ int llog_write(const struct lu_env *env, struct llog_handle *loghandle,
 {
 	struct dt_device	*dt;
 	struct thandle		*th;
-	int			 rc;
+	bool			need_cookie;
+	int			 rc;
 
 	ENTRY;
 
@@ -1150,6 +1215,9 @@ int llog_write(const struct lu_env *env, struct llog_handle *loghandle,
 
 	dt = lu2dt_dev(loghandle->lgh_obj->do_lu.lo_dev);
 
+	if (unlikely(unlikely(dt->dd_rdonly)))
+		RETURN(-EROFS);
+
 	th = dt_trans_create(env, dt);
 	if (IS_ERR(th))
		RETURN(PTR_ERR(th));
@@ -1163,8 +1231,21 @@ int llog_write(const struct lu_env *env, struct llog_handle *loghandle,
 	if (rc)
 		GOTO(out_trans, rc);
 
+	need_cookie = !(idx == LLOG_HEADER_IDX || idx == LLOG_NEXT_IDX);
+
 	down_write(&loghandle->lgh_lock);
-	rc = llog_write_rec(env, loghandle, rec, NULL, idx, th);
+	if (need_cookie) {
+		struct llog_thread_info	*lti = llog_info(env);
+
+		/* cookie comes from llog_process_thread */
+		rc = llog_write_rec(env, loghandle, rec, &lti->lgi_cookie,
+				    rec->lrh_index, th);
+		/* upper layer didn`t pass cookie so change rc */
+		rc = (rc == 1 ? 0 : rc);
+	} else {
+		rc = llog_write_rec(env, loghandle, rec, NULL, idx, th);
+	}
+
 	up_write(&loghandle->lgh_lock);
 
 out_trans:
	dt_trans_stop(env, dt, th);
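
A few notes follow on the patterns these hunks introduce. The C snippets below are standalone userspace sketches, not Lustre code; every helper name and constant in them is made up for illustration.

llog_cancel_arr_rec() clears a whole array of catalog indices under a single transaction and, if the header update fails, re-adds llh_count and re-sets exactly the bits it managed to clear (the backwards `for (i = i - 1; i >= 0; i--)` walk). A minimal sketch of that clear-then-rollback pattern on a plain 64-bit bitmap:

#include <stdio.h>
#include <stdbool.h>

/* One 64-bit word standing in for the llog header bitmap. */
static unsigned long long bitmap;
static int count;			/* set bits, like llh_count */

static bool test_bit(int i)	{ return (bitmap >> i) & 1ULL; }
static void set_bit(int i)	{ bitmap |= 1ULL << i; }

/* Clear a bit and report whether it was set, like ext2_clear_bit(). */
static bool clear_bit(int i)
{
	bool old = test_bit(i);

	bitmap &= ~(1ULL << i);
	return old;
}

/* Clear 'num' indices in one go; on failure restore only what we cleared. */
static int cancel_arr(int num, const int *index, bool fail_write)
{
	bool subtract_count = false;
	int i, rc = 0;

	for (i = 0; i < num; i++) {
		if (index[i] == 0) {		/* index 0 is the header */
			rc = -22;		/* -EINVAL */
			break;
		}
		if (!clear_bit(index[i])) {	/* already clear? */
			rc = -2;		/* -ENOENT */
			break;
		}
	}
	if (rc == 0) {
		count -= num;
		subtract_count = true;
		if (fail_write)			/* simulate header write error */
			rc = -5;		/* -EIO */
	}
	if (rc < 0) {				/* mirror the out_trans: rollback */
		if (subtract_count)
			count += num;
		for (i = i - 1; i >= 0; i--)	/* only bits cleared so far */
			set_bit(index[i]);
	}
	return rc;
}

int main(void)
{
	int ok[] = { 2, 3, 5 }, bad[] = { 4 };
	int rc;

	for (int i = 1; i < 8; i++) {
		set_bit(i);
		count++;
	}
	rc = cancel_arr(3, ok, false);
	printf("ok:   rc=%d count=%d\n", rc, count);
	rc = cancel_arr(1, bad, true);
	printf("fail: rc=%d count=%d bit4=%d\n", rc, count, test_bit(4));
	return 0;
}

The point of the reverse walk is that `i` is left one past the last successfully cleared index, so the rollback never touches bits that were already clear before the call.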
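
Both llog_cancel_arr_rec() (tmp_lgc_index) and llog_process_thread() (tmp_off/tmp_idx around lpi_cb) treat lgi_cookie in llog_thread_info as a scratch slot shared through the lu_env: each user saves the current contents, publishes the offset/index it wants seen further down the stack, and restores the old value afterwards so recursive processing keeps working. A sketch of that save/set/call/restore discipline with a made-up per-context cookie:

#include <stdio.h>

/* Stand-ins for llog_thread_info/llog_cookie carried in the lu_env. */
struct cookie { long offset; int index; };
struct env    { struct cookie cookie; };

typedef int (*rec_cb_t)(struct env *env, int rec_index);

/*
 * Run a callback for one record: publish the record's location in the
 * shared cookie, call back, then restore whatever was there before so
 * an outer caller (or recursive processing) sees its own value again.
 */
static int process_one(struct env *env, int rec_index, long rec_offset,
		       rec_cb_t cb)
{
	struct cookie saved = env->cookie;	/* like tmp_off/tmp_idx */
	int rc;

	env->cookie.offset = rec_offset;
	env->cookie.index = rec_index;

	rc = cb(env, rec_index);		/* may itself call process_one() */

	env->cookie = saved;			/* restore for the caller */
	return rc;
}

static int inner_cb(struct env *env, int rec_index)
{
	printf("  inner rec %d sees cookie index %d\n",
	       rec_index, env->cookie.index);
	return 0;
}

static int outer_cb(struct env *env, int rec_index)
{
	printf("outer rec %d sees cookie index %d\n",
	       rec_index, env->cookie.index);
	/* Recursive processing reuses the same env and cookie slot. */
	process_one(env, rec_index + 100, 4096, inner_cb);
	printf("outer still sees cookie index %d after recursion\n",
	       env->cookie.index);
	return 0;
}

int main(void)
{
	struct env env = { .cookie = { .offset = 0, .index = -1 } };

	process_one(&env, 7, 1024, outer_cb);
	printf("caller sees cookie index %d\n", env.cookie.index);
	return 0;
}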
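
The reworked processing loop no longer compares against lgh_last_idx + 1 directly; when it runs past the data it trusts, it takes lgh_last_sem for read, snapshots the on-disk tail index into synced_idx, and rereads the chunk until the current record falls within that watermark. A rough pthread analogue, assuming a writer that advances a watermark under a write lock while a reader snapshots it under a read lock before deciding to retry (structure only, not the llog I/O path; build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Watermark of records known to be fully written, guarded like
 * LLOG_HDR_TAIL()->lrt_index under lgh_last_sem. */
static pthread_rwlock_t last_sem = PTHREAD_RWLOCK_INITIALIZER;
static int synced_idx;

static void *writer(void *arg)
{
	(void)arg;
	for (int i = 1; i <= 10; i++) {
		usleep(1000);			/* pretend to write record i */
		pthread_rwlock_wrlock(&last_sem);
		synced_idx = i;			/* record i is now on disk */
		pthread_rwlock_unlock(&last_sem);
	}
	return NULL;
}

static int snapshot_synced(void)
{
	int idx;

	pthread_rwlock_rdlock(&last_sem);	/* like down_read(lgh_last_sem) */
	idx = synced_idx;
	pthread_rwlock_unlock(&last_sem);
	return idx;
}

int main(void)
{
	pthread_t tid;
	int seen = snapshot_synced();

	pthread_create(&tid, NULL, writer, NULL);

	/* Process records 1..10, re-snapshotting whenever we run past the
	 * watermark, the way the repeat: path rereads the chunk. */
	for (int index = 1; index <= 10; ) {
		if (index > seen) {
			usleep(100);
			seen = snapshot_synced();	/* "reread" */
			continue;
		}
		printf("processing record %d (synced up to %d)\n", index, seen);
		index++;
	}

	pthread_join(tid, NULL);
	return 0;
}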
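
In llog_write(), any idx other than the reserved LLOG_HEADER_IDX / LLOG_NEXT_IDX values now means an existing record is being rewritten in place, so the cookie primed by llog_process_thread() is handed down and a return of 1 (record written, cookie filled) is folded to 0, since the caller never asked for a cookie. A compact sketch of just that decision and return-code mapping, with placeholder constants and a stubbed writer (not the real llog_write_rec signature):

#include <stdio.h>

/* Placeholder values; the real constants live in the Lustre headers. */
#define HEADER_IDX	0
#define NEXT_IDX	(-1)

struct cookie { long offset; int index; };

/* Stub writer: returns 1 when it used/filled a cookie, 0 otherwise,
 * negative on error -- the convention the wrapper has to normalize. */
static int write_rec_stub(const struct cookie *cookie, int idx)
{
	if (cookie != NULL) {
		printf("in-place write at idx %d, offset %ld\n",
		       cookie->index, cookie->offset);
		return 1;	/* cookie was consumed */
	}
	printf("append/header write, idx %d\n", idx);
	return 0;
}

static int do_write(struct cookie *env_cookie, int rec_index, int idx)
{
	/* Only header and append writes go down without a cookie. */
	int need_cookie = !(idx == HEADER_IDX || idx == NEXT_IDX);
	int rc;

	if (need_cookie) {
		/* cookie was primed by the processing loop */
		rc = write_rec_stub(env_cookie, rec_index);
		rc = (rc == 1 ? 0 : rc);	/* caller passed no cookie */
	} else {
		rc = write_rec_stub(NULL, idx);
	}
	return rc;
}

int main(void)
{
	struct cookie c = { .offset = 8192, .index = 42 };

	printf("rc=%d\n", do_write(&c, 42, 42));	/* rewrite in place */
	printf("rc=%d\n", do_write(&c, 42, NEXT_IDX));	/* plain append */
	return 0;
}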