X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fosp%2Fosp_sync.c;h=4f777e861dbc0c061ce0aa8cca0fe4ce19a73999;hb=12a130c71b88351bc361d1f741cad0a4c1fbb257;hp=4c20df93ec7b3b370abb73a9261c682d067c9bb9;hpb=64df136d23a197ad1579877dbd063e789ae7b9d1;p=fs%2Flustre-release.git

diff --git a/lustre/osp/osp_sync.c b/lustre/osp/osp_sync.c
index 4c20df9..4f777e8 100644
--- a/lustre/osp/osp_sync.c
+++ b/lustre/osp/osp_sync.c
@@ -15,11 +15,7 @@
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -27,7 +23,7 @@
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2012, 2014, Intel Corporation.
+ * Copyright (c) 2012, 2016, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -50,7 +46,7 @@
 
 static int osp_sync_id_traction_init(struct osp_device *d);
 static void osp_sync_id_traction_fini(struct osp_device *d);
-static __u32 osp_sync_id_get(struct osp_device *d, __u32 id);
+static __u64 osp_sync_id_get(struct osp_device *d, __u64 id);
 static void osp_sync_remove_from_tracker(struct osp_device *d);
 
 /*
@@ -100,6 +96,7 @@ struct osp_job_req_args {
 	struct ptlrpc_replay_async_args	jra_raa;
 	struct list_head		jra_committed_link;
 	struct list_head		jra_inflight_link;
+	struct llog_cookie		jra_lcookie;
 	__u32				jra_magic;
 };
 
@@ -188,7 +185,8 @@ static inline int osp_sync_inflight_conflict(struct osp_device *d,
 
 static inline int osp_sync_low_in_progress(struct osp_device *d)
 {
-	return d->opd_syn_rpc_in_progress < d->opd_syn_max_rpc_in_progress;
+	return atomic_read(&d->opd_syn_rpc_in_progress) <
+		d->opd_syn_max_rpc_in_progress;
 }
 
 /**
@@ -201,7 +199,8 @@ static inline int osp_sync_low_in_progress(struct osp_device *d)
  */
 static inline int osp_sync_low_in_flight(struct osp_device *d)
 {
-	return d->opd_syn_rpc_in_flight < d->opd_syn_max_rpc_in_flight;
+	return atomic_read(&d->opd_syn_rpc_in_flight) <
+		d->opd_syn_max_rpc_in_flight;
 }
 
 /**
@@ -238,6 +237,25 @@ void __osp_sync_check_for_work(struct osp_device *d)
 	osp_sync_check_for_work(d);
 }
 
+static inline __u64 osp_sync_correct_id(struct osp_device *d,
+					struct llog_rec_hdr *rec)
+{
+	/*
+	 * llog use cyclic store with 32 bit lrh_id
+	 * so overflow lrh_id is possible. Range between
+	 * last_processed and last_committed is less than
+	 * 64745 ^ 2 and less than 2^32 - 1
+	 */
+	__u64 correct_id = d->opd_syn_last_committed_id;
+
+	if ((correct_id & 0xffffffffULL) < rec->lrh_id)
+		correct_id -= 0x100000000ULL;
+
+	correct_id &= ~0xffffffffULL;
+	correct_id |= rec->lrh_id;
+
+	return correct_id;
+}
 /**
  * Check and return ready-for-new status.
  *
@@ -269,9 +287,10 @@ static inline int osp_sync_can_process_new(struct osp_device *d,
 		return 0;
 	if (d->opd_syn_prev_done == 0)
 		return 1;
-	if (d->opd_syn_changes == 0)
+	if (atomic_read(&d->opd_syn_changes) == 0)
 		return 0;
-	if (rec == NULL || rec->lrh_id <= d->opd_syn_last_committed_id)
+	if (rec == NULL ||
+	    osp_sync_correct_id(d, rec) <= d->opd_syn_last_committed_id)
 		return 1;
 	return 0;
 }
@@ -402,8 +421,7 @@ static int osp_sync_add_rec(const struct lu_env *env, struct osp_device *d,
 	LASSERT(txn);
 
 	txn->oti_current_id = osp_sync_id_get(d, txn->oti_current_id);
-	osi->osi_hdr.lrh_id = txn->oti_current_id;
-
+	osi->osi_hdr.lrh_id = (txn->oti_current_id & 0xffffffffULL);
 	ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT);
 	if (ctxt == NULL)
 		RETURN(-ENOMEM);
@@ -418,9 +436,7 @@ static int osp_sync_add_rec(const struct lu_env *env, struct osp_device *d,
 		       POSTID(&osi->osi_cookie.lgc_lgl.lgl_oi),
 		       (unsigned long)osi->osi_cookie.lgc_lgl.lgl_ogen,
 		       (unsigned long)osi->osi_cookie.lgc_index, rc);
-		spin_lock(&d->opd_syn_lock);
-		d->opd_syn_changes++;
-		spin_unlock(&d->opd_syn_lock);
+		atomic_inc(&d->opd_syn_changes);
 	}
 	/* return 0 always here, error case just cause no llog record */
 	RETURN(0);
 }
@@ -472,7 +488,7 @@ static void osp_sync_request_commit_cb(struct ptlrpc_request *req)
 	struct osp_device	*d = req->rq_cb_data;
 	struct osp_job_req_args	*jra;
 
-	CDEBUG(D_HA, "commit req %p, transno "LPU64"\n", req, req->rq_transno);
+	CDEBUG(D_HA, "commit req %p, transno %llu\n", req, req->rq_transno);
 
 	if (unlikely(req->rq_transno == 0))
 		return;
@@ -551,17 +567,15 @@ static int osp_sync_interpret(const struct lu_env *env,
 		 */
 		LASSERTF(req->rq_transno == 0 ||
 			 req->rq_import_generation < imp->imp_generation,
-			 "transno "LPU64", rc %d, gen: req %d, imp %d\n",
+			 "transno %llu, rc %d, gen: req %d, imp %d\n",
 			 req->rq_transno, rc,
 			 req->rq_import_generation, imp->imp_generation);
 		if (req->rq_transno == 0) {
 			/* this is the last time we see the request
 			 * if transno is not zero, then commit cb
 			 * will be called at some point */
-			LASSERT(d->opd_syn_rpc_in_progress > 0);
-			spin_lock(&d->opd_syn_lock);
-			d->opd_syn_rpc_in_progress--;
-			spin_unlock(&d->opd_syn_lock);
+			LASSERT(atomic_read(&d->opd_syn_rpc_in_progress) > 0);
+			atomic_dec(&d->opd_syn_rpc_in_progress);
 		}
 
 		wake_up(&d->opd_syn_waitq);
@@ -575,16 +589,16 @@ static int osp_sync_interpret(const struct lu_env *env,
 		osp_statfs_need_now(d);
 	}
 
-	LASSERT(d->opd_syn_rpc_in_flight > 0);
 	spin_lock(&d->opd_syn_lock);
-	d->opd_syn_rpc_in_flight--;
 	list_del_init(&jra->jra_inflight_link);
 	spin_unlock(&d->opd_syn_lock);
+	LASSERT(atomic_read(&d->opd_syn_rpc_in_flight) > 0);
+	atomic_dec(&d->opd_syn_rpc_in_flight);
 	if (unlikely(atomic_read(&d->opd_syn_barrier) > 0))
 		wake_up(&d->opd_syn_barrier_waitq);
 
 	CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
-	       d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
-	       d->opd_syn_rpc_in_progress);
+	       d->opd_obd->obd_name, atomic_read(&d->opd_syn_rpc_in_flight),
+	       atomic_read(&d->opd_syn_rpc_in_progress));
 
 	osp_sync_check_for_work(d);
 
@@ -597,17 +611,25 @@ static int osp_sync_interpret(const struct lu_env *env,
  * This is just a tiny helper function to put the request on the sending list
  *
  * \param[in] d		OSP device
+ * \param[in] llh	llog handle where the record is stored
+ * \param[in] h		llog record
  * \param[in] req	request
  */
 static void osp_sync_send_new_rpc(struct osp_device *d,
+				  struct llog_handle *llh,
+				  struct llog_rec_hdr *h,
 				  struct ptlrpc_request *req)
 {
 	struct osp_job_req_args *jra;
 
-	LASSERT(d->opd_syn_rpc_in_flight <= d->opd_syn_max_rpc_in_flight);
+	LASSERT(atomic_read(&d->opd_syn_rpc_in_flight) <=
+		d->opd_syn_max_rpc_in_flight);
 
 	jra = ptlrpc_req_async_args(req);
 	jra->jra_magic = OSP_JOB_MAGIC;
+	jra->jra_lcookie.lgc_lgl = llh->lgh_id;
+	jra->jra_lcookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
+	jra->jra_lcookie.lgc_index = h->lrh_index;
 	INIT_LIST_HEAD(&jra->jra_committed_link);
 	spin_lock(&d->opd_syn_lock);
 	list_add_tail(&jra->jra_inflight_link, &d->opd_syn_inflight_list);
@@ -626,8 +648,6 @@ static void osp_sync_send_new_rpc(struct osp_device *d,
  * are initialized.
  *
  * \param[in] d		OSP device
- * \param[in] llh	llog handle where the record is stored
- * \param[in] h		llog record
 * \param[in] op	type of the change
 * \param[in] format	request format to be used
 *
@@ -635,13 +655,10 @@ static void osp_sync_send_new_rpc(struct osp_device *d,
 * \retval ERR_PTR(errno)	on error
 */
 static struct ptlrpc_request *osp_sync_new_job(struct osp_device *d,
-					       struct llog_handle *llh,
-					       struct llog_rec_hdr *h,
 					       ost_cmd_t op,
 					       const struct req_format *format)
 {
 	struct ptlrpc_request	*req;
-	struct ost_body		*body;
 	struct obd_import	*imp;
 	int			 rc;
 
@@ -662,18 +679,6 @@ static struct ptlrpc_request *osp_sync_new_job(struct osp_device *d,
 		return ERR_PTR(rc);
 	}
 
-	/*
-	 * this is a trick: to save on memory allocations we put cookie
-	 * into the request, but don't set corresponded flag in o_valid
-	 * so that OST doesn't interpret this cookie. once the request
-	 * is committed on OST we take cookie from the request and cancel
-	 */
-	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
-	LASSERT(body);
-	body->oa.o_lcookie.lgc_lgl = llh->lgh_id;
-	body->oa.o_lcookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
-	body->oa.o_lcookie.lgc_index = h->lrh_index;
-
 	req->rq_interpret_reply = osp_sync_interpret;
 	req->rq_commit_cb = osp_sync_request_commit_cb;
 	req->rq_cb_data = d;
@@ -713,13 +718,13 @@ static int osp_sync_new_setattr_job(struct osp_device *d,
 	/* lsr_valid can only be 0 or have OBD_MD_{FLUID,FLGID} set,
 	 * so no bits other than these should be set.
 	 */
 	if ((rec->lsr_valid & ~(OBD_MD_FLUID | OBD_MD_FLGID)) != 0) {
-		CERROR("%s: invalid setattr record, lsr_valid:"LPU64"\n",
+		CERROR("%s: invalid setattr record, lsr_valid:%llu\n",
 		       d->opd_obd->obd_name, rec->lsr_valid);
 		/* return 1 on invalid record */
 		RETURN(1);
 	}
 
-	req = osp_sync_new_job(d, llh, h, OST_SETATTR, &RQF_OST_SETATTR);
+	req = osp_sync_new_job(d, OST_SETATTR, &RQF_OST_SETATTR);
 	if (IS_ERR(req))
 		RETURN(PTR_ERR(req));
@@ -736,7 +741,7 @@ static int osp_sync_new_setattr_job(struct osp_device *d,
 	else
 		body->oa.o_valid |= rec->lsr_valid;
 
-	osp_sync_send_new_rpc(d, req);
+	osp_sync_send_new_rpc(d, llh, h, req);
 	RETURN(0);
 }
@@ -766,7 +771,7 @@ static int osp_sync_new_unlink_job(struct osp_device *d,
 	ENTRY;
 	LASSERT(h->lrh_type == MDS_UNLINK_REC);
 
-	req = osp_sync_new_job(d, llh, h, OST_DESTROY, &RQF_OST_DESTROY);
+	req = osp_sync_new_job(d, OST_DESTROY, &RQF_OST_DESTROY);
 	if (IS_ERR(req))
 		RETURN(PTR_ERR(req));
@@ -779,7 +784,7 @@ static int osp_sync_new_unlink_job(struct osp_device *d,
 	if (rec->lur_count)
 		body->oa.o_valid |= OBD_MD_FLOBJCOUNT;
 
-	osp_sync_send_new_rpc(d, req);
+	osp_sync_send_new_rpc(d, llh, h, req);
 	RETURN(0);
 }
@@ -812,8 +817,7 @@ static int osp_sync_new_unlink64_job(struct osp_device *d,
 	ENTRY;
 	LASSERT(h->lrh_type == MDS_UNLINK64_REC);
 
-	req = osp_sync_new_job(d, llh, h, OST_DESTROY,
-			       &RQF_OST_DESTROY);
+	req = osp_sync_new_job(d, OST_DESTROY, &RQF_OST_DESTROY);
 	if (IS_ERR(req))
 		RETURN(PTR_ERR(req));
@@ -826,7 +830,7 @@ static int osp_sync_new_unlink64_job(struct osp_device *d,
 	body->oa.o_misc = rec->lur_count;
 	body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID |
 			   OBD_MD_FLOBJCOUNT;
-	osp_sync_send_new_rpc(d, req);
+	osp_sync_send_new_rpc(d, llh, h, req);
 	RETURN(0);
 }
@@ -885,10 +889,8 @@ static void osp_sync_process_record(const struct lu_env *env,
 
 	/* notice we increment counters before sending RPC, to be consistent
 	 * in RPC interpret callback which may happen very quickly */
-	spin_lock(&d->opd_syn_lock);
-	d->opd_syn_rpc_in_flight++;
-	d->opd_syn_rpc_in_progress++;
-	spin_unlock(&d->opd_syn_lock);
+	atomic_inc(&d->opd_syn_rpc_in_flight);
+	atomic_inc(&d->opd_syn_rpc_in_progress);
 
 	switch (rec->lrh_type) {
 	/* case MDS_UNLINK_REC is kept for compatibility */
@@ -909,32 +911,35 @@ static void osp_sync_process_record(const struct lu_env *env,
 		break;
 	}
 
-	spin_lock(&d->opd_syn_lock);
-
 	/* For all kinds of records, not matter successful or not,
 	 * we should decrease changes and bump last_processed_id.
 	 */
 	if (d->opd_syn_prev_done) {
-		LASSERT(d->opd_syn_changes > 0);
-		LASSERT(rec->lrh_id <= d->opd_syn_last_committed_id);
+		__u64 correct_id = osp_sync_correct_id(d, rec);
+		LASSERT(atomic_read(&d->opd_syn_changes) > 0);
+		LASSERT(correct_id <= d->opd_syn_last_committed_id);
 		/* NOTE: it's possible to meet same id if
 		 * OST stores few stripes of same file
 		 */
-		if (rec->lrh_id > d->opd_syn_last_processed_id) {
-			d->opd_syn_last_processed_id = rec->lrh_id;
-			wake_up(&d->opd_syn_barrier_waitq);
+		while (1) {
+			/* another thread may be trying to set new value */
+			rmb();
+			if (correct_id > d->opd_syn_last_processed_id) {
+				d->opd_syn_last_processed_id = correct_id;
+				wake_up(&d->opd_syn_barrier_waitq);
+			} else
+				break;
 		}
 
-		d->opd_syn_changes--;
+		atomic_dec(&d->opd_syn_changes);
 	}
 	if (rc != 0) {
-		d->opd_syn_rpc_in_flight--;
-		d->opd_syn_rpc_in_progress--;
+		atomic_dec(&d->opd_syn_rpc_in_flight);
+		atomic_dec(&d->opd_syn_rpc_in_progress);
 	}
-	CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
-	       d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
-	       d->opd_syn_rpc_in_progress);
-	spin_unlock(&d->opd_syn_lock);
+	CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
+	       d->opd_obd->obd_name, atomic_read(&d->opd_syn_rpc_in_flight),
+	       atomic_read(&d->opd_syn_rpc_in_progress));
 
 	/* Delete the invalid record */
 	if (rc == 1) {
@@ -1025,12 +1030,12 @@ static void osp_sync_process_committed(const struct lu_env *env,
 		 * called we can check committness directly */
 		if (req->rq_import_generation == imp->imp_generation) {
 			rc = llog_cat_cancel_records(env, llh, 1,
-						     &body->oa.o_lcookie);
+						     &jra->jra_lcookie);
 			if (rc)
 				CERROR("%s: can't cancel record: %d\n",
 				       obd->obd_name, rc);
 		} else {
-			DEBUG_REQ(D_ERROR, req, "imp_committed = "LPU64,
+			DEBUG_REQ(D_OTHER, req, "imp_committed = %llu",
 				  imp->imp_peer_committed_transno);
 		}
 		ptlrpc_req_finished(req);
@@ -1039,13 +1044,11 @@ static void osp_sync_process_committed(const struct lu_env *env,
 
 	llog_ctxt_put(ctxt);
 
-	LASSERT(d->opd_syn_rpc_in_progress >= done);
-	spin_lock(&d->opd_syn_lock);
-	d->opd_syn_rpc_in_progress -= done;
-	spin_unlock(&d->opd_syn_lock);
+	LASSERT(atomic_read(&d->opd_syn_rpc_in_progress) >= done);
+	atomic_sub(done, &d->opd_syn_rpc_in_progress);
 	CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
-	       d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
-	       d->opd_syn_rpc_in_progress);
+	       d->opd_obd->obd_name, atomic_read(&d->opd_syn_rpc_in_flight),
+	       atomic_read(&d->opd_syn_rpc_in_progress));
 
 	osp_sync_check_for_work(d);
 
@@ -1096,11 +1099,11 @@ static int osp_sync_process_queues(const struct lu_env *env,
 		if (osp_sync_can_process_new(d, rec)) {
 			if (llh == NULL) {
 				/* ask llog for another record */
-				CDEBUG(D_HA, "%lu changes, %u in progress,"
-				     " %u in flight\n",
-				     d->opd_syn_changes,
-				     d->opd_syn_rpc_in_progress,
-				     d->opd_syn_rpc_in_flight);
+				CDEBUG(D_HA, "%u changes, %u in progress,"
+				       " %u in flight\n",
+				       atomic_read(&d->opd_syn_changes),
+				       atomic_read(&d->opd_syn_rpc_in_progress),
+				       atomic_read(&d->opd_syn_rpc_in_flight));
 				return 0;
 			}
 			osp_sync_process_record(env, d, llh, rec);
@@ -1156,6 +1159,12 @@ static int osp_sync_thread(void *_arg)
 	if (rc) {
 		CERROR("%s: can't initialize env: rc = %d\n",
 		       obd->obd_name, rc);
+
+		spin_lock(&d->opd_syn_lock);
+		thread->t_flags = SVC_STOPPED;
+		spin_unlock(&d->opd_syn_lock);
+		wake_up(&thread->t_ctl_waitq);
+
 		RETURN(rc);
 	}
 
@@ -1178,44 +1187,53 @@ static int osp_sync_thread(void *_arg)
 	}
 
 	rc = llog_cat_process(&env, llh, osp_sync_process_queues, d, 0, 0);
+	if (rc < 0) {
+		CERROR("%s: llog process with osp_sync_process_queues "
+		       "failed: %d\n", d->opd_obd->obd_name, rc);
+		GOTO(close, rc);
+	}
 	LASSERTF(rc == 0 || rc == LLOG_PROC_BREAK,
-		 "%lu changes, %u in progress, %u in flight: %d\n",
-		 d->opd_syn_changes, d->opd_syn_rpc_in_progress,
-		 d->opd_syn_rpc_in_flight, rc);
+		 "%u changes, %u in progress, %u in flight: %d\n",
+		 atomic_read(&d->opd_syn_changes),
+		 atomic_read(&d->opd_syn_rpc_in_progress),
+		 atomic_read(&d->opd_syn_rpc_in_flight), rc);
 
 	/* we don't expect llog_process_thread() to exit till umount */
 	LASSERTF(thread->t_flags != SVC_RUNNING,
-		 "%lu changes, %u in progress, %u in flight\n",
-		 d->opd_syn_changes, d->opd_syn_rpc_in_progress,
-		 d->opd_syn_rpc_in_flight);
+		 "%u changes, %u in progress, %u in flight\n",
+		 atomic_read(&d->opd_syn_changes),
+		 atomic_read(&d->opd_syn_rpc_in_progress),
+		 atomic_read(&d->opd_syn_rpc_in_flight));
 
 	/* wait till all the requests are completed */
 	count = 0;
-	while (d->opd_syn_rpc_in_progress > 0) {
+	while (atomic_read(&d->opd_syn_rpc_in_progress) > 0) {
 		osp_sync_process_committed(&env, d);
 		lwi = LWI_TIMEOUT(cfs_time_seconds(5), NULL, NULL);
 		rc = l_wait_event(d->opd_syn_waitq,
-				  d->opd_syn_rpc_in_progress == 0,
+				  atomic_read(&d->opd_syn_rpc_in_progress) == 0,
 				  &lwi);
 		if (rc == -ETIMEDOUT)
 			count++;
 		LASSERTF(count < 10, "%s: %d %d %sempty\n",
-			 d->opd_obd->obd_name, d->opd_syn_rpc_in_progress,
-			 d->opd_syn_rpc_in_flight,
+			 d->opd_obd->obd_name,
+			 atomic_read(&d->opd_syn_rpc_in_progress),
+			 atomic_read(&d->opd_syn_rpc_in_flight),
 			 list_empty(&d->opd_syn_committed_there) ? "" : "!");
 	}
 
+close:
 	llog_cat_close(&env, llh);
 	rc = llog_cleanup(&env, ctxt);
 	if (rc)
 		CERROR("can't cleanup llog: %d\n", rc);
 out:
-	LASSERTF(d->opd_syn_rpc_in_progress == 0,
+	LASSERTF(atomic_read(&d->opd_syn_rpc_in_progress) == 0,
 		 "%s: %d %d %sempty\n",
-		 d->opd_obd->obd_name, d->opd_syn_rpc_in_progress,
-		 d->opd_syn_rpc_in_flight,
+		 d->opd_obd->obd_name, atomic_read(&d->opd_syn_rpc_in_progress),
+		 atomic_read(&d->opd_syn_rpc_in_flight),
		 list_empty(&d->opd_syn_committed_there) ? "" : "!");
 
 	thread->t_flags = SVC_STOPPED;
@@ -1313,7 +1331,7 @@ static int osp_sync_llog_init(const struct lu_env *env, struct osp_device *d)
 	LASSERT(lgh != NULL);
 	ctxt->loc_handle = lgh;
 
-	rc = llog_cat_init_and_process(env, lgh);
+	rc = llog_init_handle(env, lgh, LLOG_F_IS_CAT, NULL);
 	if (rc)
 		GOTO(out_close, rc);
@@ -1361,9 +1379,10 @@ static void osp_sync_llog_fini(const struct lu_env *env, struct osp_device *d)
 	struct llog_ctxt *ctxt;
 
 	ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT);
-	if (ctxt != NULL)
+	if (ctxt) {
 		llog_cat_close(env, ctxt->loc_handle);
-	llog_cleanup(env, ctxt);
+		llog_cleanup(env, ctxt);
+	}
 }
 
 /**
@@ -1386,6 +1405,19 @@ int osp_sync_init(const struct lu_env *env, struct osp_device *d)
 
 	ENTRY;
 
+	d->opd_syn_max_rpc_in_flight = OSP_MAX_IN_FLIGHT;
+	d->opd_syn_max_rpc_in_progress = OSP_MAX_IN_PROGRESS;
+	spin_lock_init(&d->opd_syn_lock);
+	init_waitqueue_head(&d->opd_syn_waitq);
+	init_waitqueue_head(&d->opd_syn_barrier_waitq);
+	thread_set_flags(&d->opd_syn_thread, SVC_INIT);
+	init_waitqueue_head(&d->opd_syn_thread.t_ctl_waitq);
+	INIT_LIST_HEAD(&d->opd_syn_inflight_list);
+	INIT_LIST_HEAD(&d->opd_syn_committed_there);
+
+	if (d->opd_storage->dd_rdonly)
+		RETURN(0);
+
 	rc = osp_sync_id_traction_init(d);
 	if (rc)
 		RETURN(rc);
@@ -1403,15 +1435,6 @@ int osp_sync_init(const struct lu_env *env, struct osp_device *d)
 	/*
 	 * Start synchronization thread
 	 */
-	d->opd_syn_max_rpc_in_flight = OSP_MAX_IN_FLIGHT;
-	d->opd_syn_max_rpc_in_progress = OSP_MAX_IN_PROGRESS;
-	spin_lock_init(&d->opd_syn_lock);
-	init_waitqueue_head(&d->opd_syn_waitq);
-	init_waitqueue_head(&d->opd_syn_barrier_waitq);
-	init_waitqueue_head(&d->opd_syn_thread.t_ctl_waitq);
-	INIT_LIST_HEAD(&d->opd_syn_inflight_list);
-	INIT_LIST_HEAD(&d->opd_syn_committed_there);
-
 	task = kthread_run(osp_sync_thread, d, "osp-syn-%u-%u",
 			   d->opd_index, d->opd_group);
 	if (IS_ERR(task)) {
@@ -1447,9 +1470,11 @@ int osp_sync_fini(struct osp_device *d)
 
 	ENTRY;
 
-	thread->t_flags = SVC_STOPPING;
-	wake_up(&d->opd_syn_waitq);
-	wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
+	if (!thread_is_init(thread) && !thread_is_stopped(thread)) {
+		thread->t_flags = SVC_STOPPING;
+		wake_up(&d->opd_syn_waitq);
+		wait_event(thread->t_ctl_waitq, thread_is_stopped(thread));
+	}
 
 	/*
 	 * unregister transaction callbacks only when sync thread
@@ -1487,7 +1512,7 @@ static void osp_sync_tracker_commit_cb(struct thandle *th, void *cookie)
 
 	spin_lock(&tr->otr_lock);
 	if (likely(txn->oti_current_id > tr->otr_committed_id)) {
-		CDEBUG(D_OTHER, "committed: %u -> %u\n",
+		CDEBUG(D_OTHER, "committed: %llu -> %llu\n",
 		       tr->otr_committed_id, txn->oti_current_id);
 		tr->otr_committed_id = txn->oti_current_id;
 
@@ -1607,14 +1632,14 @@ static void osp_sync_id_traction_fini(struct osp_device *d)
  * Generates a new ID using the tracker associated with the given OSP device
  * \a d, if the given ID \a id is non-zero. Unconditially adds OSP device to
  * the wakeup list, so OSP won't miss when a transaction using the ID is
- * committed. Notice ID is 32bit, but llog doesn't support >2^32 records anyway.
+ * committed.
  *
 * \param[in] d		OSP device
 * \param[in] id	0 or ID generated previously
 *
 * \retval		ID the caller should use
 */
-static __u32 osp_sync_id_get(struct osp_device *d, __u32 id)
+static __u64 osp_sync_id_get(struct osp_device *d, __u64 id)
 {
 	struct osp_id_tracker *tr;
 
@@ -1623,9 +1648,12 @@ static __u64 osp_sync_id_get(struct osp_device *d, __u64 id)
 	/* XXX: we can improve this introducing per-cpu preallocated ids?
 	 */
 	spin_lock(&tr->otr_lock);
+	if (OBD_FAIL_CHECK(OBD_FAIL_MDS_TRACK_OVERFLOW))
+		tr->otr_next_id = 0xfffffff0;
+
 	if (unlikely(tr->otr_next_id <= d->opd_syn_last_used_id)) {
 		spin_unlock(&tr->otr_lock);
-		CERROR("%s: next %u, last synced %lu\n",
+		CERROR("%s: next %llu, last synced %llu\n",
 		       d->opd_obd->obd_name, tr->otr_next_id,
 		       d->opd_syn_last_used_id);
 		LBUG();
@@ -1638,7 +1666,7 @@ static __u64 osp_sync_id_get(struct osp_device *d, __u64 id)
 	if (list_empty(&d->opd_syn_ontrack))
 		list_add(&d->opd_syn_ontrack, &tr->otr_wakeup_list);
 	spin_unlock(&tr->otr_lock);
-	CDEBUG(D_OTHER, "new id %u\n", (unsigned) id);
+	CDEBUG(D_OTHER, "new id %llu\n", id);
 
 	return id;
 }
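
The functional core of this patch is that the OSP sync layer now tracks change ids as 64-bit values while an llog record header still carries only a 32-bit lrh_id, so osp_sync_correct_id() widens a record's 32-bit id back into the 64-bit space using the last committed id as a reference point. Below is a minimal standalone sketch of that wrap-around correction in plain user-space C; it mirrors the osp_sync_correct_id() logic added above, but the function name and the test values in main() are illustrative only and are not code from the Lustre tree.

/* sketch: widen a 32-bit llog record id to the 64-bit tracking id */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

static uint64_t correct_id(uint64_t last_committed_id, uint32_t lrh_id)
{
	uint64_t id = last_committed_id;

	/* if the low 32 bits of the committed counter are already below
	 * lrh_id, the record was written in the previous 2^32 window */
	if ((id & 0xffffffffULL) < lrh_id)
		id -= 0x100000000ULL;

	/* splice the 32-bit record id into the 64-bit counter */
	id &= ~0xffffffffULL;
	id |= lrh_id;

	return id;
}

int main(void)
{
	/* no wrap: record id below the committed counter's low word */
	assert(correct_id(0x100000005ULL, 3) == 0x100000003ULL);
	/* wrap: record written just before the low word overflowed */
	assert(correct_id(0x200000001ULL, 0xfffffff0U) == 0x1fffffff0ULL);
	printf("ok\n");
	return 0;
}

This correction is sound only because, as the comment in the patch notes, the distance between last_processed and last_committed stays far below 2^32, so keeping the on-disk record id at 32 bits preserves the llog record format while the in-memory counters grow to 64 bits.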