X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fosp%2Fosp_sync.c;h=454732558e8837e762cc18abe9949ec48ad46a99;hp=fc19031940281fa0c35da3d7c05f7ba0fe69dbe2;hb=3cce65712d94cffe8f1626545845b95b88aef672;hpb=21f138ef4f2c95794e409304609ca39ea56aa1ae diff --git a/lustre/osp/osp_sync.c b/lustre/osp/osp_sync.c index fc19031..4547325 100644 --- a/lustre/osp/osp_sync.c +++ b/lustre/osp/osp_sync.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -27,7 +23,7 @@ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2012, 2013, Intel Corporation. + * Copyright (c) 2012, 2017, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -43,14 +39,11 @@ #define DEBUG_SUBSYSTEM S_MDS +#include #include +#include #include "osp_internal.h" -static int osp_sync_id_traction_init(struct osp_device *d); -static void osp_sync_id_traction_fini(struct osp_device *d); -static __u32 osp_sync_id_get(struct osp_device *d, __u32 id); -static void osp_sync_remove_from_tracker(struct osp_device *d); - /* * this is a components of OSP implementing synchronization between MDS and OST * it llogs all interesting changes (currently it's uig/gid change and object @@ -61,163 +54,344 @@ static void osp_sync_remove_from_tracker(struct osp_device *d); * the first queue is llog itself, once read a change is stored in 2nd queue * in form of RPC (but RPC isn't fired yet). * - * the second queue (opd_syn_waiting_for_commit) holds changes awaiting local + * the second queue (opd_sync_waiting_for_commit) holds changes awaiting local * commit. once change is committed locally it migrates onto 3rd queue. * - * the third queue (opd_syn_committed_here) holds changes committed locally, + * the third queue (opd_sync_committed_here) holds changes committed locally, * but not sent to OST (as the pipe can be full). once pipe becomes non-full * we take a change from the queue and fire corresponded RPC. * * once RPC is reported committed by OST (using regular last_committed mech.) - * the change jumps into 4th queue (opd_syn_committed_there), now we can + * the change jumps into 4th queue (opd_sync_committed_there), now we can * cancel corresponded llog record and release RPC * - * opd_syn_changes is a number of unread llog records (to be processed). + * opd_sync_changes is a number of unread llog records (to be processed). * notice this number doesn't include llog records from previous boots. - * with OSP_SYN_THRESHOLD we try to batch processing a bit (TO BE IMPLEMENTED) + * with OSP_SYNC_THRESHOLD we try to batch processing a bit (TO BE IMPLEMENTED) * - * opd_syn_rpc_in_progress is a number of requests in 2-4 queues. - * we control this with OSP_MAX_IN_PROGRESS so that OSP don't consume + * opd_sync_rpcs_in_progress is total number of requests in above 2-4 queues. + * we control this with OSP_MAX_RPCS_IN_PROGRESS so that OSP don't consume * too much memory -- how to deal with 1000th OSTs ? batching could help? * - * opd_syn_rpc_in_flight is a number of RPC in flight. 
- * we control this with OSP_MAX_IN_FLIGHT + * opd_sync_rpcs_in_flight is a number of RPC in flight. + * we control this with OSP_MAX_RPCS_IN_FLIGHT */ /* XXX: do math to learn reasonable threshold * should it be ~ number of changes fitting bulk? */ -#define OSP_SYN_THRESHOLD 10 -#define OSP_MAX_IN_FLIGHT 8 -#define OSP_MAX_IN_PROGRESS 4096 +#define OSP_SYNC_THRESHOLD 10 +#define OSP_MAX_RPCS_IN_FLIGHT 8 +#define OSP_MAX_RPCS_IN_PROGRESS 4096 #define OSP_JOB_MAGIC 0x26112005 +struct osp_job_req_args { + /** bytes reserved for ptlrpc_replay_req() */ + struct ptlrpc_replay_async_args jra_raa; + struct list_head jra_committed_link; + struct list_head jra_in_flight_link; + struct llog_cookie jra_lcookie; + __u32 jra_magic; +}; + +static int osp_sync_add_commit_cb(const struct lu_env *env, + struct osp_device *d, struct thandle *th); + static inline int osp_sync_running(struct osp_device *d) { - return !!(d->opd_syn_thread.t_flags & SVC_RUNNING); + return !!(d->opd_sync_thread.t_flags & SVC_RUNNING); } +/** + * Check status: whether OSP thread has stopped + * + * \param[in] d OSP device + * + * \retval 0 still running + * \retval 1 stopped + */ static inline int osp_sync_stopped(struct osp_device *d) { - return !!(d->opd_syn_thread.t_flags & SVC_STOPPED); + return !!(d->opd_sync_thread.t_flags & SVC_STOPPED); } +/* + ** Check for new changes to sync + * + * \param[in] d OSP device + * + * \retval 1 there are changes + * \retval 0 there are no changes + */ static inline int osp_sync_has_new_job(struct osp_device *d) { - return ((d->opd_syn_last_processed_id < d->opd_syn_last_used_id) && - (d->opd_syn_last_processed_id < d->opd_syn_last_committed_id)) - || (d->opd_syn_prev_done == 0); + return atomic_read(&d->opd_sync_changes) > 0 || + d->opd_sync_prev_done == 0; +} + +static inline int osp_sync_in_flight_conflict(struct osp_device *d, + struct llog_rec_hdr *h) +{ + struct osp_job_req_args *jra; + struct ost_id ostid; + int conflict = 0; + + if (h == NULL || h->lrh_type == LLOG_GEN_REC || + list_empty(&d->opd_sync_in_flight_list)) + return conflict; + + memset(&ostid, 0, sizeof(ostid)); + switch (h->lrh_type) { + case MDS_UNLINK_REC: { + struct llog_unlink_rec *unlink = (struct llog_unlink_rec *)h; + + ostid_set_seq(&ostid, unlink->lur_oseq); + if (ostid_set_id(&ostid, unlink->lur_oid)) { + CERROR("Bad %llu to set " DOSTID "\n", + (unsigned long long)(unlink->lur_oid), + POSTID(&ostid)); + return 1; + } + } + break; + case MDS_UNLINK64_REC: + fid_to_ostid(&((struct llog_unlink64_rec *)h)->lur_fid, &ostid); + break; + case MDS_SETATTR64_REC: + ostid = ((struct llog_setattr64_rec *)h)->lsr_oi; + break; + default: + LBUG(); + } + + spin_lock(&d->opd_sync_lock); + list_for_each_entry(jra, &d->opd_sync_in_flight_list, + jra_in_flight_link) { + struct ptlrpc_request *req; + struct ost_body *body; + + LASSERT(jra->jra_magic == OSP_JOB_MAGIC); + + req = container_of((void *)jra, struct ptlrpc_request, + rq_async_args); + body = req_capsule_client_get(&req->rq_pill, + &RMF_OST_BODY); + LASSERT(body); + + if (memcmp(&ostid, &body->oa.o_oi, sizeof(ostid)) == 0) { + conflict = 1; + break; + } + } + spin_unlock(&d->opd_sync_lock); + + return conflict; } -static inline int osp_sync_low_in_progress(struct osp_device *d) +static inline int osp_sync_rpcs_in_progress_low(struct osp_device *d) { - return d->opd_syn_rpc_in_progress < d->opd_syn_max_rpc_in_progress; + return atomic_read(&d->opd_sync_rpcs_in_progress) < + d->opd_sync_max_rpcs_in_progress; } -static inline int osp_sync_low_in_flight(struct osp_device 
*d) +/** + * Check for room in the network pipe to OST + * + * \param[in] d OSP device + * + * \retval 1 there is room + * \retval 0 no room, the pipe is full + */ +static inline int osp_sync_rpcs_in_flight_low(struct osp_device *d) { - return d->opd_syn_rpc_in_flight < d->opd_syn_max_rpc_in_flight; + return atomic_read(&d->opd_sync_rpcs_in_flight) < + d->opd_sync_max_rpcs_in_flight; } -static inline int osp_sync_has_work(struct osp_device *d) +/** + * Wake up check for the main sync thread + * + * \param[in] d OSP device + * + * \retval 1 time to wake up + * \retval 0 no need to wake up + */ +static inline int osp_sync_has_work(struct osp_device *osp) { /* has new/old changes and low in-progress? */ - if (osp_sync_has_new_job(d) && osp_sync_low_in_progress(d) && - osp_sync_low_in_flight(d) && d->opd_imp_connected) + if (osp_sync_has_new_job(osp) && osp_sync_rpcs_in_progress_low(osp) && + osp_sync_rpcs_in_flight_low(osp) && osp->opd_imp_connected) return 1; /* has remotely committed? */ - if (!cfs_list_empty(&d->opd_syn_committed_there)) + if (!list_empty(&osp->opd_sync_committed_there)) return 1; return 0; } -#define osp_sync_check_for_work(d) \ -{ \ - if (osp_sync_has_work(d)) { \ - wake_up(&d->opd_syn_waitq); \ - } \ +void osp_sync_check_for_work(struct osp_device *osp) +{ + if (osp_sync_has_work(osp)) + wake_up(&osp->opd_sync_waitq); } -void __osp_sync_check_for_work(struct osp_device *d) +static inline __u64 osp_sync_correct_id(struct osp_device *d, + struct llog_rec_hdr *rec) { - osp_sync_check_for_work(d); + /* + * llog use cyclic store with 32 bit lrh_id + * so overflow lrh_id is possible. Range between + * last_processed and last_committed is less than + * 64745 ^ 2 and less than 2^32 - 1 + */ + __u64 correct_id = d->opd_sync_last_committed_id; + + if ((correct_id & 0xffffffffULL) < rec->lrh_id) + correct_id -= 0x100000000ULL; + + correct_id &= ~0xffffffffULL; + correct_id |= rec->lrh_id; + + return correct_id; } +/** + * Check and return ready-for-new status. + * + * The thread processing llog record uses this function to check whether + * it's time to take another record and process it. The number of conditions + * must be met: the connection should be ready, RPCs in flight not exceeding + * the limit, the record is committed locally, etc (see the lines below). + * + * \param[in] d OSP device + * \param[in] rec next llog record to process + * + * \retval 0 not ready + * \retval 1 ready + */ static inline int osp_sync_can_process_new(struct osp_device *d, struct llog_rec_hdr *rec) { LASSERT(d); - if (!osp_sync_low_in_progress(d)) + if (unlikely(atomic_read(&d->opd_sync_barrier) > 0)) return 0; - if (!osp_sync_low_in_flight(d)) + if (unlikely(osp_sync_in_flight_conflict(d, rec))) + return 0; + if (!osp_sync_rpcs_in_progress_low(d)) + return 0; + if (!osp_sync_rpcs_in_flight_low(d)) return 0; if (!d->opd_imp_connected) return 0; - if (d->opd_syn_prev_done == 0) + if (d->opd_sync_prev_done == 0) return 1; - if (d->opd_syn_changes == 0) + if (atomic_read(&d->opd_sync_changes) == 0) return 0; - if (rec == NULL || rec->lrh_id <= d->opd_syn_last_committed_id) + if (rec == NULL) + return 1; + /* notice "<" not "<=" */ + if (osp_sync_correct_id(d, rec) < d->opd_sync_last_committed_id) return 1; return 0; } +/** + * Declare intention to add a new change. + * + * With regard to OSD API, we have to declare any changes ahead. In this + * case we declare an intention to add a llog record representing the + * change on the local storage. 
+ * + * \param[in] env LU environment provided by the caller + * \param[in] o OSP object + * \param[in] type type of change: MDS_UNLINK64_REC or MDS_SETATTR64_REC + * \param[in] th transaction handle (local) + * + * \retval 0 on success + * \retval negative negated errno on error + */ int osp_sync_declare_add(const struct lu_env *env, struct osp_object *o, - llog_op_type type, struct thandle *th) + enum llog_op_type type, struct thandle *th) { struct osp_thread_info *osi = osp_env_info(env); struct osp_device *d = lu2osp_dev(o->opo_obj.do_lu.lo_dev); struct llog_ctxt *ctxt; + struct thandle *storage_th; int rc; ENTRY; /* it's a layering violation, to access internals of th, * but we can do this as a sanity check, for a while */ - LASSERT(th->th_dev == d->opd_storage); + LASSERT(th->th_top != NULL); + storage_th = thandle_get_sub_by_dt(env, th->th_top, d->opd_storage); + if (IS_ERR(storage_th)) + RETURN(PTR_ERR(storage_th)); switch (type) { case MDS_UNLINK64_REC: osi->osi_hdr.lrh_len = sizeof(struct llog_unlink64_rec); break; case MDS_SETATTR64_REC: - osi->osi_hdr.lrh_len = sizeof(struct llog_setattr64_rec); + osi->osi_hdr.lrh_len = sizeof(struct llog_setattr64_rec_v2); break; default: LBUG(); } - /* we want ->dt_trans_start() to allocate per-thandle structure */ - th->th_tags |= LCT_OSP_THREAD; - ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT); LASSERT(ctxt); - rc = llog_declare_add(env, ctxt->loc_handle, &osi->osi_hdr, th); + rc = llog_declare_add(env, ctxt->loc_handle, &osi->osi_hdr, + storage_th); llog_ctxt_put(ctxt); RETURN(rc); } +/** + * Generate a llog record for a given change. + * + * Generates a llog record for the change passed. The change can be of two + * types: unlink and setattr. The record gets an ID which later will be + * used to track commit status of the change. For unlink changes, the caller + * can supply a starting FID and the count of the objects to destroy. For + * setattr the caller should apply attributes to apply. 
+ * + * + * \param[in] env LU environment provided by the caller + * \param[in] d OSP device + * \param[in] fid fid of the object the change should be applied to + * \param[in] type type of change: MDS_UNLINK64_REC or MDS_SETATTR64_REC + * \param[in] count count of objects to destroy + * \param[in] th transaction handle (local) + * \param[in] attr attributes for setattr + * + * \retval 0 on success + * \retval negative negated errno on error + */ static int osp_sync_add_rec(const struct lu_env *env, struct osp_device *d, - const struct lu_fid *fid, llog_op_type type, + const struct lu_fid *fid, enum llog_op_type type, int count, struct thandle *th, const struct lu_attr *attr) { struct osp_thread_info *osi = osp_env_info(env); struct llog_ctxt *ctxt; - struct osp_txn_info *txn; + struct thandle *storage_th; + bool immediate_commit_cb = false; int rc; ENTRY; /* it's a layering violation, to access internals of th, * but we can do this as a sanity check, for a while */ - LASSERT(th->th_dev == d->opd_storage); + LASSERT(th->th_top != NULL); + storage_th = thandle_get_sub_by_dt(env, th->th_top, d->opd_storage); + if (IS_ERR(storage_th)) + RETURN(PTR_ERR(storage_th)); switch (type) { case MDS_UNLINK64_REC: @@ -235,43 +409,60 @@ static int osp_sync_add_rec(const struct lu_env *env, struct osp_device *d, LASSERT(attr); osi->osi_setattr.lsr_uid = attr->la_uid; osi->osi_setattr.lsr_gid = attr->la_gid; + osi->osi_setattr.lsr_layout_version = attr->la_layout_version; + osi->osi_setattr.lsr_projid = attr->la_projid; + osi->osi_setattr.lsr_valid = + ((attr->la_valid & LA_UID) ? OBD_MD_FLUID : 0) | + ((attr->la_valid & LA_GID) ? OBD_MD_FLGID : 0) | + ((attr->la_valid & LA_PROJID) ? OBD_MD_FLPROJID : 0); + if (attr->la_valid & LA_LAYOUT_VERSION) { + osi->osi_setattr.lsr_valid |= OBD_MD_LAYOUT_VERSION; + + /* FLR: the layout version has to be transferred to + * OST objects ASAP, otherwise clients will have to + * experience delay to be able to write OST objects. 
*/ + immediate_commit_cb = true; + } break; default: LBUG(); } - txn = osp_txn_info(&th->th_ctx); - LASSERT(txn); - - txn->oti_current_id = osp_sync_id_get(d, txn->oti_current_id); - osi->osi_hdr.lrh_id = txn->oti_current_id; + /* we keep the same id, but increment it when the callback + * is registered, so that all records upto the one taken + * by the callback are subject to processing */ + spin_lock(&d->opd_sync_lock); + osi->osi_hdr.lrh_id = d->opd_sync_last_used_id; + spin_unlock(&d->opd_sync_lock); ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT); if (ctxt == NULL) RETURN(-ENOMEM); + rc = llog_add(env, ctxt->loc_handle, &osi->osi_hdr, &osi->osi_cookie, - NULL, th); + storage_th); llog_ctxt_put(ctxt); - CDEBUG(D_OTHER, "%s: new record "DOSTID":%lu/%lu: %d\n", - d->opd_obd->obd_name, POSTID(&osi->osi_cookie.lgc_lgl.lgl_oi), - (unsigned long) osi->osi_cookie.lgc_lgl.lgl_ogen, - (unsigned long) osi->osi_cookie.lgc_index, rc); - - if (rc > 0) - rc = 0; - - if (likely(rc == 0)) { - spin_lock(&d->opd_syn_lock); - d->opd_syn_changes++; - spin_unlock(&d->opd_syn_lock); + if (likely(rc >= 0)) { + CDEBUG(D_OTHER, "%s: new record "DFID":%x.%u: rc = %d\n", + d->opd_obd->obd_name, + PFID(&osi->osi_cookie.lgc_lgl.lgl_oi.oi_fid), + osi->osi_cookie.lgc_lgl.lgl_ogen, + osi->osi_cookie.lgc_index, rc); + atomic_inc(&d->opd_sync_changes); } - RETURN(rc); + if (immediate_commit_cb) + rc = osp_sync_add_commit_cb(env, d, th); + else + rc = osp_sync_add_commit_cb_1s(env, d, th); + + /* return 0 always here, error case just cause no llog record */ + RETURN(0); } int osp_sync_add(const struct lu_env *env, struct osp_object *o, - llog_op_type type, struct thandle *th, + enum llog_op_type type, struct thandle *th, const struct lu_attr *attr) { return osp_sync_add_rec(env, lu2osp_dev(o->opo_obj.do_lu.lo_dev), @@ -280,7 +471,7 @@ int osp_sync_add(const struct lu_env *env, struct osp_object *o, } int osp_sync_gap(const struct lu_env *env, struct osp_device *d, - struct lu_fid *fid, int lost, struct thandle *th) + struct lu_fid *fid, int lost, struct thandle *th) { return osp_sync_add_rec(env, d, fid, MDS_UNLINK64_REC, lost, th, NULL); } @@ -298,57 +489,79 @@ int osp_sync_gap(const struct lu_env *env, struct osp_device *d, * subsequent commit callback (at the most) */ -/* - * called for each atomic on-disk change (not once per transaction batch) - * and goes over the list - * XXX: should be optimized? - */ - /** - * called for each RPC reported committed + * ptlrpc commit callback. + * + * The callback is called by PTLRPC when a RPC is reported committed by the + * target (OST). We register the callback for the every RPC applying a change + * from the llog. This way we know then the llog records can be cancelled. + * Notice the callback can be called when OSP is finishing. We can detect this + * checking that actual transno in the request is less or equal of known + * committed transno (see osp_sync_process_committed() for the details). + * XXX: this is pretty expensive and can be improved later using batching. 
+ * + * \param[in] req request */ static void osp_sync_request_commit_cb(struct ptlrpc_request *req) { struct osp_device *d = req->rq_cb_data; + struct osp_job_req_args *jra; - CDEBUG(D_HA, "commit req %p, transno "LPU64"\n", req, req->rq_transno); + CDEBUG(D_HA, "commit req %p, transno %llu\n", req, req->rq_transno); if (unlikely(req->rq_transno == 0)) return; - /* do not do any opd_dyn_rpc_* accounting here + /* do not do any opd_sync_rpcs_* accounting here * it's done in osp_sync_interpret sooner or later */ - LASSERT(d); - LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC); - LASSERT(cfs_list_empty(&req->rq_exp_list)); + + jra = ptlrpc_req_async_args(req); + LASSERT(jra->jra_magic == OSP_JOB_MAGIC); + LASSERT(list_empty(&jra->jra_committed_link)); ptlrpc_request_addref(req); - spin_lock(&d->opd_syn_lock); - cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there); - spin_unlock(&d->opd_syn_lock); + spin_lock(&d->opd_sync_lock); + list_add(&jra->jra_committed_link, &d->opd_sync_committed_there); + spin_unlock(&d->opd_sync_lock); /* XXX: some batching wouldn't hurt */ - wake_up(&d->opd_syn_waitq); + wake_up(&d->opd_sync_waitq); } +/** + * RPC interpretation callback. + * + * The callback is called by ptlrpc when RPC is replied. Now we have to decide + * whether we should: + * - put request on a special list to wait until it's committed by the target, + * if the request is successful + * - schedule llog record cancel if no target object is found + * - try later (essentially after reboot) in case of unexpected error + * + * \param[in] env LU environment provided by the caller + * \param[in] req request replied + * \param[in] aa callback data + * \param[in] rc result of RPC + * + * \retval 0 always + */ static int osp_sync_interpret(const struct lu_env *env, - struct ptlrpc_request *req, void *aa, int rc) + struct ptlrpc_request *req, void *args, int rc) { + struct osp_job_req_args *jra = args; struct osp_device *d = req->rq_cb_data; - if (req->rq_svc_thread != (void *) OSP_JOB_MAGIC) - DEBUG_REQ(D_ERROR, req, "bad magic %p\n", req->rq_svc_thread); - LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC); + if (jra->jra_magic != OSP_JOB_MAGIC) { + DEBUG_REQ(D_ERROR, req, "bad magic %u\n", jra->jra_magic); + LBUG(); + } LASSERT(d); CDEBUG(D_HA, "reply req %p/%d, rc %d, transno %u\n", req, - cfs_atomic_read(&req->rq_refcount), + atomic_read(&req->rq_refcount), rc, (unsigned) req->rq_transno); - LASSERT(rc || req->rq_transno); - - LASSERT(d->opd_pre != NULL); if (rc == -ENOENT) { /* @@ -356,37 +569,37 @@ static int osp_sync_interpret(const struct lu_env *env, * but object doesn't exist anymore - cancell llog record */ LASSERT(req->rq_transno == 0); - LASSERT(cfs_list_empty(&req->rq_exp_list)); + LASSERT(list_empty(&jra->jra_committed_link)); ptlrpc_request_addref(req); - spin_lock(&d->opd_syn_lock); - cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there); - spin_unlock(&d->opd_syn_lock); + spin_lock(&d->opd_sync_lock); + list_add(&jra->jra_committed_link, + &d->opd_sync_committed_there); + spin_unlock(&d->opd_sync_lock); - wake_up(&d->opd_syn_waitq); + wake_up(&d->opd_sync_waitq); } else if (rc) { struct obd_import *imp = req->rq_import; /* * error happened, we'll try to repeat on next boot ? 
*/ - LASSERTF(req->rq_transno == 0 || + LASSERTF(req->rq_transno == 0 || rc == -EIO || req->rq_import_generation < imp->imp_generation, - "transno "LPU64", rc %d, gen: req %d, imp %d\n", + "transno %llu, rc %d, gen: req %d, imp %d\n", req->rq_transno, rc, req->rq_import_generation, imp->imp_generation); if (req->rq_transno == 0) { /* this is the last time we see the request * if transno is not zero, then commit cb * will be called at some point */ - LASSERT(d->opd_syn_rpc_in_progress > 0); - spin_lock(&d->opd_syn_lock); - d->opd_syn_rpc_in_progress--; - spin_unlock(&d->opd_syn_lock); + LASSERT(atomic_read(&d->opd_sync_rpcs_in_progress) > 0); + atomic_dec(&d->opd_sync_rpcs_in_progress); } - wake_up(&d->opd_syn_waitq); - } else if (unlikely(d->opd_pre_status == -ENOSPC)) { + wake_up(&d->opd_sync_waitq); + } else if (d->opd_pre != NULL && + unlikely(d->opd_pre_status == -ENOSPC)) { /* * if current status is -ENOSPC (lack of free space on OST) * then we should poll OST immediately once object destroy @@ -395,13 +608,16 @@ static int osp_sync_interpret(const struct lu_env *env, osp_statfs_need_now(d); } - LASSERT(d->opd_syn_rpc_in_flight > 0); - spin_lock(&d->opd_syn_lock); - d->opd_syn_rpc_in_flight--; - spin_unlock(&d->opd_syn_lock); + spin_lock(&d->opd_sync_lock); + list_del_init(&jra->jra_in_flight_link); + spin_unlock(&d->opd_sync_lock); + LASSERT(atomic_read(&d->opd_sync_rpcs_in_flight) > 0); + atomic_dec(&d->opd_sync_rpcs_in_flight); + if (unlikely(atomic_read(&d->opd_sync_barrier) > 0)) + wake_up(&d->opd_sync_barrier_waitq); CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n", - d->opd_obd->obd_name, d->opd_syn_rpc_in_flight, - d->opd_syn_rpc_in_progress); + d->opd_obd->obd_name, atomic_read(&d->opd_sync_rpcs_in_flight), + atomic_read(&d->opd_sync_rpcs_in_progress)); osp_sync_check_for_work(d); @@ -409,32 +625,69 @@ static int osp_sync_interpret(const struct lu_env *env, } /* - * the function walks through list of committed locally changes - * and send them to RPC until the pipe is full + ** Add request to ptlrpc queue. + * + * This is just a tiny helper function to put the request on the sending list + * + * \param[in] d OSP device + * \param[in] llh llog handle where the record is stored + * \param[in] h llog record + * \param[in] req request */ static void osp_sync_send_new_rpc(struct osp_device *d, + struct llog_handle *llh, + struct llog_rec_hdr *h, struct ptlrpc_request *req) { - LASSERT(d->opd_syn_rpc_in_flight <= d->opd_syn_max_rpc_in_flight); - LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC); - - ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1); + struct osp_job_req_args *jra; + + LASSERT(atomic_read(&d->opd_sync_rpcs_in_flight) <= + d->opd_sync_max_rpcs_in_flight); + + jra = ptlrpc_req_async_args(req); + jra->jra_magic = OSP_JOB_MAGIC; + jra->jra_lcookie.lgc_lgl = llh->lgh_id; + jra->jra_lcookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT; + jra->jra_lcookie.lgc_index = h->lrh_index; + INIT_LIST_HEAD(&jra->jra_committed_link); + spin_lock(&d->opd_sync_lock); + list_add_tail(&jra->jra_in_flight_link, &d->opd_sync_in_flight_list); + spin_unlock(&d->opd_sync_lock); + + ptlrpcd_add_req(req); } + +/** + * Allocate and prepare RPC for a new change. + * + * The function allocates and initializes an RPC which will be sent soon to + * apply the change to the target OST. The request is initialized from the + * llog record passed. Notice only the fields common to all type of changes + * are initialized. 
+ * + * \param[in] d OSP device + * \param[in] op type of the change + * \param[in] format request format to be used + * + * \retval pointer new request on success + * \retval ERR_PTR(errno) on error + */ static struct ptlrpc_request *osp_sync_new_job(struct osp_device *d, - struct llog_handle *llh, - struct llog_rec_hdr *h, - ost_cmd_t op, + enum ost_cmd op, const struct req_format *format) { struct ptlrpc_request *req; - struct ost_body *body; struct obd_import *imp; int rc; /* Prepare the request */ imp = d->opd_obd->u.cli.cl_import; LASSERT(imp); + + if (OBD_FAIL_CHECK(OBD_FAIL_OSP_CHECK_ENOMEM)) + RETURN(ERR_PTR(-ENOMEM)); + req = ptlrpc_request_alloc(imp, format); if (req == NULL) RETURN(ERR_PTR(-ENOMEM)); @@ -445,20 +698,6 @@ static struct ptlrpc_request *osp_sync_new_job(struct osp_device *d, return ERR_PTR(rc); } - /* - * this is a trick: to save on memory allocations we put cookie - * into the request, but don't set corresponded flag in o_valid - * so that OST doesn't interpret this cookie. once the request - * is committed on OST we take cookie from the request and cancel - */ - body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); - LASSERT(body); - body->oa.o_lcookie.lgc_lgl = llh->lgh_id; - body->oa.o_lcookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT; - body->oa.o_lcookie.lgc_index = h->lrh_index; - CFS_INIT_LIST_HEAD(&req->rq_exp_list); - req->rq_svc_thread = (void *) OSP_JOB_MAGIC; - req->rq_interpret_reply = osp_sync_interpret; req->rq_commit_cb = osp_sync_request_commit_cb; req->rq_cb_data = d; @@ -468,6 +707,20 @@ static struct ptlrpc_request *osp_sync_new_job(struct osp_device *d, return req; } +/** + * Generate a request for setattr change. + * + * The function prepares a new RPC, initializes it with setattr specific + * bits and send the RPC. + * + * \param[in] d OSP device + * \param[in] llh llog handle where the record is stored + * \param[in] h llog record + * + * \retval 0 on success + * \retval 1 on invalid record + * \retval negative negated errno on error + */ static int osp_sync_new_setattr_job(struct osp_device *d, struct llog_handle *llh, struct llog_rec_hdr *h) @@ -479,7 +732,20 @@ static int osp_sync_new_setattr_job(struct osp_device *d, ENTRY; LASSERT(h->lrh_type == MDS_SETATTR64_REC); - req = osp_sync_new_job(d, llh, h, OST_SETATTR, &RQF_OST_SETATTR); + if (OBD_FAIL_CHECK(OBD_FAIL_OSP_CHECK_INVALID_REC)) + RETURN(1); + + /* lsr_valid can only be 0 or HAVE OBD_MD_{FLUID, FLGID, FLPROJID} set, + * so no bits other than these should be set. */ + if ((rec->lsr_valid & ~(OBD_MD_FLUID | OBD_MD_FLGID | + OBD_MD_FLPROJID | OBD_MD_LAYOUT_VERSION)) != 0) { + CERROR("%s: invalid setattr record, lsr_valid:%llu\n", + d->opd_obd->obd_name, rec->lsr_valid); + /* return 1 on invalid record */ + RETURN(1); + } + + req = osp_sync_new_job(d, OST_SETATTR, &RQF_OST_SETATTR); if (IS_ERR(req)) RETURN(PTR_ERR(req)); @@ -488,14 +754,45 @@ static int osp_sync_new_setattr_job(struct osp_device *d, body->oa.o_oi = rec->lsr_oi; body->oa.o_uid = rec->lsr_uid; body->oa.o_gid = rec->lsr_gid; - body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID | - OBD_MD_FLUID | OBD_MD_FLGID; + body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID; + if (h->lrh_len > sizeof(struct llog_setattr64_rec)) { + struct llog_setattr64_rec_v2 *rec_v2 = (typeof(rec_v2))rec; + body->oa.o_projid = rec_v2->lsr_projid; + body->oa.o_layout_version = rec_v2->lsr_layout_version; + } + + /* old setattr record (prior 2.6.0) doesn't have 'valid' stored, + * we assume that both UID and GID are valid in that case. 
*/ + if (rec->lsr_valid == 0) + body->oa.o_valid |= (OBD_MD_FLUID | OBD_MD_FLGID); + else + body->oa.o_valid |= rec->lsr_valid; + + if (body->oa.o_valid & OBD_MD_LAYOUT_VERSION) { + OBD_FAIL_TIMEOUT(OBD_FAIL_FLR_LV_DELAY, cfs_fail_val); + if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_FLR_LV_INC))) + ++body->oa.o_layout_version; + } - osp_sync_send_new_rpc(d, req); + osp_sync_send_new_rpc(d, llh, h, req); RETURN(0); } -/* Old records may be in old format, so we handle that too */ +/** + * Generate a request for unlink change. + * + * The function prepares a new RPC, initializes it with unlink(destroy) + * specific bits and sends the RPC. The function is used to handle + * llog_unlink_rec which were used in the older versions of Lustre. + * Current version uses llog_unlink_rec64. + * + * \param[in] d OSP device + * \param[in] llh llog handle where the record is stored + * \param[in] h llog record + * + * \retval 0 on success + * \retval negative negated errno on error + */ static int osp_sync_new_unlink_job(struct osp_device *d, struct llog_handle *llh, struct llog_rec_hdr *h) @@ -503,40 +800,60 @@ static int osp_sync_new_unlink_job(struct osp_device *d, struct llog_unlink_rec *rec = (struct llog_unlink_rec *)h; struct ptlrpc_request *req; struct ost_body *body; + int rc; ENTRY; LASSERT(h->lrh_type == MDS_UNLINK_REC); - req = osp_sync_new_job(d, llh, h, OST_DESTROY, &RQF_OST_DESTROY); + req = osp_sync_new_job(d, OST_DESTROY, &RQF_OST_DESTROY); if (IS_ERR(req)) RETURN(PTR_ERR(req)); body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); LASSERT(body); ostid_set_seq(&body->oa.o_oi, rec->lur_oseq); - ostid_set_id(&body->oa.o_oi, rec->lur_oid); + rc = ostid_set_id(&body->oa.o_oi, rec->lur_oid); + if (rc) + return rc; body->oa.o_misc = rec->lur_count; body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID; if (rec->lur_count) body->oa.o_valid |= OBD_MD_FLOBJCOUNT; - osp_sync_send_new_rpc(d, req); + osp_sync_send_new_rpc(d, llh, h, req); RETURN(0); } +/** + * Generate a request for unlink change. + * + * The function prepares a new RPC, initializes it with unlink(destroy) + * specific bits and sends the RPC. Depending on the target (MDT or OST) + * two different protocols are used. For MDT we use OUT (basically OSD API + * updates transferred via a network). For OST we still use the old + * protocol (OBD?), originally for compatibility. Later we can start to + * use OUT for OST as well, this will allow batching and better code + * unification. 
+ * + * \param[in] d OSP device + * \param[in] llh llog handle where the record is stored + * \param[in] h llog record + * + * \retval 0 on success + * \retval negative negated errno on error + */ static int osp_sync_new_unlink64_job(struct osp_device *d, struct llog_handle *llh, struct llog_rec_hdr *h) { struct llog_unlink64_rec *rec = (struct llog_unlink64_rec *)h; - struct ptlrpc_request *req; + struct ptlrpc_request *req = NULL; struct ost_body *body; int rc; ENTRY; LASSERT(h->lrh_type == MDS_UNLINK64_REC); - - req = osp_sync_new_job(d, llh, h, OST_DESTROY, &RQF_OST_DESTROY); + req = osp_sync_new_job(d, OST_DESTROY, &RQF_OST_DESTROY); if (IS_ERR(req)) RETURN(PTR_ERR(req)); @@ -547,40 +864,63 @@ static int osp_sync_new_unlink64_job(struct osp_device *d, if (rc < 0) RETURN(rc); body->oa.o_misc = rec->lur_count; - body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID | OBD_MD_FLOBJCOUNT; - - osp_sync_send_new_rpc(d, req); + body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID | + OBD_MD_FLOBJCOUNT; + osp_sync_send_new_rpc(d, llh, h, req); RETURN(0); } -static int osp_sync_process_record(const struct lu_env *env, - struct osp_device *d, - struct llog_handle *llh, - struct llog_rec_hdr *rec) +/** + * Process llog records. + * + * This function is called to process the llog records committed locally. + * In the recovery model used by OSP we can apply a change to a remote + * target once corresponding transaction (like posix unlink) is committed + * locally so can't revert. + * Depending on the llog record type, a given handler is called that is + * responsible for preparing and sending the RPC to apply the change. + * Special record type LLOG_GEN_REC marking a reboot is cancelled right away. + * + * \param[in] env LU environment provided by the caller + * \param[in] d OSP device + * \param[in] llh llog handle where the record is stored + * \param[in] rec llog record + */ +static void osp_sync_process_record(const struct lu_env *env, + struct osp_device *d, + struct llog_handle *llh, + struct llog_rec_hdr *rec) { + struct llog_handle *cathandle = llh->u.phd.phd_cat_handle; struct llog_cookie cookie; int rc = 0; + ENTRY; + cookie.lgc_lgl = llh->lgh_id; cookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT; cookie.lgc_index = rec->lrh_index; + d->opd_sync_last_catalog_idx = llh->lgh_hdr->llh_cat_idx; + if (unlikely(rec->lrh_type == LLOG_GEN_REC)) { struct llog_gen_rec *gen = (struct llog_gen_rec *)rec; /* we're waiting for the record generated by this instance */ - LASSERT(d->opd_syn_prev_done == 0); - if (!memcmp(&d->opd_syn_generation, &gen->lgr_gen, + LASSERT(d->opd_sync_prev_done == 0); + if (!memcmp(&d->opd_sync_generation, &gen->lgr_gen, sizeof(gen->lgr_gen))) { CDEBUG(D_HA, "processed all old entries\n"); - d->opd_syn_prev_done = 1; + d->opd_sync_prev_done = 1; } /* cancel any generation record */ - rc = llog_cat_cancel_records(env, llh->u.phd.phd_cat_handle, - 1, &cookie); + rc = llog_cat_cancel_records(env, cathandle, 1, &cookie); - return rc; + /* flush all pending records ASAP */ + osp_sync_force(env, d); + + RETURN_EXIT; } /* @@ -590,10 +930,8 @@ static int osp_sync_process_record(const struct lu_env *env, /* notice we increment counters before sending RPC, to be consistent * in RPC interpret callback which may happen very quickly */ - spin_lock(&d->opd_syn_lock); - d->opd_syn_rpc_in_flight++; - d->opd_syn_rpc_in_progress++; - spin_unlock(&d->opd_syn_lock); + atomic_inc(&d->opd_sync_rpcs_in_flight); + atomic_inc(&d->opd_sync_rpcs_in_progress); switch (rec->lrh_type) { /* case MDS_UNLINK_REC is kept for 
compatibility */ @@ -607,56 +945,75 @@ static int osp_sync_process_record(const struct lu_env *env, rc = osp_sync_new_setattr_job(d, llh, rec); break; default: - CERROR("unknown record type: %x\n", rec->lrh_type); - rc = -EINVAL; - break; + CERROR("%s: unknown record type: %x\n", d->opd_obd->obd_name, + rec->lrh_type); + /* treat "unknown record type" as "invalid" */ + rc = 1; + break; } - if (likely(rc == 0)) { - spin_lock(&d->opd_syn_lock); - if (d->opd_syn_prev_done) { - LASSERT(d->opd_syn_changes > 0); - LASSERT(rec->lrh_id <= d->opd_syn_last_committed_id); - /* - * NOTE: it's possible to meet same id if - * OST stores few stripes of same file - */ - if (rec->lrh_id > d->opd_syn_last_processed_id) - d->opd_syn_last_processed_id = rec->lrh_id; - - d->opd_syn_changes--; - } - CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n", - d->opd_obd->obd_name, d->opd_syn_rpc_in_flight, - d->opd_syn_rpc_in_progress); - spin_unlock(&d->opd_syn_lock); - } else { - spin_lock(&d->opd_syn_lock); - d->opd_syn_rpc_in_flight--; - d->opd_syn_rpc_in_progress--; - spin_unlock(&d->opd_syn_lock); + /* For all kinds of records, not matter successful or not, + * we should decrease changes and bump last_processed_id. + */ + if (d->opd_sync_prev_done) { + LASSERT(atomic_read(&d->opd_sync_changes) > 0); + atomic_dec(&d->opd_sync_changes); + wake_up(&d->opd_sync_barrier_waitq); + } + atomic64_inc(&d->opd_sync_processed_recs); + if (rc != 0) { + atomic_dec(&d->opd_sync_rpcs_in_flight); + atomic_dec(&d->opd_sync_rpcs_in_progress); } - CDEBUG(D_HA, "found record %x, %d, idx %u, id %u: %d\n", - rec->lrh_type, rec->lrh_len, rec->lrh_index, rec->lrh_id, rc); - return rc; + CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n", + d->opd_obd->obd_name, atomic_read(&d->opd_sync_rpcs_in_flight), + atomic_read(&d->opd_sync_rpcs_in_progress)); + + /* Delete the invalid record */ + if (rc == 1) { + rc = llog_cat_cancel_records(env, cathandle, 1, &cookie); + if (rc != 0) + CERROR("%s: can't delete invalid record: " + "fid = "DFID", rec_id = %u, rc = %d\n", + d->opd_obd->obd_name, + PFID(lu_object_fid(&cathandle->lgh_obj->do_lu)), + rec->lrh_id, rc); + } + + CDEBUG(D_HA, "found record %x, %d, idx %u, id %u\n", + rec->lrh_type, rec->lrh_len, rec->lrh_index, rec->lrh_id); + + RETURN_EXIT; } +/** + * Cancel llog records for the committed changes. + * + * The function walks through the list of the committed RPCs and cancels + * corresponding llog records. see osp_sync_request_commit_cb() for the + * details. + * + * \param[in] env LU environment provided by the caller + * \param[in] d OSP device + */ static void osp_sync_process_committed(const struct lu_env *env, struct osp_device *d) { struct obd_device *obd = d->opd_obd; struct obd_import *imp = obd->u.cli.cl_import; struct ost_body *body; - struct ptlrpc_request *req, *tmp; + struct ptlrpc_request *req; struct llog_ctxt *ctxt; struct llog_handle *llh; - cfs_list_t list; - int rc, done = 0; + int *arr; + struct list_head list, *le; + struct llog_logid lgid; + int rc, i, count = 0, done = 0; ENTRY; - if (cfs_list_empty(&d->opd_syn_committed_there)) + if (list_empty(&d->opd_sync_committed_there)) return; /* @@ -666,8 +1023,7 @@ static void osp_sync_process_committed(const struct lu_env *env, * notice: we do this upon commit as well because some backends * (like DMU) do not release space right away. 
*/ - LASSERT(d->opd_pre != NULL); - if (unlikely(d->opd_pre_status == -ENOSPC)) + if (d->opd_pre != NULL && unlikely(d->opd_pre_status == -ENOSPC)) osp_statfs_need_now(d); /* @@ -682,57 +1038,103 @@ static void osp_sync_process_committed(const struct lu_env *env, llh = ctxt->loc_handle; LASSERT(llh); - CFS_INIT_LIST_HEAD(&list); - spin_lock(&d->opd_syn_lock); - cfs_list_splice(&d->opd_syn_committed_there, &list); - CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there); - spin_unlock(&d->opd_syn_lock); - - cfs_list_for_each_entry_safe(req, tmp, &list, rq_exp_list) { - LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC); - cfs_list_del_init(&req->rq_exp_list); - - body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); + INIT_LIST_HEAD(&list); + spin_lock(&d->opd_sync_lock); + list_splice(&d->opd_sync_committed_there, &list); + INIT_LIST_HEAD(&d->opd_sync_committed_there); + spin_unlock(&d->opd_sync_lock); + + list_for_each(le, &list) + count++; + if (count > 2) + OBD_ALLOC_WAIT(arr, sizeof(int) * count); + else + arr = NULL; + i = 0; + while (!list_empty(&list)) { + struct osp_job_req_args *jra; + + jra = list_entry(list.next, struct osp_job_req_args, + jra_committed_link); + LASSERT(jra->jra_magic == OSP_JOB_MAGIC); + list_del_init(&jra->jra_committed_link); + + req = container_of((void *)jra, struct ptlrpc_request, + rq_async_args); + body = req_capsule_client_get(&req->rq_pill, + &RMF_OST_BODY); LASSERT(body); - /* import can be closing, thus all commit cb's are * called we can check committness directly */ - if (req->rq_transno <= imp->imp_peer_committed_transno) { - rc = llog_cat_cancel_records(env, llh, 1, - &body->oa.o_lcookie); - if (rc) - CERROR("%s: can't cancel record: %d\n", - obd->obd_name, rc); + if (req->rq_import_generation == imp->imp_generation) { + if (arr && (!i || + !memcmp(&jra->jra_lcookie.lgc_lgl, &lgid, + sizeof(lgid)))) { + if (unlikely(!i)) + lgid = jra->jra_lcookie.lgc_lgl; + + arr[i++] = jra->jra_lcookie.lgc_index; + } else { + rc = llog_cat_cancel_records(env, llh, 1, + &jra->jra_lcookie); + if (rc) + CERROR("%s: can't cancel record: %d\n", + obd->obd_name, rc); + } } else { - DEBUG_REQ(D_HA, req, "not committed"); + DEBUG_REQ(D_OTHER, req, "imp_committed = %llu", + imp->imp_peer_committed_transno); } - ptlrpc_req_finished(req); done++; } + if (arr && i > 0) { + rc = llog_cat_cancel_arr_rec(env, llh, &lgid, i, arr); + + if (rc) + CERROR("%s: can't cancel %d records rc: %d\n", + obd->obd_name, i, rc); + else + CDEBUG(D_OTHER, "%s: massive records cancel id "DFID\ + " num %d\n", obd->obd_name, + PFID(&lgid.lgl_oi.oi_fid), i); + } + if (arr) + OBD_FREE(arr, sizeof(int) * count); llog_ctxt_put(ctxt); - LASSERT(d->opd_syn_rpc_in_progress >= done); - spin_lock(&d->opd_syn_lock); - d->opd_syn_rpc_in_progress -= done; - spin_unlock(&d->opd_syn_lock); - CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n", - d->opd_obd->obd_name, d->opd_syn_rpc_in_flight, - d->opd_syn_rpc_in_progress); + LASSERT(atomic_read(&d->opd_sync_rpcs_in_progress) >= done); + atomic_sub(done, &d->opd_sync_rpcs_in_progress); + CDEBUG(D_OTHER, "%s: %d in flight, %d in progress, done %d\n", + d->opd_obd->obd_name, atomic_read(&d->opd_sync_rpcs_in_flight), + atomic_read(&d->opd_sync_rpcs_in_progress), done); osp_sync_check_for_work(d); /* wake up the thread if requested to stop: * it might be waiting for in-progress to complete */ if (unlikely(osp_sync_running(d) == 0)) - wake_up(&d->opd_syn_waitq); + wake_up(&d->opd_sync_waitq); EXIT; } -/* - * this is where most of queues processing happens +/** + 
* The core of the syncing mechanism. + * + * This is a callback called by the llog processing function. Essentially it + * suspends llog processing until there is a record to process (it's supposed + * to be committed locally). The function handles RPCs committed by the target + * and cancels corresponding llog records. + * + * \param[in] env LU environment provided by the caller + * \param[in] llh llog handle we're processing + * \param[in] rec current llog record + * \param[in] data callback data containing a pointer to the device + * + * \retval 0 to ask the caller (llog_process()) to continue + * \retval LLOG_PROC_BREAK to ask the caller to break */ static int osp_sync_process_queues(const struct lu_env *env, struct llog_handle *llh, @@ -740,7 +1142,6 @@ static int osp_sync_process_queues(const struct lu_env *env, void *data) { struct osp_device *d = data; - int rc; do { struct l_wait_info lwi = { 0 }; @@ -758,50 +1159,30 @@ static int osp_sync_process_queues(const struct lu_env *env, if (osp_sync_can_process_new(d, rec)) { if (llh == NULL) { /* ask llog for another record */ - CDEBUG(D_HA, "%lu changes, %u in progress, %u in flight\n", - d->opd_syn_changes, - d->opd_syn_rpc_in_progress, - d->opd_syn_rpc_in_flight); + CDEBUG(D_HA, "%u changes, %u in progress," + " %u in flight\n", + atomic_read(&d->opd_sync_changes), + atomic_read(&d->opd_sync_rpcs_in_progress), + atomic_read(&d->opd_sync_rpcs_in_flight)); return 0; } - - /* - * try to send, in case of disconnection, suspend - * processing till we can send this request - */ - do { - rc = osp_sync_process_record(env, d, llh, rec); - /* - * XXX: probably different handling is needed - * for some bugs, like immediate exit or if - * OSP gets inactive - */ - if (rc) { - CERROR("can't send: %d\n", rc); - l_wait_event(d->opd_syn_waitq, - !osp_sync_running(d) || - osp_sync_has_work(d), - &lwi); - } - } while (rc != 0 && osp_sync_running(d)); - + osp_sync_process_record(env, d, llh, rec); llh = NULL; rec = NULL; } - if (d->opd_syn_last_processed_id == d->opd_syn_last_used_id) - osp_sync_remove_from_tracker(d); - - l_wait_event(d->opd_syn_waitq, + l_wait_event(d->opd_sync_waitq, !osp_sync_running(d) || osp_sync_can_process_new(d, rec) || - !cfs_list_empty(&d->opd_syn_committed_there), + !list_empty(&d->opd_sync_committed_there), &lwi); } while (1); } -/* - * this thread runs llog_cat_process() scanner calling our callback +/** + * OSP sync thread. + * + * This thread runs llog_cat_process() scanner calling our callback * to process llog records. in the callback we implement tricky * state machine as we don't want to start scanning of the llog again * and again, also we don't want to process too many records and send @@ -809,20 +1190,26 @@ static int osp_sync_process_queues(const struct lu_env *env, * being synced to OST) the callback can suspend awaiting for some * new conditions, like syncs completed. * - * in order to process llog records left by previous boots and to allow + * In order to process llog records left by previous boots and to allow * llog_process_thread() to find something (otherwise it'd just exit * immediately) we add a special GENERATATION record on each boot. 
+ * + * \param[in] _arg a pointer to thread's arguments + * + * \retval 0 on success + * \retval negative negated errno on error */ static int osp_sync_thread(void *_arg) { struct osp_device *d = _arg; - struct ptlrpc_thread *thread = &d->opd_syn_thread; + struct ptlrpc_thread *thread = &d->opd_sync_thread; struct l_wait_info lwi = { 0 }; struct llog_ctxt *ctxt; struct obd_device *obd = d->opd_obd; struct llog_handle *llh; struct lu_env env; int rc, count; + bool wrapped; ENTRY; @@ -830,12 +1217,18 @@ static int osp_sync_thread(void *_arg) if (rc) { CERROR("%s: can't initialize env: rc = %d\n", obd->obd_name, rc); + + spin_lock(&d->opd_sync_lock); + thread->t_flags = SVC_STOPPED; + spin_unlock(&d->opd_sync_lock); + wake_up(&thread->t_ctl_waitq); + RETURN(rc); } - spin_lock(&d->opd_syn_lock); + spin_lock(&d->opd_sync_lock); thread->t_flags = SVC_RUNNING; - spin_unlock(&d->opd_syn_lock); + spin_unlock(&d->opd_sync_lock); wake_up(&thread->t_ctl_waitq); ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT); @@ -851,46 +1244,84 @@ static int osp_sync_thread(void *_arg) GOTO(out, rc = -EINVAL); } - rc = llog_cat_process(&env, llh, osp_sync_process_queues, d, 0, 0); + /* + * Catalog processing stops when it processed last catalog record + * with index equal to the end of catalog bitmap. Or if it is wrapped, + * processing stops with index equal to the lgh_last_idx. We need to + * continue processing. + */ + d->opd_sync_last_catalog_idx = 0; + do { + int size; + + wrapped = (llh->lgh_hdr->llh_cat_idx >= llh->lgh_last_idx && + llh->lgh_hdr->llh_count > 1); + + rc = llog_cat_process(&env, llh, osp_sync_process_queues, d, + d->opd_sync_last_catalog_idx, 0); + + size = OBD_FAIL_PRECHECK(OBD_FAIL_CAT_RECORDS) ? + cfs_fail_val : (LLOG_HDR_BITMAP_SIZE(llh->lgh_hdr) - 1); + /* processing reaches catalog bottom */ + if (d->opd_sync_last_catalog_idx == size) + d->opd_sync_last_catalog_idx = LLOG_CAT_FIRST; + else if (wrapped) + /* If catalog is wrapped we can`t predict last index of + * processing because lgh_last_idx could be changed. 
+ * Starting form the next one */ + d->opd_sync_last_catalog_idx++; + + } while (rc == 0 && (wrapped || + d->opd_sync_last_catalog_idx == LLOG_CAT_FIRST)); + + if (rc < 0) { + CERROR("%s: llog process with osp_sync_process_queues " + "failed: %d\n", d->opd_obd->obd_name, rc); + GOTO(close, rc); + } LASSERTF(rc == 0 || rc == LLOG_PROC_BREAK, - "%lu changes, %u in progress, %u in flight: %d\n", - d->opd_syn_changes, d->opd_syn_rpc_in_progress, - d->opd_syn_rpc_in_flight, rc); + "%u changes, %u in progress, %u in flight: %d\n", + atomic_read(&d->opd_sync_changes), + atomic_read(&d->opd_sync_rpcs_in_progress), + atomic_read(&d->opd_sync_rpcs_in_flight), rc); /* we don't expect llog_process_thread() to exit till umount */ LASSERTF(thread->t_flags != SVC_RUNNING, - "%lu changes, %u in progress, %u in flight\n", - d->opd_syn_changes, d->opd_syn_rpc_in_progress, - d->opd_syn_rpc_in_flight); + "%u changes, %u in progress, %u in flight\n", + atomic_read(&d->opd_sync_changes), + atomic_read(&d->opd_sync_rpcs_in_progress), + atomic_read(&d->opd_sync_rpcs_in_flight)); /* wait till all the requests are completed */ count = 0; - while (d->opd_syn_rpc_in_progress > 0) { + while (atomic_read(&d->opd_sync_rpcs_in_progress) > 0) { osp_sync_process_committed(&env, d); lwi = LWI_TIMEOUT(cfs_time_seconds(5), NULL, NULL); - rc = l_wait_event(d->opd_syn_waitq, - d->opd_syn_rpc_in_progress == 0, + rc = l_wait_event(d->opd_sync_waitq, + atomic_read(&d->opd_sync_rpcs_in_progress) == 0, &lwi); if (rc == -ETIMEDOUT) count++; LASSERTF(count < 10, "%s: %d %d %sempty\n", - d->opd_obd->obd_name, d->opd_syn_rpc_in_progress, - d->opd_syn_rpc_in_flight, - cfs_list_empty(&d->opd_syn_committed_there) ? "" :"!"); + d->opd_obd->obd_name, + atomic_read(&d->opd_sync_rpcs_in_progress), + atomic_read(&d->opd_sync_rpcs_in_flight), + list_empty(&d->opd_sync_committed_there) ? "" : "!"); } +close: llog_cat_close(&env, llh); rc = llog_cleanup(&env, ctxt); if (rc) CERROR("can't cleanup llog: %d\n", rc); out: - LASSERTF(d->opd_syn_rpc_in_progress == 0, - "%s: %d %d %sempty\n", - d->opd_obd->obd_name, d->opd_syn_rpc_in_progress, - d->opd_syn_rpc_in_flight, - cfs_list_empty(&d->opd_syn_committed_there) ? "" : "!"); + LASSERTF(atomic_read(&d->opd_sync_rpcs_in_progress) == 0, + "%s: %d %d %sempty\n", d->opd_obd->obd_name, + atomic_read(&d->opd_sync_rpcs_in_progress), + atomic_read(&d->opd_sync_rpcs_in_flight), + list_empty(&d->opd_sync_committed_there) ? "" : "!"); thread->t_flags = SVC_STOPPED; @@ -901,13 +1332,28 @@ out: RETURN(0); } +/** + * Initialize llog. + * + * Initializes the llog. Specific llog to be used depends on the type of the + * target OSP represents (OST or MDT). The function adds appends a new llog + * record to mark the place where the records associated with this boot + * start. 
+ * + * \param[in] env LU environment provided by the caller + * \param[in] d OSP device + * + * \retval 0 on success + * \retval negative negated errno on error + */ static int osp_sync_llog_init(const struct lu_env *env, struct osp_device *d) { - struct osp_thread_info *osi = osp_env_info(env); - struct llog_handle *lgh = NULL; - struct obd_device *obd = d->opd_obd; - struct llog_ctxt *ctxt; - int rc; + struct osp_thread_info *osi = osp_env_info(env); + struct lu_fid *fid = &osi->osi_fid; + struct llog_handle *lgh = NULL; + struct obd_device *obd = d->opd_obd; + struct llog_ctxt *ctxt; + int rc; ENTRY; @@ -919,21 +1365,33 @@ static int osp_sync_llog_init(const struct lu_env *env, struct osp_device *d) OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt); obd->obd_lvfs_ctxt.dt = d->opd_storage; + lu_local_obj_fid(fid, LLOG_CATALOGS_OID); + rc = llog_osd_get_cat_list(env, d->opd_storage, d->opd_index, 1, - &osi->osi_cid); - if (rc) { - CERROR("%s: can't get id from catalogs: rc = %d\n", - obd->obd_name, rc); - RETURN(rc); + &osi->osi_cid, fid); + if (rc < 0) { + if (rc != -EFAULT) { + CERROR("%s: can't get id from catalogs: rc = %d\n", + obd->obd_name, rc); + RETURN(rc); + } + + /* After sparse OST indices is supported, the CATALOG file + * may become a sparse file that results in failure on + * reading. Skip this error as the llog will be created + * later */ + memset(&osi->osi_cid, 0, sizeof(osi->osi_cid)); + rc = 0; } - CDEBUG(D_INFO, "%s: Init llog for %d - catid "DOSTID":%x\n", + CDEBUG(D_INFO, "%s: Init llog for %d - catid "DFID":%x\n", obd->obd_name, d->opd_index, - POSTID(&osi->osi_cid.lci_logid.lgl_oi), + PFID(&osi->osi_cid.lci_logid.lgl_oi.oi_fid), osi->osi_cid.lci_logid.lgl_ogen); - rc = llog_setup(env, obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, obd, - &osp_mds_ost_orig_logops); + rc = llog_setup(env, obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, + d->opd_storage->dd_lu_dev.ld_obd, + &llog_common_cat_ops); if (rc) RETURN(rc); @@ -960,12 +1418,12 @@ static int osp_sync_llog_init(const struct lu_env *env, struct osp_device *d) LASSERT(lgh != NULL); ctxt->loc_handle = lgh; - rc = llog_cat_init_and_process(env, lgh); + rc = llog_init_handle(env, lgh, LLOG_F_IS_CAT, NULL); if (rc) GOTO(out_close, rc); rc = llog_osd_put_cat_list(env, d->opd_storage, d->opd_index, 1, - &osi->osi_cid); + &osi->osi_cid, fid); if (rc) GOTO(out_close, rc); @@ -973,17 +1431,16 @@ static int osp_sync_llog_init(const struct lu_env *env, struct osp_device *d) * put a mark in the llog till which we'll be processing * old records restless */ - d->opd_syn_generation.mnt_cnt = cfs_time_current(); - d->opd_syn_generation.conn_cnt = cfs_time_current(); + d->opd_sync_generation.mnt_cnt = ktime_get_ns(); + d->opd_sync_generation.conn_cnt = ktime_get_ns(); osi->osi_hdr.lrh_type = LLOG_GEN_REC; osi->osi_hdr.lrh_len = sizeof(osi->osi_gen); - memcpy(&osi->osi_gen.lgr_gen, &d->opd_syn_generation, + memcpy(&osi->osi_gen.lgr_gen, &d->opd_sync_generation, sizeof(osi->osi_gen.lgr_gen)); - rc = llog_cat_add(env, lgh, &osi->osi_gen.lgr_hdr, &osi->osi_cookie, - NULL); + rc = llog_cat_add(env, lgh, &osi->osi_gen.lgr_hdr, &osi->osi_cookie); if (rc < 0) GOTO(out_close, rc); llog_ctxt_put(ctxt); @@ -995,29 +1452,58 @@ out_cleanup: RETURN(rc); } +/** + * Cleanup llog used for syncing. + * + * Closes and cleanups the llog. The function is called when the device is + * shutting down. 
+ * + * \param[in] env LU environment provided by the caller + * \param[in] d OSP device + */ static void osp_sync_llog_fini(const struct lu_env *env, struct osp_device *d) { struct llog_ctxt *ctxt; ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT); - if (ctxt != NULL) + if (ctxt) { llog_cat_close(env, ctxt->loc_handle); - llog_cleanup(env, ctxt); + llog_cleanup(env, ctxt); + } } -/* - * initializes sync component of OSP +/** + * Initialization of the sync component of OSP. + * + * Initializes the llog and starts a new thread to handle the changes to + * the remote target (OST or MDT). + * + * \param[in] env LU environment provided by the caller + * \param[in] d OSP device + * + * \retval 0 on success + * \retval negative negated errno on error */ int osp_sync_init(const struct lu_env *env, struct osp_device *d) { struct l_wait_info lwi = { 0 }; + struct task_struct *task; int rc; ENTRY; - rc = osp_sync_id_traction_init(d); - if (rc) - RETURN(rc); + d->opd_sync_max_rpcs_in_flight = OSP_MAX_RPCS_IN_FLIGHT; + d->opd_sync_max_rpcs_in_progress = OSP_MAX_RPCS_IN_PROGRESS; + spin_lock_init(&d->opd_sync_lock); + init_waitqueue_head(&d->opd_sync_waitq); + init_waitqueue_head(&d->opd_sync_barrier_waitq); + thread_set_flags(&d->opd_sync_thread, SVC_INIT); + init_waitqueue_head(&d->opd_sync_thread.t_ctl_waitq); + INIT_LIST_HEAD(&d->opd_sync_in_flight_list); + INIT_LIST_HEAD(&d->opd_sync_committed_there); + + if (d->opd_storage->dd_rdonly) + RETURN(0); /* * initialize llog storing changes @@ -1032,199 +1518,157 @@ int osp_sync_init(const struct lu_env *env, struct osp_device *d) /* * Start synchronization thread */ - d->opd_syn_max_rpc_in_flight = OSP_MAX_IN_FLIGHT; - d->opd_syn_max_rpc_in_progress = OSP_MAX_IN_PROGRESS; - spin_lock_init(&d->opd_syn_lock); - init_waitqueue_head(&d->opd_syn_waitq); - init_waitqueue_head(&d->opd_syn_thread.t_ctl_waitq); - CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there); - - rc = PTR_ERR(kthread_run(osp_sync_thread, d, - "osp-syn-%u", d->opd_index)); - if (IS_ERR_VALUE(rc)) { - CERROR("%s: can't start sync thread: rc = %d\n", + task = kthread_run(osp_sync_thread, d, "osp-syn-%u-%u", + d->opd_index, d->opd_group); + if (IS_ERR(task)) { + rc = PTR_ERR(task); + CERROR("%s: cannot start sync thread: rc = %d\n", d->opd_obd->obd_name, rc); GOTO(err_llog, rc); } - l_wait_event(d->opd_syn_thread.t_ctl_waitq, + l_wait_event(d->opd_sync_thread.t_ctl_waitq, osp_sync_running(d) || osp_sync_stopped(d), &lwi); RETURN(0); err_llog: osp_sync_llog_fini(env, d); err_id: - osp_sync_id_traction_fini(d); return rc; } +/** + * Stop the syncing thread. + * + * Asks the syncing thread to stop and wait until it's stopped. 
+ * + * \param[in] d OSP device + * + * \retval 0 + */ int osp_sync_fini(struct osp_device *d) { - struct ptlrpc_thread *thread = &d->opd_syn_thread; + struct ptlrpc_thread *thread = &d->opd_sync_thread; ENTRY; - thread->t_flags = SVC_STOPPING; - wake_up(&d->opd_syn_waitq); - wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED); - - /* - * unregister transaction callbacks only when sync thread - * has finished operations with llog - */ - osp_sync_id_traction_fini(d); + if (!thread_is_init(thread) && !thread_is_stopped(thread)) { + thread->t_flags = SVC_STOPPING; + wake_up(&d->opd_sync_waitq); + wait_event(thread->t_ctl_waitq, thread_is_stopped(thread)); + } RETURN(0); } -static DEFINE_MUTEX(osp_id_tracker_sem); -static CFS_LIST_HEAD(osp_id_tracker_list); +struct osp_last_committed_cb { + struct dt_txn_commit_cb ospc_cb; + struct osp_device *ospc_dev; + __u64 ospc_transno; +}; -static void osp_sync_tracker_commit_cb(struct thandle *th, void *cookie) +void osp_sync_local_commit_cb(struct lu_env *env, struct thandle *th, + struct dt_txn_commit_cb *dcb, int err) { - struct osp_id_tracker *tr = cookie; - struct osp_device *d; - struct osp_txn_info *txn; + struct osp_last_committed_cb *cb; + struct osp_device *d; - LASSERT(tr); + cb = container_of0(dcb, struct osp_last_committed_cb, ospc_cb); + d = cb->ospc_dev; - txn = osp_txn_info(&th->th_ctx); - if (txn == NULL || txn->oti_current_id < tr->otr_committed_id) - return; + CDEBUG(D_HA, "%s: %llu committed\n", d->opd_obd->obd_name, + cb->ospc_transno); - spin_lock(&tr->otr_lock); - if (likely(txn->oti_current_id > tr->otr_committed_id)) { - CDEBUG(D_OTHER, "committed: %u -> %u\n", - tr->otr_committed_id, txn->oti_current_id); - tr->otr_committed_id = txn->oti_current_id; + spin_lock(&d->opd_sync_lock); + if (cb->ospc_transno > d->opd_sync_last_committed_id) + d->opd_sync_last_committed_id = cb->ospc_transno; + spin_unlock(&d->opd_sync_lock); - cfs_list_for_each_entry(d, &tr->otr_wakeup_list, - opd_syn_ontrack) { - d->opd_syn_last_committed_id = tr->otr_committed_id; - wake_up(&d->opd_syn_waitq); - } - } - spin_unlock(&tr->otr_lock); + osp_sync_check_for_work(d); + lu_device_put(osp2lu_dev(d)); + if (atomic_dec_and_test(&d->opd_commits_registered)) + wake_up(&d->opd_sync_waitq); + + OBD_FREE_PTR(cb); } -static int osp_sync_id_traction_init(struct osp_device *d) +static int osp_sync_add_commit_cb(const struct lu_env *env, + struct osp_device *d, struct thandle *th) { - struct osp_id_tracker *tr, *found = NULL; - int rc = 0; + struct osp_last_committed_cb *cb; + struct dt_txn_commit_cb *dcb; + int rc = 0; + + OBD_ALLOC_PTR(cb); + if (cb == NULL) + return -ENOMEM; + cb->ospc_dev = d; + dcb = &cb->ospc_cb; + dcb->dcb_func = osp_sync_local_commit_cb; + spin_lock(&d->opd_sync_lock); + cb->ospc_transno = ++d->opd_sync_last_used_id; + spin_unlock(&d->opd_sync_lock); + + rc = dt_trans_cb_add(th, dcb); + CDEBUG(D_HA, "%s: add commit cb at %lluns, next at %lluns, rc = %d\n", + d->opd_obd->obd_name, ktime_get_ns(), + ktime_to_ns(d->opd_sync_next_commit_cb), rc); - LASSERT(d); - LASSERT(d->opd_storage); - LASSERT(d->opd_syn_tracker == NULL); - CFS_INIT_LIST_HEAD(&d->opd_syn_ontrack); - - mutex_lock(&osp_id_tracker_sem); - cfs_list_for_each_entry(tr, &osp_id_tracker_list, otr_list) { - if (tr->otr_dev == d->opd_storage) { - LASSERT(cfs_atomic_read(&tr->otr_refcount)); - cfs_atomic_inc(&tr->otr_refcount); - d->opd_syn_tracker = tr; - found = tr; - break; - } - } - - if (found == NULL) { - rc = -ENOMEM; - OBD_ALLOC_PTR(tr); - if (tr) { - 
d->opd_syn_tracker = tr; - spin_lock_init(&tr->otr_lock); - tr->otr_dev = d->opd_storage; - tr->otr_next_id = 1; - tr->otr_committed_id = 0; - cfs_atomic_set(&tr->otr_refcount, 1); - CFS_INIT_LIST_HEAD(&tr->otr_wakeup_list); - cfs_list_add(&tr->otr_list, &osp_id_tracker_list); - tr->otr_tx_cb.dtc_txn_commit = - osp_sync_tracker_commit_cb; - tr->otr_tx_cb.dtc_cookie = tr; - tr->otr_tx_cb.dtc_tag = LCT_MD_THREAD; - dt_txn_callback_add(d->opd_storage, &tr->otr_tx_cb); - rc = 0; - } - } - mutex_unlock(&osp_id_tracker_sem); + if (likely(rc == 0)) { + lu_device_get(osp2lu_dev(d)); + atomic_inc(&d->opd_commits_registered); + } else + OBD_FREE_PTR(cb); return rc; } -static void osp_sync_id_traction_fini(struct osp_device *d) +/* add the commit callback every second */ +int osp_sync_add_commit_cb_1s(const struct lu_env *env, struct osp_device *d, + struct thandle *th) { - struct osp_id_tracker *tr; + ktime_t now = ktime_get(); + bool add = false; - ENTRY; + /* fast path */ + if (ktime_before(now, d->opd_sync_next_commit_cb)) + return 0; - LASSERT(d); - tr = d->opd_syn_tracker; - if (tr == NULL) { - EXIT; - return; + spin_lock(&d->opd_sync_lock); + if (ktime_before(d->opd_sync_next_commit_cb, now)) { + add = true; + d->opd_sync_next_commit_cb = ktime_add_ns(now, NSEC_PER_SEC); } + spin_unlock(&d->opd_sync_lock); - osp_sync_remove_from_tracker(d); - - mutex_lock(&osp_id_tracker_sem); - if (cfs_atomic_dec_and_test(&tr->otr_refcount)) { - dt_txn_callback_del(d->opd_storage, &tr->otr_tx_cb); - LASSERT(cfs_list_empty(&tr->otr_wakeup_list)); - cfs_list_del(&tr->otr_list); - OBD_FREE_PTR(tr); - d->opd_syn_tracker = NULL; - } - mutex_unlock(&osp_id_tracker_sem); + if (!add) + return 0; - EXIT; + return osp_sync_add_commit_cb(env, d, th); } /* - * generates id for the tracker + * generate an empty transaction and hook the commit callback in + * then force transaction commit */ -static __u32 osp_sync_id_get(struct osp_device *d, __u32 id) -{ - struct osp_id_tracker *tr; - - tr = d->opd_syn_tracker; - LASSERT(tr); - - /* XXX: we can improve this introducing per-cpu preallocated ids? */ - spin_lock(&tr->otr_lock); - if (unlikely(tr->otr_next_id <= d->opd_syn_last_used_id)) { - spin_unlock(&tr->otr_lock); - CERROR("%s: next %u, last synced %lu\n", - d->opd_obd->obd_name, tr->otr_next_id, - d->opd_syn_last_used_id); - LBUG(); - } - - if (id == 0) - id = tr->otr_next_id++; - if (id > d->opd_syn_last_used_id) - d->opd_syn_last_used_id = id; - if (cfs_list_empty(&d->opd_syn_ontrack)) - cfs_list_add(&d->opd_syn_ontrack, &tr->otr_wakeup_list); - spin_unlock(&tr->otr_lock); - CDEBUG(D_OTHER, "new id %u\n", (unsigned) id); - - return id; -} - -static void osp_sync_remove_from_tracker(struct osp_device *d) +void osp_sync_force(const struct lu_env *env, struct osp_device *d) { - struct osp_id_tracker *tr; + struct thandle *th; + int rc; - tr = d->opd_syn_tracker; - LASSERT(tr); - - if (cfs_list_empty(&d->opd_syn_ontrack)) + th = dt_trans_create(env, d->opd_storage); + if (IS_ERR(th)) { + CERROR("%s: can't sync\n", d->opd_obd->obd_name); return; + } + rc = dt_trans_start_local(env, d->opd_storage, th); + if (rc == 0) { + CDEBUG(D_OTHER, "%s: sync forced, %d changes\n", + d->opd_obd->obd_name, + atomic_read(&d->opd_sync_changes)); + rc = osp_sync_add_commit_cb(env, d, th); + dt_trans_stop(env, d->opd_storage, th); + } - spin_lock(&tr->otr_lock); - cfs_list_del_init(&d->opd_syn_ontrack); - spin_unlock(&tr->otr_lock); + dt_commit_async(env, d->opd_storage); } -
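
Editor's note (not part of the patch): the new osp_sync_correct_id() helper introduced above widens the cyclic 32-bit llog record id (lrh_id) to 64 bits using the high bits of opd_sync_last_committed_id, stepping the high word back by one when the low 32 bits of the last committed id have already wrapped past the record id. Below is a minimal standalone C sketch of that correction with hypothetical names (correct_id, last_committed_id); it is an illustration of the logic in the patch, not code from the tree.

/* standalone model of the 32-bit llog id widening used by
 * osp_sync_correct_id() in the patch above */
#include <stdint.h>
#include <stdio.h>

static uint64_t correct_id(uint64_t last_committed_id, uint32_t lrh_id)
{
	uint64_t correct = last_committed_id;

	/* last_committed already wrapped past this record: the record
	 * belongs to the previous 2^32 window */
	if ((correct & 0xffffffffULL) < lrh_id)
		correct -= 0x100000000ULL;

	/* keep the (possibly adjusted) high word, splice in lrh_id */
	correct &= ~0xffffffffULL;
	return correct | lrh_id;
}

int main(void)
{
	/* a record written just before the 32-bit counter wrapped,
	 * seen after last_committed moved into the next window */
	printf("%llx\n",
	       (unsigned long long)correct_id(0x100000005ULL, 0xfffffffeU));
	/* prints fffffffe, i.e. the id stays in the previous window,
	 * so the "<" comparison in osp_sync_can_process_new() still
	 * orders it before the last committed id */
	return 0;
}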