diff --git a/lustre/osp/osp_sync.c b/lustre/osp/osp_sync.c
index 083a751..4547325 100644
--- a/lustre/osp/osp_sync.c
+++ b/lustre/osp/osp_sync.c
@@ -15,11 +15,7 @@
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
@@ -27,7 +23,7 @@
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
- * Copyright (c) 2012, 2013, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
@@ -43,15 +39,11 @@

 #define DEBUG_SUBSYSTEM S_MDS

+#include <linux/kthread.h>
 #include <obd_class.h>
 #include <lustre_log.h>
 #include "osp_internal.h"

-static int osp_sync_id_traction_init(struct osp_device *d);
-static void osp_sync_id_traction_fini(struct osp_device *d);
-static __u32 osp_sync_id_get(struct osp_device *d, __u32 id);
-static void osp_sync_remove_from_tracker(struct osp_device *d);
-
/*
 * this is a component of OSP implementing synchronization between MDS and OST
 * it llogs all interesting changes (currently it's uid/gid change and object
 * destroy) atomically, then makes sure changes hit OST storage
 *
 * we have 4 queues of work:
 *
 * the first queue is llog itself, once read a change is stored in 2nd queue
 * in form of RPC (but RPC isn't fired yet).
 *
- * the second queue (opd_syn_waiting_for_commit) holds changes awaiting local
+ * the second queue (opd_sync_waiting_for_commit) holds changes awaiting local
 * commit. once change is committed locally it migrates onto 3rd queue.
 *
- * the third queue (opd_syn_committed_here) holds changes committed locally,
+ * the third queue (opd_sync_committed_here) holds changes committed locally,
 * but not sent to OST (as the pipe can be full). once pipe becomes non-full
 * we take a change from the queue and fire the corresponding RPC.
 *
 * once RPC is reported committed by OST (using regular last_committed mech.)
- * the change jumps into 4th queue (opd_syn_committed_there), now we can
+ * the change jumps into 4th queue (opd_sync_committed_there), now we can
 * cancel the corresponding llog record and release RPC
 *
- * opd_syn_changes is a number of unread llog records (to be processed).
+ * opd_sync_changes is the number of unread llog records (to be processed).
 * notice this number doesn't include llog records from previous boots.
- * with OSP_SYN_THRESHOLD we try to batch processing a bit (TO BE IMPLEMENTED)
+ * with OSP_SYNC_THRESHOLD we try to batch processing a bit (TO BE IMPLEMENTED)
 *
- * opd_syn_rpc_in_progress is a number of requests in 2-4 queues.
- * we control this with OSP_MAX_IN_PROGRESS so that OSP don't consume
+ * opd_sync_rpcs_in_progress is the total number of requests in the above 2-4 queues.
+ * we control this with OSP_MAX_RPCS_IN_PROGRESS so that OSP doesn't consume
 * too much memory -- how to deal with 1000s of OSTs? batching could help?
 *
- * opd_syn_rpc_in_flight is a number of RPC in flight.
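 * (an RPC is "in flight" from the moment it is handed to ptlrpcd until its
 * reply has been interpreted, while "in progress" additionally counts
 * requests still sitting in queues 2-4)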
- * we control this with OSP_MAX_IN_FLIGHT + * opd_sync_rpcs_in_flight is a number of RPC in flight. + * we control this with OSP_MAX_RPCS_IN_FLIGHT */ /* XXX: do math to learn reasonable threshold * should it be ~ number of changes fitting bulk? */ -#define OSP_SYN_THRESHOLD 10 -#define OSP_MAX_IN_FLIGHT 8 -#define OSP_MAX_IN_PROGRESS 4096 +#define OSP_SYNC_THRESHOLD 10 +#define OSP_MAX_RPCS_IN_FLIGHT 8 +#define OSP_MAX_RPCS_IN_PROGRESS 4096 #define OSP_JOB_MAGIC 0x26112005 -/** - * Return status: whether OSP thread should keep running - * - * \param[in] d OSP device - * - * \retval 1 should keep running - * \retval 0 should stop - */ +struct osp_job_req_args { + /** bytes reserved for ptlrpc_replay_req() */ + struct ptlrpc_replay_async_args jra_raa; + struct list_head jra_committed_link; + struct list_head jra_in_flight_link; + struct llog_cookie jra_lcookie; + __u32 jra_magic; +}; + +static int osp_sync_add_commit_cb(const struct lu_env *env, + struct osp_device *d, struct thandle *th); + static inline int osp_sync_running(struct osp_device *d) { - return !!(d->opd_syn_thread.t_flags & SVC_RUNNING); + return !!(d->opd_sync_thread.t_flags & SVC_RUNNING); } /** @@ -117,7 +113,7 @@ static inline int osp_sync_running(struct osp_device *d) */ static inline int osp_sync_stopped(struct osp_device *d) { - return !!(d->opd_syn_thread.t_flags & SVC_STOPPED); + return !!(d->opd_sync_thread.t_flags & SVC_STOPPED); } /* @@ -130,14 +126,73 @@ static inline int osp_sync_stopped(struct osp_device *d) */ static inline int osp_sync_has_new_job(struct osp_device *d) { - return ((d->opd_syn_last_processed_id < d->opd_syn_last_used_id) && - (d->opd_syn_last_processed_id < d->opd_syn_last_committed_id)) - || (d->opd_syn_prev_done == 0); + return atomic_read(&d->opd_sync_changes) > 0 || + d->opd_sync_prev_done == 0; +} + +static inline int osp_sync_in_flight_conflict(struct osp_device *d, + struct llog_rec_hdr *h) +{ + struct osp_job_req_args *jra; + struct ost_id ostid; + int conflict = 0; + + if (h == NULL || h->lrh_type == LLOG_GEN_REC || + list_empty(&d->opd_sync_in_flight_list)) + return conflict; + + memset(&ostid, 0, sizeof(ostid)); + switch (h->lrh_type) { + case MDS_UNLINK_REC: { + struct llog_unlink_rec *unlink = (struct llog_unlink_rec *)h; + + ostid_set_seq(&ostid, unlink->lur_oseq); + if (ostid_set_id(&ostid, unlink->lur_oid)) { + CERROR("Bad %llu to set " DOSTID "\n", + (unsigned long long)(unlink->lur_oid), + POSTID(&ostid)); + return 1; + } + } + break; + case MDS_UNLINK64_REC: + fid_to_ostid(&((struct llog_unlink64_rec *)h)->lur_fid, &ostid); + break; + case MDS_SETATTR64_REC: + ostid = ((struct llog_setattr64_rec *)h)->lsr_oi; + break; + default: + LBUG(); + } + + spin_lock(&d->opd_sync_lock); + list_for_each_entry(jra, &d->opd_sync_in_flight_list, + jra_in_flight_link) { + struct ptlrpc_request *req; + struct ost_body *body; + + LASSERT(jra->jra_magic == OSP_JOB_MAGIC); + + req = container_of((void *)jra, struct ptlrpc_request, + rq_async_args); + body = req_capsule_client_get(&req->rq_pill, + &RMF_OST_BODY); + LASSERT(body); + + if (memcmp(&ostid, &body->oa.o_oi, sizeof(ostid)) == 0) { + conflict = 1; + break; + } + } + spin_unlock(&d->opd_sync_lock); + + return conflict; } -static inline int osp_sync_low_in_progress(struct osp_device *d) +static inline int osp_sync_rpcs_in_progress_low(struct osp_device *d) { - return d->opd_syn_rpc_in_progress < d->opd_syn_max_rpc_in_progress; + return atomic_read(&d->opd_sync_rpcs_in_progress) < + d->opd_sync_max_rpcs_in_progress; } /** @@ -148,9 
+203,10 @@ static inline int osp_sync_low_in_progress(struct osp_device *d)
 * \retval 1 there is room
 * \retval 0 no room, the pipe is full
 */
-static inline int osp_sync_low_in_flight(struct osp_device *d)
+static inline int osp_sync_rpcs_in_flight_low(struct osp_device *d)
{
-	return d->opd_syn_rpc_in_flight < d->opd_syn_max_rpc_in_flight;
+	return atomic_read(&d->opd_sync_rpcs_in_flight) <
+		d->opd_sync_max_rpcs_in_flight;
}

/**
@@ -161,30 +217,44 @@ static inline int osp_sync_low_in_flight(struct osp_device *d)
 * \retval 1 time to wake up
 * \retval 0 no need to wake up
 */
-static inline int osp_sync_has_work(struct osp_device *d)
+static inline int osp_sync_has_work(struct osp_device *osp)
{
	/* has new/old changes and low in-progress? */
-	if (osp_sync_has_new_job(d) && osp_sync_low_in_progress(d) &&
-	    osp_sync_low_in_flight(d) && d->opd_imp_connected)
+	if (osp_sync_has_new_job(osp) && osp_sync_rpcs_in_progress_low(osp) &&
+	    osp_sync_rpcs_in_flight_low(osp) && osp->opd_imp_connected)
		return 1;

	/* has remotely committed? */
-	if (!list_empty(&d->opd_syn_committed_there))
+	if (!list_empty(&osp->opd_sync_committed_there))
		return 1;

	return 0;
}

-#define osp_sync_check_for_work(d) \
-{ \
-	if (osp_sync_has_work(d)) { \
-		wake_up(&d->opd_syn_waitq); \
-	} \
+void osp_sync_check_for_work(struct osp_device *osp)
+{
+	if (osp_sync_has_work(osp))
+		wake_up(&osp->opd_sync_waitq);
}

-void __osp_sync_check_for_work(struct osp_device *d)
+static inline __u64 osp_sync_correct_id(struct osp_device *d,
+					struct llog_rec_hdr *rec)
{
-	osp_sync_check_for_work(d);
+	/*
+	 * llog uses a cyclic store with a 32-bit lrh_id, so lrh_id overflow
+	 * is possible. The range between last_processed and last_committed
+	 * is less than 64745^2, which is less than 2^32 - 1, so the full
+	 * 64-bit id can be reconstructed unambiguously.
+	 */
+	__u64 correct_id = d->opd_sync_last_committed_id;
+
+	if ((correct_id & 0xffffffffULL) < rec->lrh_id)
+		correct_id -= 0x100000000ULL;
+
+	correct_id &= ~0xffffffffULL;
+	correct_id |= rec->lrh_id;
+
+	return correct_id;
}

/**
@@ -206,19 +276,24 @@ static inline int osp_sync_can_process_new(struct osp_device *d,
{
	LASSERT(d);

-	if (unlikely(atomic_read(&d->opd_syn_barrier) > 0))
+	if (unlikely(atomic_read(&d->opd_sync_barrier) > 0))
+		return 0;
+	if (unlikely(osp_sync_in_flight_conflict(d, rec)))
		return 0;
-	if (!osp_sync_low_in_progress(d))
+	if (!osp_sync_rpcs_in_progress_low(d))
		return 0;
-	if (!osp_sync_low_in_flight(d))
+	if (!osp_sync_rpcs_in_flight_low(d))
		return 0;
	if (!d->opd_imp_connected)
		return 0;
-	if (d->opd_syn_prev_done == 0)
+	if (d->opd_sync_prev_done == 0)
		return 1;
-	if (d->opd_syn_changes == 0)
+	if (atomic_read(&d->opd_sync_changes) == 0)
		return 0;
-	if (rec == NULL || rec->lrh_id <= d->opd_syn_last_committed_id)
+	if (rec == NULL)
+		return 1;
+	/* notice "<" not "<=" */
+	if (osp_sync_correct_id(d, rec) < d->opd_sync_last_committed_id)
		return 1;
	return 0;
}
@@ -239,37 +314,39 @@ static inline int osp_sync_can_process_new(struct osp_device *d,
 * \retval negative negated errno on error
 */
int osp_sync_declare_add(const struct lu_env *env, struct osp_object *o,
-			 llog_op_type type, struct thandle *th)
+			 enum llog_op_type type, struct thandle *th)
{
	struct osp_thread_info	*osi = osp_env_info(env);
	struct osp_device	*d = lu2osp_dev(o->opo_obj.do_lu.lo_dev);
	struct llog_ctxt	*ctxt;
+	struct thandle		*storage_th;
	int			 rc;

	ENTRY;

	/* it's a layering violation, to access internals of th,
	 * but we can do this as a sanity check, for a while */
-	LASSERT(th->th_dev == d->opd_storage);
+	LASSERT(th->th_top != NULL);
+	storage_th =
thandle_get_sub_by_dt(env, th->th_top, d->opd_storage); + if (IS_ERR(storage_th)) + RETURN(PTR_ERR(storage_th)); switch (type) { case MDS_UNLINK64_REC: osi->osi_hdr.lrh_len = sizeof(struct llog_unlink64_rec); break; case MDS_SETATTR64_REC: - osi->osi_hdr.lrh_len = sizeof(struct llog_setattr64_rec); + osi->osi_hdr.lrh_len = sizeof(struct llog_setattr64_rec_v2); break; default: LBUG(); } - /* we want ->dt_trans_start() to allocate per-thandle structure */ - th->th_tags |= LCT_OSP_THREAD; - ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT); LASSERT(ctxt); - rc = llog_declare_add(env, ctxt->loc_handle, &osi->osi_hdr, th); + rc = llog_declare_add(env, ctxt->loc_handle, &osi->osi_hdr, + storage_th); llog_ctxt_put(ctxt); RETURN(rc); @@ -297,20 +374,24 @@ int osp_sync_declare_add(const struct lu_env *env, struct osp_object *o, * \retval negative negated errno on error */ static int osp_sync_add_rec(const struct lu_env *env, struct osp_device *d, - const struct lu_fid *fid, llog_op_type type, + const struct lu_fid *fid, enum llog_op_type type, int count, struct thandle *th, const struct lu_attr *attr) { struct osp_thread_info *osi = osp_env_info(env); struct llog_ctxt *ctxt; - struct osp_txn_info *txn; + struct thandle *storage_th; + bool immediate_commit_cb = false; int rc; ENTRY; /* it's a layering violation, to access internals of th, * but we can do this as a sanity check, for a while */ - LASSERT(th->th_dev == d->opd_storage); + LASSERT(th->th_top != NULL); + storage_th = thandle_get_sub_by_dt(env, th->th_top, d->opd_storage); + if (IS_ERR(storage_th)) + RETURN(PTR_ERR(storage_th)); switch (type) { case MDS_UNLINK64_REC: @@ -328,46 +409,60 @@ static int osp_sync_add_rec(const struct lu_env *env, struct osp_device *d, LASSERT(attr); osi->osi_setattr.lsr_uid = attr->la_uid; osi->osi_setattr.lsr_gid = attr->la_gid; + osi->osi_setattr.lsr_layout_version = attr->la_layout_version; + osi->osi_setattr.lsr_projid = attr->la_projid; osi->osi_setattr.lsr_valid = ((attr->la_valid & LA_UID) ? OBD_MD_FLUID : 0) | - ((attr->la_valid & LA_GID) ? OBD_MD_FLGID : 0); + ((attr->la_valid & LA_GID) ? OBD_MD_FLGID : 0) | + ((attr->la_valid & LA_PROJID) ? OBD_MD_FLPROJID : 0); + if (attr->la_valid & LA_LAYOUT_VERSION) { + osi->osi_setattr.lsr_valid |= OBD_MD_LAYOUT_VERSION; + + /* FLR: the layout version has to be transferred to + * OST objects ASAP, otherwise clients will have to + * experience delay to be able to write OST objects. 
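+			 * (this is why immediate_commit_cb below selects
+			 * osp_sync_add_commit_cb() instead of the batched
+			 * osp_sync_add_commit_cb_1s())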
 */
+			immediate_commit_cb = true;
+		}
		break;
	default:
		LBUG();
	}

-	txn = osp_txn_info(&th->th_ctx);
-	LASSERT(txn);
-
-	txn->oti_current_id = osp_sync_id_get(d, txn->oti_current_id);
-	osi->osi_hdr.lrh_id = txn->oti_current_id;
+	/* we keep the same id, but increment it when the callback
+	 * is registered, so that all records up to the one taken
+	 * by the callback are subject to processing */
+	spin_lock(&d->opd_sync_lock);
+	osi->osi_hdr.lrh_id = d->opd_sync_last_used_id;
+	spin_unlock(&d->opd_sync_lock);

	ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT);
	if (ctxt == NULL)
		RETURN(-ENOMEM);
+
	rc = llog_add(env, ctxt->loc_handle, &osi->osi_hdr, &osi->osi_cookie,
-		      th);
+		      storage_th);
	llog_ctxt_put(ctxt);

-	CDEBUG(D_OTHER, "%s: new record "DOSTID":%lu/%lu: %d\n",
-	       d->opd_obd->obd_name, POSTID(&osi->osi_cookie.lgc_lgl.lgl_oi),
-	       (unsigned long) osi->osi_cookie.lgc_lgl.lgl_ogen,
-	       (unsigned long) osi->osi_cookie.lgc_index, rc);
-
-	if (rc > 0)
-		rc = 0;
-
-	if (likely(rc == 0)) {
-		spin_lock(&d->opd_syn_lock);
-		d->opd_syn_changes++;
-		spin_unlock(&d->opd_syn_lock);
+	if (likely(rc >= 0)) {
+		CDEBUG(D_OTHER, "%s: new record "DFID":%x.%u: rc = %d\n",
+		       d->opd_obd->obd_name,
+		       PFID(&osi->osi_cookie.lgc_lgl.lgl_oi.oi_fid),
+		       osi->osi_cookie.lgc_lgl.lgl_ogen,
+		       osi->osi_cookie.lgc_index, rc);
+		atomic_inc(&d->opd_sync_changes);
	}

-	RETURN(rc);
+	if (immediate_commit_cb)
+		rc = osp_sync_add_commit_cb(env, d, th);
+	else
+		rc = osp_sync_add_commit_cb_1s(env, d, th);
+
+	/* return 0 always here; the error case just causes no llog record */
+	RETURN(0);
}

int osp_sync_add(const struct lu_env *env, struct osp_object *o,
-		 llog_op_type type, struct thandle *th,
+		 enum llog_op_type type, struct thandle *th,
		 const struct lu_attr *attr)
{
	return osp_sync_add_rec(env, lu2osp_dev(o->opo_obj.do_lu.lo_dev),
@@ -376,7 +471,7 @@ int osp_sync_add(const struct lu_env *env, struct osp_object *o,
}

int osp_sync_gap(const struct lu_env *env, struct osp_device *d,
-		  struct lu_fid *fid, int lost, struct thandle *th)
+		 struct lu_fid *fid, int lost, struct thandle *th)
{
	return osp_sync_add_rec(env, d, fid, MDS_UNLINK64_REC, lost, th, NULL);
}
@@ -410,27 +505,29 @@ int osp_sync_gap(const struct lu_env *env, struct osp_device *d,
static void osp_sync_request_commit_cb(struct ptlrpc_request *req)
{
	struct osp_device	*d = req->rq_cb_data;
+	struct osp_job_req_args	*jra;

-	CDEBUG(D_HA, "commit req %p, transno "LPU64"\n", req, req->rq_transno);
+	CDEBUG(D_HA, "commit req %p, transno %llu\n", req, req->rq_transno);

	if (unlikely(req->rq_transno == 0))
		return;

-	/* do not do any opd_dyn_rpc_* accounting here
+	/* do not do any opd_sync_rpcs_* accounting here
	 * it's done in osp_sync_interpret sooner or later */
-	LASSERT(d);
-	LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
-	LASSERT(list_empty(&req->rq_exp_list));
+
+	jra = ptlrpc_req_async_args(req);
+	LASSERT(jra->jra_magic == OSP_JOB_MAGIC);
+	LASSERT(list_empty(&jra->jra_committed_link));

	ptlrpc_request_addref(req);

-	spin_lock(&d->opd_syn_lock);
-	list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
-	spin_unlock(&d->opd_syn_lock);
+	spin_lock(&d->opd_sync_lock);
+	list_add(&jra->jra_committed_link, &d->opd_sync_committed_there);
+	spin_unlock(&d->opd_sync_lock);

	/* XXX: some batching wouldn't hurt */
-	wake_up(&d->opd_syn_waitq);
+	wake_up(&d->opd_sync_waitq);
}

/**
@@ -439,7 +536,7 @@ static void osp_sync_request_commit_cb(struct ptlrpc_request *req)
 * The callback is called by ptlrpc when RPC is replied.
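 * (the callback is registered as rq_interpret_reply in osp_sync_new_job())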
Now we have to decide * whether we should: * - put request on a special list to wait until it's committed by the target, - * if the request is succesful + * if the request is successful * - schedule llog record cancel if no target object is found * - try later (essentially after reboot) in case of unexpected error * @@ -451,19 +548,20 @@ static void osp_sync_request_commit_cb(struct ptlrpc_request *req) * \retval 0 always */ static int osp_sync_interpret(const struct lu_env *env, - struct ptlrpc_request *req, void *aa, int rc) + struct ptlrpc_request *req, void *args, int rc) { + struct osp_job_req_args *jra = args; struct osp_device *d = req->rq_cb_data; - if (req->rq_svc_thread != (void *) OSP_JOB_MAGIC) - DEBUG_REQ(D_ERROR, req, "bad magic %p\n", req->rq_svc_thread); - LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC); + if (jra->jra_magic != OSP_JOB_MAGIC) { + DEBUG_REQ(D_ERROR, req, "bad magic %u\n", jra->jra_magic); + LBUG(); + } LASSERT(d); CDEBUG(D_HA, "reply req %p/%d, rc %d, transno %u\n", req, atomic_read(&req->rq_refcount), rc, (unsigned) req->rq_transno); - LASSERT(rc || req->rq_transno); if (rc == -ENOENT) { /* @@ -471,36 +569,35 @@ static int osp_sync_interpret(const struct lu_env *env, * but object doesn't exist anymore - cancell llog record */ LASSERT(req->rq_transno == 0); - LASSERT(list_empty(&req->rq_exp_list)); + LASSERT(list_empty(&jra->jra_committed_link)); ptlrpc_request_addref(req); - spin_lock(&d->opd_syn_lock); - list_add(&req->rq_exp_list, &d->opd_syn_committed_there); - spin_unlock(&d->opd_syn_lock); + spin_lock(&d->opd_sync_lock); + list_add(&jra->jra_committed_link, + &d->opd_sync_committed_there); + spin_unlock(&d->opd_sync_lock); - wake_up(&d->opd_syn_waitq); + wake_up(&d->opd_sync_waitq); } else if (rc) { struct obd_import *imp = req->rq_import; /* * error happened, we'll try to repeat on next boot ? 
*/ - LASSERTF(req->rq_transno == 0 || + LASSERTF(req->rq_transno == 0 || rc == -EIO || req->rq_import_generation < imp->imp_generation, - "transno "LPU64", rc %d, gen: req %d, imp %d\n", + "transno %llu, rc %d, gen: req %d, imp %d\n", req->rq_transno, rc, req->rq_import_generation, imp->imp_generation); if (req->rq_transno == 0) { /* this is the last time we see the request * if transno is not zero, then commit cb * will be called at some point */ - LASSERT(d->opd_syn_rpc_in_progress > 0); - spin_lock(&d->opd_syn_lock); - d->opd_syn_rpc_in_progress--; - spin_unlock(&d->opd_syn_lock); + LASSERT(atomic_read(&d->opd_sync_rpcs_in_progress) > 0); + atomic_dec(&d->opd_sync_rpcs_in_progress); } - wake_up(&d->opd_syn_waitq); + wake_up(&d->opd_sync_waitq); } else if (d->opd_pre != NULL && unlikely(d->opd_pre_status == -ENOSPC)) { /* @@ -511,15 +608,16 @@ static int osp_sync_interpret(const struct lu_env *env, osp_statfs_need_now(d); } - LASSERT(d->opd_syn_rpc_in_flight > 0); - spin_lock(&d->opd_syn_lock); - d->opd_syn_rpc_in_flight--; - spin_unlock(&d->opd_syn_lock); - if (unlikely(atomic_read(&d->opd_syn_barrier) > 0)) - wake_up(&d->opd_syn_barrier_waitq); + spin_lock(&d->opd_sync_lock); + list_del_init(&jra->jra_in_flight_link); + spin_unlock(&d->opd_sync_lock); + LASSERT(atomic_read(&d->opd_sync_rpcs_in_flight) > 0); + atomic_dec(&d->opd_sync_rpcs_in_flight); + if (unlikely(atomic_read(&d->opd_sync_barrier) > 0)) + wake_up(&d->opd_sync_barrier_waitq); CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n", - d->opd_obd->obd_name, d->opd_syn_rpc_in_flight, - d->opd_syn_rpc_in_progress); + d->opd_obd->obd_name, atomic_read(&d->opd_sync_rpcs_in_flight), + atomic_read(&d->opd_sync_rpcs_in_progress)); osp_sync_check_for_work(d); @@ -532,15 +630,31 @@ static int osp_sync_interpret(const struct lu_env *env, * This is just a tiny helper function to put the request on the sending list * * \param[in] d OSP device + * \param[in] llh llog handle where the record is stored + * \param[in] h llog record * \param[in] req request */ static void osp_sync_send_new_rpc(struct osp_device *d, + struct llog_handle *llh, + struct llog_rec_hdr *h, struct ptlrpc_request *req) { - LASSERT(d->opd_syn_rpc_in_flight <= d->opd_syn_max_rpc_in_flight); - LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC); - - ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1); + struct osp_job_req_args *jra; + + LASSERT(atomic_read(&d->opd_sync_rpcs_in_flight) <= + d->opd_sync_max_rpcs_in_flight); + + jra = ptlrpc_req_async_args(req); + jra->jra_magic = OSP_JOB_MAGIC; + jra->jra_lcookie.lgc_lgl = llh->lgh_id; + jra->jra_lcookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT; + jra->jra_lcookie.lgc_index = h->lrh_index; + INIT_LIST_HEAD(&jra->jra_committed_link); + spin_lock(&d->opd_sync_lock); + list_add_tail(&jra->jra_in_flight_link, &d->opd_sync_in_flight_list); + spin_unlock(&d->opd_sync_lock); + + ptlrpcd_add_req(req); } @@ -553,8 +667,6 @@ static void osp_sync_send_new_rpc(struct osp_device *d, * are initialized. 
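 * (rq_interpret_reply, rq_commit_cb and rq_cb_data are set up here, while
 * the llog cookie is attached to the request later, in osp_sync_send_new_rpc())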
* * \param[in] d OSP device - * \param[in] llh llog handle where the record is stored - * \param[in] h llog record * \param[in] op type of the change * \param[in] format request format to be used * @@ -562,19 +674,20 @@ static void osp_sync_send_new_rpc(struct osp_device *d, * \retval ERR_PTR(errno) on error */ static struct ptlrpc_request *osp_sync_new_job(struct osp_device *d, - struct llog_handle *llh, - struct llog_rec_hdr *h, - ost_cmd_t op, + enum ost_cmd op, const struct req_format *format) { struct ptlrpc_request *req; - struct ost_body *body; struct obd_import *imp; int rc; /* Prepare the request */ imp = d->opd_obd->u.cli.cl_import; LASSERT(imp); + + if (OBD_FAIL_CHECK(OBD_FAIL_OSP_CHECK_ENOMEM)) + RETURN(ERR_PTR(-ENOMEM)); + req = ptlrpc_request_alloc(imp, format); if (req == NULL) RETURN(ERR_PTR(-ENOMEM)); @@ -585,20 +698,6 @@ static struct ptlrpc_request *osp_sync_new_job(struct osp_device *d, return ERR_PTR(rc); } - /* - * this is a trick: to save on memory allocations we put cookie - * into the request, but don't set corresponded flag in o_valid - * so that OST doesn't interpret this cookie. once the request - * is committed on OST we take cookie from the request and cancel - */ - body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); - LASSERT(body); - body->oa.o_lcookie.lgc_lgl = llh->lgh_id; - body->oa.o_lcookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT; - body->oa.o_lcookie.lgc_index = h->lrh_index; - INIT_LIST_HEAD(&req->rq_exp_list); - req->rq_svc_thread = (void *) OSP_JOB_MAGIC; - req->rq_interpret_reply = osp_sync_interpret; req->rq_commit_cb = osp_sync_request_commit_cb; req->rq_cb_data = d; @@ -619,6 +718,7 @@ static struct ptlrpc_request *osp_sync_new_job(struct osp_device *d, * \param[in] h llog record * * \retval 0 on success + * \retval 1 on invalid record * \retval negative negated errno on error */ static int osp_sync_new_setattr_job(struct osp_device *d, @@ -632,17 +732,20 @@ static int osp_sync_new_setattr_job(struct osp_device *d, ENTRY; LASSERT(h->lrh_type == MDS_SETATTR64_REC); - /* lsr_valid can only be 0 or have OBD_MD_{FLUID,FLGID} set, + if (OBD_FAIL_CHECK(OBD_FAIL_OSP_CHECK_INVALID_REC)) + RETURN(1); + + /* lsr_valid can only be 0 or HAVE OBD_MD_{FLUID, FLGID, FLPROJID} set, * so no bits other than these should be set. */ - if ((rec->lsr_valid & ~(OBD_MD_FLUID | OBD_MD_FLGID)) != 0) { - CERROR("%s: invalid setattr record, lsr_valid:"LPU64"\n", - d->opd_obd->obd_name, rec->lsr_valid); - /* return 0 so that sync thread can continue processing - * other records. */ - RETURN(0); + if ((rec->lsr_valid & ~(OBD_MD_FLUID | OBD_MD_FLGID | + OBD_MD_FLPROJID | OBD_MD_LAYOUT_VERSION)) != 0) { + CERROR("%s: invalid setattr record, lsr_valid:%llu\n", + d->opd_obd->obd_name, rec->lsr_valid); + /* return 1 on invalid record */ + RETURN(1); } - req = osp_sync_new_job(d, llh, h, OST_SETATTR, &RQF_OST_SETATTR); + req = osp_sync_new_job(d, OST_SETATTR, &RQF_OST_SETATTR); if (IS_ERR(req)) RETURN(PTR_ERR(req)); @@ -652,6 +755,12 @@ static int osp_sync_new_setattr_job(struct osp_device *d, body->oa.o_uid = rec->lsr_uid; body->oa.o_gid = rec->lsr_gid; body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID; + if (h->lrh_len > sizeof(struct llog_setattr64_rec)) { + struct llog_setattr64_rec_v2 *rec_v2 = (typeof(rec_v2))rec; + body->oa.o_projid = rec_v2->lsr_projid; + body->oa.o_layout_version = rec_v2->lsr_layout_version; + } + /* old setattr record (prior 2.6.0) doesn't have 'valid' stored, * we assume that both UID and GID are valid in that case. 
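	 * (i.e. OBD_MD_FLUID | OBD_MD_FLGID are ORed into o_valid below)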
*/ if (rec->lsr_valid == 0) @@ -659,8 +768,14 @@ static int osp_sync_new_setattr_job(struct osp_device *d, else body->oa.o_valid |= rec->lsr_valid; - osp_sync_send_new_rpc(d, req); - RETURN(1); + if (body->oa.o_valid & OBD_MD_LAYOUT_VERSION) { + OBD_FAIL_TIMEOUT(OBD_FAIL_FLR_LV_DELAY, cfs_fail_val); + if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_FLR_LV_INC))) + ++body->oa.o_layout_version; + } + + osp_sync_send_new_rpc(d, llh, h, req); + RETURN(0); } /** @@ -685,104 +800,28 @@ static int osp_sync_new_unlink_job(struct osp_device *d, struct llog_unlink_rec *rec = (struct llog_unlink_rec *)h; struct ptlrpc_request *req; struct ost_body *body; + int rc; ENTRY; LASSERT(h->lrh_type == MDS_UNLINK_REC); - req = osp_sync_new_job(d, llh, h, OST_DESTROY, &RQF_OST_DESTROY); + req = osp_sync_new_job(d, OST_DESTROY, &RQF_OST_DESTROY); if (IS_ERR(req)) RETURN(PTR_ERR(req)); body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); LASSERT(body); ostid_set_seq(&body->oa.o_oi, rec->lur_oseq); - ostid_set_id(&body->oa.o_oi, rec->lur_oid); + rc = ostid_set_id(&body->oa.o_oi, rec->lur_oid); + if (rc) + return rc; body->oa.o_misc = rec->lur_count; body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID; if (rec->lur_count) body->oa.o_valid |= OBD_MD_FLOBJCOUNT; - osp_sync_send_new_rpc(d, req); - RETURN(1); -} - -/** - * Prepare OUT-based object destroy RPC. - * - * The function allocates a new RPC with OUT format. Then initializes the RPC - * to contain OUT_DESTROY update against the object specified in the llog - * record provided by the caller. - * - * \param[in] env LU environment provided by the caller - * \param[in] osp OSP device - * \param[in] llh llog handle where the record is stored - * \param[in] h llog record - * \param[out] reqp request prepared - * - * \retval 0 on success - * \retval negative negated errno on error - */ -static int osp_prep_unlink_update_req(const struct lu_env *env, - struct osp_device *osp, - struct llog_handle *llh, - struct llog_rec_hdr *h, - struct ptlrpc_request **reqp) -{ - struct llog_unlink64_rec *rec = (struct llog_unlink64_rec *)h; - struct dt_update_request *update = NULL; - struct ptlrpc_request *req; - struct llog_cookie lcookie; - const void *buf; - __u16 size; - int rc; - ENTRY; - - update = dt_update_request_create(&osp->opd_dt_dev); - if (IS_ERR(update)) - RETURN(PTR_ERR(update)); - - /* This can only happens for unlink slave directory, so decrease - * ref for ".." and "." 
*/ - rc = out_update_pack(env, &update->dur_buf, OUT_REF_DEL, &rec->lur_fid, - 0, NULL, NULL, 0); - if (rc != 0) - GOTO(out, rc); - - rc = out_update_pack(env, &update->dur_buf, OUT_REF_DEL, &rec->lur_fid, - 0, NULL, NULL, 0); - if (rc != 0) - GOTO(out, rc); - - lcookie.lgc_lgl = llh->lgh_id; - lcookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT; - lcookie.lgc_index = h->lrh_index; - size = sizeof(lcookie); - buf = &lcookie; - - rc = out_update_pack(env, &update->dur_buf, OUT_DESTROY, &rec->lur_fid, - 1, &size, &buf, 0); - if (rc != 0) - GOTO(out, rc); - - rc = out_prep_update_req(env, osp->opd_obd->u.cli.cl_import, - update->dur_buf.ub_req, &req); - if (rc != 0) - GOTO(out, rc); - - INIT_LIST_HEAD(&req->rq_exp_list); - req->rq_svc_thread = (void *)OSP_JOB_MAGIC; - - req->rq_interpret_reply = osp_sync_interpret; - req->rq_commit_cb = osp_sync_request_commit_cb; - req->rq_cb_data = osp; - - ptlrpc_request_set_replen(req); - *reqp = req; -out: - if (update != NULL) - dt_update_request_destroy(update); - - RETURN(rc); + osp_sync_send_new_rpc(d, llh, h, req); + RETURN(0); } /** @@ -796,7 +835,6 @@ out: * use OUT for OST as well, this will allow batching and better code * unification. * - * \param[in] env LU environment provided by the caller * \param[in] d OSP device * \param[in] llh llog handle where the record is stored * \param[in] h llog record @@ -804,8 +842,7 @@ out: * \retval 0 on success * \retval negative negated errno on error */ -static int osp_sync_new_unlink64_job(const struct lu_env *env, - struct osp_device *d, +static int osp_sync_new_unlink64_job(struct osp_device *d, struct llog_handle *llh, struct llog_rec_hdr *h) { @@ -816,29 +853,21 @@ static int osp_sync_new_unlink64_job(const struct lu_env *env, ENTRY; LASSERT(h->lrh_type == MDS_UNLINK64_REC); + req = osp_sync_new_job(d, OST_DESTROY, &RQF_OST_DESTROY); + if (IS_ERR(req)) + RETURN(PTR_ERR(req)); - if (d->opd_connect_mdt) { - rc = osp_prep_unlink_update_req(env, d, llh, h, &req); - if (rc != 0) - RETURN(rc); - } else { - req = osp_sync_new_job(d, llh, h, OST_DESTROY, - &RQF_OST_DESTROY); - if (IS_ERR(req)) - RETURN(PTR_ERR(req)); - - body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) - RETURN(-EFAULT); - rc = fid_to_ostid(&rec->lur_fid, &body->oa.o_oi); - if (rc < 0) - RETURN(rc); - body->oa.o_misc = rec->lur_count; - body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID | - OBD_MD_FLOBJCOUNT; - } - osp_sync_send_new_rpc(d, req); - RETURN(1); + body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); + if (body == NULL) + RETURN(-EFAULT); + rc = fid_to_ostid(&rec->lur_fid, &body->oa.o_oi); + if (rc < 0) + RETURN(rc); + body->oa.o_misc = rec->lur_count; + body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID | + OBD_MD_FLOBJCOUNT; + osp_sync_send_new_rpc(d, llh, h, req); + RETURN(0); } /** @@ -856,38 +885,42 @@ static int osp_sync_new_unlink64_job(const struct lu_env *env, * \param[in] d OSP device * \param[in] llh llog handle where the record is stored * \param[in] rec llog record - * - * \retval 0 on success - * \retval negative negated errno on error */ -static int osp_sync_process_record(const struct lu_env *env, - struct osp_device *d, - struct llog_handle *llh, - struct llog_rec_hdr *rec) +static void osp_sync_process_record(const struct lu_env *env, + struct osp_device *d, + struct llog_handle *llh, + struct llog_rec_hdr *rec) { + struct llog_handle *cathandle = llh->u.phd.phd_cat_handle; struct llog_cookie cookie; int rc = 0; + ENTRY; + cookie.lgc_lgl = llh->lgh_id; cookie.lgc_subsys = 
LLOG_MDS_OST_ORIG_CTXT;
	cookie.lgc_index = rec->lrh_index;

+	d->opd_sync_last_catalog_idx = llh->lgh_hdr->llh_cat_idx;
+
	if (unlikely(rec->lrh_type == LLOG_GEN_REC)) {
		struct llog_gen_rec *gen = (struct llog_gen_rec *)rec;

		/* we're waiting for the record generated by this instance */
-		LASSERT(d->opd_syn_prev_done == 0);
+		LASSERT(d->opd_sync_prev_done == 0);
-		if (!memcmp(&d->opd_syn_generation, &gen->lgr_gen,
+		if (!memcmp(&d->opd_sync_generation, &gen->lgr_gen,
			    sizeof(gen->lgr_gen))) {
			CDEBUG(D_HA, "processed all old entries\n");
-			d->opd_syn_prev_done = 1;
+			d->opd_sync_prev_done = 1;
		}

		/* cancel any generation record */
-		rc = llog_cat_cancel_records(env, llh->u.phd.phd_cat_handle,
-					     1, &cookie);
+		rc = llog_cat_cancel_records(env, cathandle, 1, &cookie);

-		return rc;
+		/* flush all pending records ASAP */
+		osp_sync_force(env, d);
+
+		RETURN_EXIT;
	}

	/*
@@ -897,10 +930,8 @@ static int osp_sync_process_record(const struct lu_env *env,
	/* notice we increment counters before sending RPC, to be consistent
	 * in RPC interpret callback which may happen very quickly */
-	spin_lock(&d->opd_syn_lock);
-	d->opd_syn_rpc_in_flight++;
-	d->opd_syn_rpc_in_progress++;
-	spin_unlock(&d->opd_syn_lock);
+	atomic_inc(&d->opd_sync_rpcs_in_flight);
+	atomic_inc(&d->opd_sync_rpcs_in_progress);

	switch (rec->lrh_type) {
	/* case MDS_UNLINK_REC is kept for compatibility */
	case MDS_UNLINK_REC:
		rc = osp_sync_new_unlink_job(d, llh, rec);
		break;
	case MDS_UNLINK64_REC:
-		rc = osp_sync_new_unlink64_job(env, d, llh, rec);
+		rc = osp_sync_new_unlink64_job(d, llh, rec);
		break;
	case MDS_SETATTR64_REC:
		rc = osp_sync_new_setattr_job(d, llh, rec);
@@ -916,41 +947,44 @@
	default:
		CERROR("%s: unknown record type: %x\n", d->opd_obd->obd_name,
		       rec->lrh_type);
-		/* we should continue processing */
+		/* treat "unknown record type" as "invalid" */
+		rc = 1;
+		break;
	}

-	/* rc > 0 means sync RPC being added to the queue */
-	if (likely(rc > 0)) {
-		spin_lock(&d->opd_syn_lock);
-		if (d->opd_syn_prev_done) {
-			LASSERT(d->opd_syn_changes > 0);
-			LASSERT(rec->lrh_id <= d->opd_syn_last_committed_id);
-			/*
-			 * NOTE: it's possible to meet same id if
-			 * OST stores few stripes of same file
-			 */
-			if (rec->lrh_id > d->opd_syn_last_processed_id) {
-				d->opd_syn_last_processed_id = rec->lrh_id;
-				wake_up(&d->opd_syn_barrier_waitq);
-			}
+	/* For all kinds of records, no matter successful or not,
+	 * we should decrease changes and bump last_processed_id.
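+	 * Records flagged invalid (rc == 1) are cancelled right below,
+	 * so the sync thread does not retry them forever.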
+ */ + if (d->opd_sync_prev_done) { + LASSERT(atomic_read(&d->opd_sync_changes) > 0); + atomic_dec(&d->opd_sync_changes); + wake_up(&d->opd_sync_barrier_waitq); + } + atomic64_inc(&d->opd_sync_processed_recs); + if (rc != 0) { + atomic_dec(&d->opd_sync_rpcs_in_flight); + atomic_dec(&d->opd_sync_rpcs_in_progress); + } - d->opd_syn_changes--; - } - CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n", - d->opd_obd->obd_name, d->opd_syn_rpc_in_flight, - d->opd_syn_rpc_in_progress); - spin_unlock(&d->opd_syn_lock); - rc = 0; - } else { - spin_lock(&d->opd_syn_lock); - d->opd_syn_rpc_in_flight--; - d->opd_syn_rpc_in_progress--; - spin_unlock(&d->opd_syn_lock); + CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n", + d->opd_obd->obd_name, atomic_read(&d->opd_sync_rpcs_in_flight), + atomic_read(&d->opd_sync_rpcs_in_progress)); + + /* Delete the invalid record */ + if (rc == 1) { + rc = llog_cat_cancel_records(env, cathandle, 1, &cookie); + if (rc != 0) + CERROR("%s: can't delete invalid record: " + "fid = "DFID", rec_id = %u, rc = %d\n", + d->opd_obd->obd_name, + PFID(lu_object_fid(&cathandle->lgh_obj->do_lu)), + rec->lrh_id, rc); } - CDEBUG(D_HA, "found record %x, %d, idx %u, id %u: %d\n", - rec->lrh_type, rec->lrh_len, rec->lrh_index, rec->lrh_id, rc); - return rc; + CDEBUG(D_HA, "found record %x, %d, idx %u, id %u\n", + rec->lrh_type, rec->lrh_len, rec->lrh_index, rec->lrh_id); + + RETURN_EXIT; } /** @@ -969,15 +1003,17 @@ static void osp_sync_process_committed(const struct lu_env *env, struct obd_device *obd = d->opd_obd; struct obd_import *imp = obd->u.cli.cl_import; struct ost_body *body; - struct ptlrpc_request *req, *tmp; + struct ptlrpc_request *req; struct llog_ctxt *ctxt; struct llog_handle *llh; - struct list_head list; - int rc, done = 0; + int *arr; + struct list_head list, *le; + struct llog_logid lgid; + int rc, i, count = 0, done = 0; ENTRY; - if (list_empty(&d->opd_syn_committed_there)) + if (list_empty(&d->opd_sync_committed_there)) return; /* @@ -1003,69 +1039,83 @@ static void osp_sync_process_committed(const struct lu_env *env, LASSERT(llh); INIT_LIST_HEAD(&list); - spin_lock(&d->opd_syn_lock); - list_splice(&d->opd_syn_committed_there, &list); - INIT_LIST_HEAD(&d->opd_syn_committed_there); - spin_unlock(&d->opd_syn_lock); - - list_for_each_entry_safe(req, tmp, &list, rq_exp_list) { - struct llog_cookie *lcookie = NULL; - - LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC); - list_del_init(&req->rq_exp_list); - - if (d->opd_connect_mdt) { - struct object_update_request *ureq; - struct object_update *update; - ureq = req_capsule_client_get(&req->rq_pill, - &RMF_OUT_UPDATE); - LASSERT(ureq != NULL && - ureq->ourq_magic == UPDATE_REQUEST_MAGIC); - - /* 1st/2nd is for decref . and .., 3rd one is for - * destroy, where the log cookie is stored. 
- * See osp_prep_unlink_update_req */ - update = object_update_request_get(ureq, 2, NULL); - LASSERT(update != NULL); - lcookie = object_update_param_get(update, 0, NULL); - LASSERT(lcookie != NULL); - } else { - body = req_capsule_client_get(&req->rq_pill, - &RMF_OST_BODY); - LASSERT(body); - lcookie = &body->oa.o_lcookie; - } + spin_lock(&d->opd_sync_lock); + list_splice(&d->opd_sync_committed_there, &list); + INIT_LIST_HEAD(&d->opd_sync_committed_there); + spin_unlock(&d->opd_sync_lock); + + list_for_each(le, &list) + count++; + if (count > 2) + OBD_ALLOC_WAIT(arr, sizeof(int) * count); + else + arr = NULL; + i = 0; + while (!list_empty(&list)) { + struct osp_job_req_args *jra; + + jra = list_entry(list.next, struct osp_job_req_args, + jra_committed_link); + LASSERT(jra->jra_magic == OSP_JOB_MAGIC); + list_del_init(&jra->jra_committed_link); + + req = container_of((void *)jra, struct ptlrpc_request, + rq_async_args); + body = req_capsule_client_get(&req->rq_pill, + &RMF_OST_BODY); + LASSERT(body); /* import can be closing, thus all commit cb's are * called we can check committness directly */ - if (req->rq_transno <= imp->imp_peer_committed_transno) { - rc = llog_cat_cancel_records(env, llh, 1, lcookie); - if (rc) - CERROR("%s: can't cancel record: %d\n", - obd->obd_name, rc); + if (req->rq_import_generation == imp->imp_generation) { + if (arr && (!i || + !memcmp(&jra->jra_lcookie.lgc_lgl, &lgid, + sizeof(lgid)))) { + if (unlikely(!i)) + lgid = jra->jra_lcookie.lgc_lgl; + + arr[i++] = jra->jra_lcookie.lgc_index; + } else { + rc = llog_cat_cancel_records(env, llh, 1, + &jra->jra_lcookie); + if (rc) + CERROR("%s: can't cancel record: %d\n", + obd->obd_name, rc); + } } else { - DEBUG_REQ(D_HA, req, "not committed"); + DEBUG_REQ(D_OTHER, req, "imp_committed = %llu", + imp->imp_peer_committed_transno); } - ptlrpc_req_finished(req); done++; } + if (arr && i > 0) { + rc = llog_cat_cancel_arr_rec(env, llh, &lgid, i, arr); + + if (rc) + CERROR("%s: can't cancel %d records rc: %d\n", + obd->obd_name, i, rc); + else + CDEBUG(D_OTHER, "%s: massive records cancel id "DFID\ + " num %d\n", obd->obd_name, + PFID(&lgid.lgl_oi.oi_fid), i); + } + if (arr) + OBD_FREE(arr, sizeof(int) * count); llog_ctxt_put(ctxt); - LASSERT(d->opd_syn_rpc_in_progress >= done); - spin_lock(&d->opd_syn_lock); - d->opd_syn_rpc_in_progress -= done; - spin_unlock(&d->opd_syn_lock); - CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n", - d->opd_obd->obd_name, d->opd_syn_rpc_in_flight, - d->opd_syn_rpc_in_progress); + LASSERT(atomic_read(&d->opd_sync_rpcs_in_progress) >= done); + atomic_sub(done, &d->opd_sync_rpcs_in_progress); + CDEBUG(D_OTHER, "%s: %d in flight, %d in progress, done %d\n", + d->opd_obd->obd_name, atomic_read(&d->opd_sync_rpcs_in_flight), + atomic_read(&d->opd_sync_rpcs_in_progress), done); osp_sync_check_for_work(d); /* wake up the thread if requested to stop: * it might be waiting for in-progress to complete */ if (unlikely(osp_sync_running(d) == 0)) - wake_up(&d->opd_syn_waitq); + wake_up(&d->opd_sync_waitq); EXIT; } @@ -1092,7 +1142,6 @@ static int osp_sync_process_queues(const struct lu_env *env, void *data) { struct osp_device *d = data; - int rc; do { struct l_wait_info lwi = { 0 }; @@ -1110,45 +1159,22 @@ static int osp_sync_process_queues(const struct lu_env *env, if (osp_sync_can_process_new(d, rec)) { if (llh == NULL) { /* ask llog for another record */ - CDEBUG(D_HA, "%lu changes, %u in progress," - " %u in flight\n", - d->opd_syn_changes, - d->opd_syn_rpc_in_progress, - d->opd_syn_rpc_in_flight); 
+ CDEBUG(D_HA, "%u changes, %u in progress," + " %u in flight\n", + atomic_read(&d->opd_sync_changes), + atomic_read(&d->opd_sync_rpcs_in_progress), + atomic_read(&d->opd_sync_rpcs_in_flight)); return 0; } - - /* - * try to send, in case of disconnection, suspend - * processing till we can send this request - */ - do { - rc = osp_sync_process_record(env, d, llh, rec); - /* - * XXX: probably different handling is needed - * for some bugs, like immediate exit or if - * OSP gets inactive - */ - if (rc) { - CERROR("can't send: %d\n", rc); - l_wait_event(d->opd_syn_waitq, - !osp_sync_running(d) || - osp_sync_has_work(d), - &lwi); - } - } while (rc != 0 && osp_sync_running(d)); - + osp_sync_process_record(env, d, llh, rec); llh = NULL; rec = NULL; } - if (d->opd_syn_last_processed_id == d->opd_syn_last_used_id) - osp_sync_remove_from_tracker(d); - - l_wait_event(d->opd_syn_waitq, + l_wait_event(d->opd_sync_waitq, !osp_sync_running(d) || osp_sync_can_process_new(d, rec) || - !list_empty(&d->opd_syn_committed_there), + !list_empty(&d->opd_sync_committed_there), &lwi); } while (1); } @@ -1176,13 +1202,14 @@ static int osp_sync_process_queues(const struct lu_env *env, static int osp_sync_thread(void *_arg) { struct osp_device *d = _arg; - struct ptlrpc_thread *thread = &d->opd_syn_thread; + struct ptlrpc_thread *thread = &d->opd_sync_thread; struct l_wait_info lwi = { 0 }; struct llog_ctxt *ctxt; struct obd_device *obd = d->opd_obd; struct llog_handle *llh; struct lu_env env; int rc, count; + bool wrapped; ENTRY; @@ -1190,12 +1217,18 @@ static int osp_sync_thread(void *_arg) if (rc) { CERROR("%s: can't initialize env: rc = %d\n", obd->obd_name, rc); + + spin_lock(&d->opd_sync_lock); + thread->t_flags = SVC_STOPPED; + spin_unlock(&d->opd_sync_lock); + wake_up(&thread->t_ctl_waitq); + RETURN(rc); } - spin_lock(&d->opd_syn_lock); + spin_lock(&d->opd_sync_lock); thread->t_flags = SVC_RUNNING; - spin_unlock(&d->opd_syn_lock); + spin_unlock(&d->opd_sync_lock); wake_up(&thread->t_ctl_waitq); ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT); @@ -1211,46 +1244,84 @@ static int osp_sync_thread(void *_arg) GOTO(out, rc = -EINVAL); } - rc = llog_cat_process(&env, llh, osp_sync_process_queues, d, 0, 0); + /* + * Catalog processing stops when it processed last catalog record + * with index equal to the end of catalog bitmap. Or if it is wrapped, + * processing stops with index equal to the lgh_last_idx. We need to + * continue processing. + */ + d->opd_sync_last_catalog_idx = 0; + do { + int size; + + wrapped = (llh->lgh_hdr->llh_cat_idx >= llh->lgh_last_idx && + llh->lgh_hdr->llh_count > 1); + + rc = llog_cat_process(&env, llh, osp_sync_process_queues, d, + d->opd_sync_last_catalog_idx, 0); + + size = OBD_FAIL_PRECHECK(OBD_FAIL_CAT_RECORDS) ? + cfs_fail_val : (LLOG_HDR_BITMAP_SIZE(llh->lgh_hdr) - 1); + /* processing reaches catalog bottom */ + if (d->opd_sync_last_catalog_idx == size) + d->opd_sync_last_catalog_idx = LLOG_CAT_FIRST; + else if (wrapped) + /* If catalog is wrapped we can`t predict last index of + * processing because lgh_last_idx could be changed. 
+ * Starting form the next one */ + d->opd_sync_last_catalog_idx++; + + } while (rc == 0 && (wrapped || + d->opd_sync_last_catalog_idx == LLOG_CAT_FIRST)); + + if (rc < 0) { + CERROR("%s: llog process with osp_sync_process_queues " + "failed: %d\n", d->opd_obd->obd_name, rc); + GOTO(close, rc); + } LASSERTF(rc == 0 || rc == LLOG_PROC_BREAK, - "%lu changes, %u in progress, %u in flight: %d\n", - d->opd_syn_changes, d->opd_syn_rpc_in_progress, - d->opd_syn_rpc_in_flight, rc); + "%u changes, %u in progress, %u in flight: %d\n", + atomic_read(&d->opd_sync_changes), + atomic_read(&d->opd_sync_rpcs_in_progress), + atomic_read(&d->opd_sync_rpcs_in_flight), rc); /* we don't expect llog_process_thread() to exit till umount */ LASSERTF(thread->t_flags != SVC_RUNNING, - "%lu changes, %u in progress, %u in flight\n", - d->opd_syn_changes, d->opd_syn_rpc_in_progress, - d->opd_syn_rpc_in_flight); + "%u changes, %u in progress, %u in flight\n", + atomic_read(&d->opd_sync_changes), + atomic_read(&d->opd_sync_rpcs_in_progress), + atomic_read(&d->opd_sync_rpcs_in_flight)); /* wait till all the requests are completed */ count = 0; - while (d->opd_syn_rpc_in_progress > 0) { + while (atomic_read(&d->opd_sync_rpcs_in_progress) > 0) { osp_sync_process_committed(&env, d); lwi = LWI_TIMEOUT(cfs_time_seconds(5), NULL, NULL); - rc = l_wait_event(d->opd_syn_waitq, - d->opd_syn_rpc_in_progress == 0, + rc = l_wait_event(d->opd_sync_waitq, + atomic_read(&d->opd_sync_rpcs_in_progress) == 0, &lwi); if (rc == -ETIMEDOUT) count++; LASSERTF(count < 10, "%s: %d %d %sempty\n", - d->opd_obd->obd_name, d->opd_syn_rpc_in_progress, - d->opd_syn_rpc_in_flight, - list_empty(&d->opd_syn_committed_there) ? "" : "!"); + d->opd_obd->obd_name, + atomic_read(&d->opd_sync_rpcs_in_progress), + atomic_read(&d->opd_sync_rpcs_in_flight), + list_empty(&d->opd_sync_committed_there) ? "" : "!"); } +close: llog_cat_close(&env, llh); rc = llog_cleanup(&env, ctxt); if (rc) CERROR("can't cleanup llog: %d\n", rc); out: - LASSERTF(d->opd_syn_rpc_in_progress == 0, - "%s: %d %d %sempty\n", - d->opd_obd->obd_name, d->opd_syn_rpc_in_progress, - d->opd_syn_rpc_in_flight, - list_empty(&d->opd_syn_committed_there) ? "" : "!"); + LASSERTF(atomic_read(&d->opd_sync_rpcs_in_progress) == 0, + "%s: %d %d %sempty\n", d->opd_obd->obd_name, + atomic_read(&d->opd_sync_rpcs_in_progress), + atomic_read(&d->opd_sync_rpcs_in_flight), + list_empty(&d->opd_sync_committed_there) ? 
"" : "!"); thread->t_flags = SVC_STOPPED; @@ -1294,10 +1365,7 @@ static int osp_sync_llog_init(const struct lu_env *env, struct osp_device *d) OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt); obd->obd_lvfs_ctxt.dt = d->opd_storage; - if (d->opd_connect_mdt) - lu_local_obj_fid(fid, SLAVE_LLOG_CATALOGS_OID); - else - lu_local_obj_fid(fid, LLOG_CATALOGS_OID); + lu_local_obj_fid(fid, LLOG_CATALOGS_OID); rc = llog_osd_get_cat_list(env, d->opd_storage, d->opd_index, 1, &osi->osi_cid, fid); @@ -1316,13 +1384,14 @@ static int osp_sync_llog_init(const struct lu_env *env, struct osp_device *d) rc = 0; } - CDEBUG(D_INFO, "%s: Init llog for %d - catid "DOSTID":%x\n", + CDEBUG(D_INFO, "%s: Init llog for %d - catid "DFID":%x\n", obd->obd_name, d->opd_index, - POSTID(&osi->osi_cid.lci_logid.lgl_oi), + PFID(&osi->osi_cid.lci_logid.lgl_oi.oi_fid), osi->osi_cid.lci_logid.lgl_ogen); - rc = llog_setup(env, obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, obd, - &osp_mds_ost_orig_logops); + rc = llog_setup(env, obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, + d->opd_storage->dd_lu_dev.ld_obd, + &llog_common_cat_ops); if (rc) RETURN(rc); @@ -1349,7 +1418,7 @@ static int osp_sync_llog_init(const struct lu_env *env, struct osp_device *d) LASSERT(lgh != NULL); ctxt->loc_handle = lgh; - rc = llog_cat_init_and_process(env, lgh); + rc = llog_init_handle(env, lgh, LLOG_F_IS_CAT, NULL); if (rc) GOTO(out_close, rc); @@ -1362,13 +1431,13 @@ static int osp_sync_llog_init(const struct lu_env *env, struct osp_device *d) * put a mark in the llog till which we'll be processing * old records restless */ - d->opd_syn_generation.mnt_cnt = cfs_time_current(); - d->opd_syn_generation.conn_cnt = cfs_time_current(); + d->opd_sync_generation.mnt_cnt = ktime_get_ns(); + d->opd_sync_generation.conn_cnt = ktime_get_ns(); osi->osi_hdr.lrh_type = LLOG_GEN_REC; osi->osi_hdr.lrh_len = sizeof(osi->osi_gen); - memcpy(&osi->osi_gen.lgr_gen, &d->opd_syn_generation, + memcpy(&osi->osi_gen.lgr_gen, &d->opd_sync_generation, sizeof(osi->osi_gen.lgr_gen)); rc = llog_cat_add(env, lgh, &osi->osi_gen.lgr_hdr, &osi->osi_cookie); @@ -1397,9 +1466,10 @@ static void osp_sync_llog_fini(const struct lu_env *env, struct osp_device *d) struct llog_ctxt *ctxt; ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT); - if (ctxt != NULL) + if (ctxt) { llog_cat_close(env, ctxt->loc_handle); - llog_cleanup(env, ctxt); + llog_cleanup(env, ctxt); + } } /** @@ -1422,9 +1492,18 @@ int osp_sync_init(const struct lu_env *env, struct osp_device *d) ENTRY; - rc = osp_sync_id_traction_init(d); - if (rc) - RETURN(rc); + d->opd_sync_max_rpcs_in_flight = OSP_MAX_RPCS_IN_FLIGHT; + d->opd_sync_max_rpcs_in_progress = OSP_MAX_RPCS_IN_PROGRESS; + spin_lock_init(&d->opd_sync_lock); + init_waitqueue_head(&d->opd_sync_waitq); + init_waitqueue_head(&d->opd_sync_barrier_waitq); + thread_set_flags(&d->opd_sync_thread, SVC_INIT); + init_waitqueue_head(&d->opd_sync_thread.t_ctl_waitq); + INIT_LIST_HEAD(&d->opd_sync_in_flight_list); + INIT_LIST_HEAD(&d->opd_sync_committed_there); + + if (d->opd_storage->dd_rdonly) + RETURN(0); /* * initialize llog storing changes @@ -1439,14 +1518,6 @@ int osp_sync_init(const struct lu_env *env, struct osp_device *d) /* * Start synchronization thread */ - d->opd_syn_max_rpc_in_flight = OSP_MAX_IN_FLIGHT; - d->opd_syn_max_rpc_in_progress = OSP_MAX_IN_PROGRESS; - spin_lock_init(&d->opd_syn_lock); - init_waitqueue_head(&d->opd_syn_waitq); - init_waitqueue_head(&d->opd_syn_barrier_waitq); - init_waitqueue_head(&d->opd_syn_thread.t_ctl_waitq); - 
INIT_LIST_HEAD(&d->opd_syn_committed_there); - task = kthread_run(osp_sync_thread, d, "osp-syn-%u-%u", d->opd_index, d->opd_group); if (IS_ERR(task)) { @@ -1456,14 +1527,13 @@ int osp_sync_init(const struct lu_env *env, struct osp_device *d) GOTO(err_llog, rc); } - l_wait_event(d->opd_syn_thread.t_ctl_waitq, + l_wait_event(d->opd_sync_thread.t_ctl_waitq, osp_sync_running(d) || osp_sync_stopped(d), &lwi); RETURN(0); err_llog: osp_sync_llog_fini(env, d); err_id: - osp_sync_id_traction_fini(d); return rc; } @@ -1478,227 +1548,127 @@ err_id: */ int osp_sync_fini(struct osp_device *d) { - struct ptlrpc_thread *thread = &d->opd_syn_thread; + struct ptlrpc_thread *thread = &d->opd_sync_thread; ENTRY; - thread->t_flags = SVC_STOPPING; - wake_up(&d->opd_syn_waitq); - wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED); - - /* - * unregister transaction callbacks only when sync thread - * has finished operations with llog - */ - osp_sync_id_traction_fini(d); + if (!thread_is_init(thread) && !thread_is_stopped(thread)) { + thread->t_flags = SVC_STOPPING; + wake_up(&d->opd_sync_waitq); + wait_event(thread->t_ctl_waitq, thread_is_stopped(thread)); + } RETURN(0); } -static DEFINE_MUTEX(osp_id_tracker_sem); -static struct list_head osp_id_tracker_list = - LIST_HEAD_INIT(osp_id_tracker_list); +struct osp_last_committed_cb { + struct dt_txn_commit_cb ospc_cb; + struct osp_device *ospc_dev; + __u64 ospc_transno; +}; -/** - * OSD commit callback. - * - * The function is used as a local OSD commit callback to track the highest - * committed llog record id. see osp_sync_id_traction_init() for the details. - * - * \param[in] th local transaction handle committed - * \param[in] cookie commit callback data (our private structure) - */ -static void osp_sync_tracker_commit_cb(struct thandle *th, void *cookie) +void osp_sync_local_commit_cb(struct lu_env *env, struct thandle *th, + struct dt_txn_commit_cb *dcb, int err) { - struct osp_id_tracker *tr = cookie; - struct osp_device *d; - struct osp_txn_info *txn; + struct osp_last_committed_cb *cb; + struct osp_device *d; - LASSERT(tr); + cb = container_of0(dcb, struct osp_last_committed_cb, ospc_cb); + d = cb->ospc_dev; - txn = osp_txn_info(&th->th_ctx); - if (txn == NULL || txn->oti_current_id < tr->otr_committed_id) - return; + CDEBUG(D_HA, "%s: %llu committed\n", d->opd_obd->obd_name, + cb->ospc_transno); - spin_lock(&tr->otr_lock); - if (likely(txn->oti_current_id > tr->otr_committed_id)) { - CDEBUG(D_OTHER, "committed: %u -> %u\n", - tr->otr_committed_id, txn->oti_current_id); - tr->otr_committed_id = txn->oti_current_id; + spin_lock(&d->opd_sync_lock); + if (cb->ospc_transno > d->opd_sync_last_committed_id) + d->opd_sync_last_committed_id = cb->ospc_transno; + spin_unlock(&d->opd_sync_lock); - list_for_each_entry(d, &tr->otr_wakeup_list, - opd_syn_ontrack) { - d->opd_syn_last_committed_id = tr->otr_committed_id; - wake_up(&d->opd_syn_waitq); - } - } - spin_unlock(&tr->otr_lock); + osp_sync_check_for_work(d); + lu_device_put(osp2lu_dev(d)); + if (atomic_dec_and_test(&d->opd_commits_registered)) + wake_up(&d->opd_sync_waitq); + + OBD_FREE_PTR(cb); } -/** - * Initialize commit tracking mechanism. - * - * Some setups may have thousands of OSTs and each will be represented by OSP. - * Meaning order of magnitute many more changes to apply every second. In order - * to keep the number of commit callbacks low this mechanism was introduced. 
- * The mechanism is very similar to transno used by MDT service: it's an single - * ID stream which can be assigned by any OSP to its llog records. The tricky - * part is that ID is stored in per-transaction data and re-used by all the OSPs - * involved in that transaction. Then all these OSPs are woken up utilizing a single OSD commit callback. - * - * The function initializes the data used by the tracker described above. - * A singler tracker per OSD device is created. - * - * \param[in] d OSP device - * - * \retval 0 on success - * \retval negative negated errno on error - */ -static int osp_sync_id_traction_init(struct osp_device *d) +static int osp_sync_add_commit_cb(const struct lu_env *env, + struct osp_device *d, struct thandle *th) { - struct osp_id_tracker *tr, *found = NULL; - int rc = 0; + struct osp_last_committed_cb *cb; + struct dt_txn_commit_cb *dcb; + int rc = 0; + + OBD_ALLOC_PTR(cb); + if (cb == NULL) + return -ENOMEM; + cb->ospc_dev = d; + dcb = &cb->ospc_cb; + dcb->dcb_func = osp_sync_local_commit_cb; + spin_lock(&d->opd_sync_lock); + cb->ospc_transno = ++d->opd_sync_last_used_id; + spin_unlock(&d->opd_sync_lock); + + rc = dt_trans_cb_add(th, dcb); + CDEBUG(D_HA, "%s: add commit cb at %lluns, next at %lluns, rc = %d\n", + d->opd_obd->obd_name, ktime_get_ns(), + ktime_to_ns(d->opd_sync_next_commit_cb), rc); - LASSERT(d); - LASSERT(d->opd_storage); - LASSERT(d->opd_syn_tracker == NULL); - INIT_LIST_HEAD(&d->opd_syn_ontrack); - - mutex_lock(&osp_id_tracker_sem); - list_for_each_entry(tr, &osp_id_tracker_list, otr_list) { - if (tr->otr_dev == d->opd_storage) { - LASSERT(atomic_read(&tr->otr_refcount)); - atomic_inc(&tr->otr_refcount); - d->opd_syn_tracker = tr; - found = tr; - break; - } - } - - if (found == NULL) { - rc = -ENOMEM; - OBD_ALLOC_PTR(tr); - if (tr) { - d->opd_syn_tracker = tr; - spin_lock_init(&tr->otr_lock); - tr->otr_dev = d->opd_storage; - tr->otr_next_id = 1; - tr->otr_committed_id = 0; - atomic_set(&tr->otr_refcount, 1); - INIT_LIST_HEAD(&tr->otr_wakeup_list); - list_add(&tr->otr_list, &osp_id_tracker_list); - tr->otr_tx_cb.dtc_txn_commit = - osp_sync_tracker_commit_cb; - tr->otr_tx_cb.dtc_cookie = tr; - tr->otr_tx_cb.dtc_tag = LCT_MD_THREAD; - dt_txn_callback_add(d->opd_storage, &tr->otr_tx_cb); - rc = 0; - } - } - mutex_unlock(&osp_id_tracker_sem); + if (likely(rc == 0)) { + lu_device_get(osp2lu_dev(d)); + atomic_inc(&d->opd_commits_registered); + } else + OBD_FREE_PTR(cb); return rc; } -/** - * Release commit tracker. - * - * Decrease a refcounter on the tracker used by the given OSP device \a d. - * If no more users left, then the tracker is released. 
- * - * \param[in] d OSP device - */ -static void osp_sync_id_traction_fini(struct osp_device *d) +/* add the commit callback every second */ +int osp_sync_add_commit_cb_1s(const struct lu_env *env, struct osp_device *d, + struct thandle *th) { - struct osp_id_tracker *tr; + ktime_t now = ktime_get(); + bool add = false; - ENTRY; - - LASSERT(d); - tr = d->opd_syn_tracker; - if (tr == NULL) { - EXIT; - return; - } - - osp_sync_remove_from_tracker(d); + /* fast path */ + if (ktime_before(now, d->opd_sync_next_commit_cb)) + return 0; - mutex_lock(&osp_id_tracker_sem); - if (atomic_dec_and_test(&tr->otr_refcount)) { - dt_txn_callback_del(d->opd_storage, &tr->otr_tx_cb); - LASSERT(list_empty(&tr->otr_wakeup_list)); - list_del(&tr->otr_list); - OBD_FREE_PTR(tr); - d->opd_syn_tracker = NULL; + spin_lock(&d->opd_sync_lock); + if (ktime_before(d->opd_sync_next_commit_cb, now)) { + add = true; + d->opd_sync_next_commit_cb = ktime_add_ns(now, NSEC_PER_SEC); } - mutex_unlock(&osp_id_tracker_sem); + spin_unlock(&d->opd_sync_lock); - EXIT; -} - -/** - * Generate a new ID on a tracker. - * - * Generates a new ID using the tracker associated with the given OSP device - * \a d, if the given ID \a id is non-zero. Unconditially adds OSP device to - * the wakeup list, so OSP won't miss when a transaction using the ID is - * committed. Notice ID is 32bit, but llog doesn't support >2^32 records anyway. - * - * \param[in] d OSP device - * \param[in] id 0 or ID generated previously - * - * \retval ID the caller should use - */ -static __u32 osp_sync_id_get(struct osp_device *d, __u32 id) -{ - struct osp_id_tracker *tr; - - tr = d->opd_syn_tracker; - LASSERT(tr); - - /* XXX: we can improve this introducing per-cpu preallocated ids? */ - spin_lock(&tr->otr_lock); - if (unlikely(tr->otr_next_id <= d->opd_syn_last_used_id)) { - spin_unlock(&tr->otr_lock); - CERROR("%s: next %u, last synced %lu\n", - d->opd_obd->obd_name, tr->otr_next_id, - d->opd_syn_last_used_id); - LBUG(); - } - - if (id == 0) - id = tr->otr_next_id++; - if (id > d->opd_syn_last_used_id) - d->opd_syn_last_used_id = id; - if (list_empty(&d->opd_syn_ontrack)) - list_add(&d->opd_syn_ontrack, &tr->otr_wakeup_list); - spin_unlock(&tr->otr_lock); - CDEBUG(D_OTHER, "new id %u\n", (unsigned) id); + if (!add) + return 0; - return id; + return osp_sync_add_commit_cb(env, d, th); } -/** - * Stop to propagate commit status to OSP. - * - * If the OSP does not have any llog records she's waiting to commit, then - * it is possible to unsubscribe from wakeups from the tracking using this - * method. - * - * \param[in] d OSP device not willing to wakeup +/* + * generate an empty transaction and hook the commit callback in + * then force transaction commit */ -static void osp_sync_remove_from_tracker(struct osp_device *d) +void osp_sync_force(const struct lu_env *env, struct osp_device *d) { - struct osp_id_tracker *tr; - - tr = d->opd_syn_tracker; - LASSERT(tr); + struct thandle *th; + int rc; - if (list_empty(&d->opd_syn_ontrack)) + th = dt_trans_create(env, d->opd_storage); + if (IS_ERR(th)) { + CERROR("%s: can't sync\n", d->opd_obd->obd_name); return; + } + rc = dt_trans_start_local(env, d->opd_storage, th); + if (rc == 0) { + CDEBUG(D_OTHER, "%s: sync forced, %d changes\n", + d->opd_obd->obd_name, + atomic_read(&d->opd_sync_changes)); + rc = osp_sync_add_commit_cb(env, d, th); + dt_trans_stop(env, d->opd_storage, th); + } - spin_lock(&tr->otr_lock); - list_del_init(&d->opd_syn_ontrack); - spin_unlock(&tr->otr_lock); + dt_commit_async(env, d->opd_storage); } -
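
A note on the id scheme used by this patch: each llog record carries only the
low 32 bits of the 64-bit sync id (lrh_id), and osp_sync_correct_id()
reconstructs the high bits from opd_sync_last_committed_id, relying on the
window between last-processed and last-committed being far smaller than 2^32.
Below is a minimal userspace sketch of that reconstruction; correct_id() is a
hypothetical stand-in for the kernel helper, not code from this patch.

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for osp_sync_correct_id(): rebuild the full 64-bit
 * id from the 32-bit lrh_id stored in a llog record, using the last locally
 * committed id as the reference point */
static uint64_t correct_id(uint64_t last_committed_id, uint32_t lrh_id)
{
	uint64_t id = last_committed_id;

	/* the record predates the 32-bit wrap: step one epoch back */
	if ((id & 0xffffffffULL) < lrh_id)
		id -= 0x100000000ULL;

	id &= ~0xffffffffULL;	/* keep only the epoch (high 32 bits) */
	id |= lrh_id;		/* splice in the record's low 32 bits */
	return id;
}

int main(void)
{
	/* committed id just crossed a wrap; an older record stays in epoch 0 */
	printf("%#llx\n", (unsigned long long)
	       correct_id(0x100000005ULL, 0xfffffffeU));	/* 0xfffffffe */
	/* a record written after the wrap lands in the current epoch */
	printf("%#llx\n", (unsigned long long)
	       correct_id(0x100000005ULL, 0x3U));		/* 0x100000003 */
	return 0;
}

The subtraction steps back one 32-bit epoch when the record predates the
wrap; otherwise the record simply inherits the committed id's epoch.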