X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fosp%2Fosp_sync.c;h=f0d785a6fe3456ef5f14b6ceb418b8e905cc05b9;hb=b046468f58a1f40e85cb59ed9abf75fd2fd5ea5a;hp=298626d5b8715b8e977b77f8c474f9b03559941c;hpb=294e507d34de0d7990ec507334f71ff1d1a7db01;p=fs%2Flustre-release.git diff --git a/lustre/osp/osp_sync.c b/lustre/osp/osp_sync.c index 298626d..f0d785a 100644 --- a/lustre/osp/osp_sync.c +++ b/lustre/osp/osp_sync.c @@ -27,7 +27,7 @@ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2012, 2013, Intel Corporation. + * Copyright (c) 2012, 2014, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -43,7 +43,9 @@ #define DEBUG_SUBSYSTEM S_MDS +#include #include +#include #include "osp_internal.h" static int osp_sync_id_traction_init(struct osp_device *d); @@ -93,16 +95,39 @@ static void osp_sync_remove_from_tracker(struct osp_device *d); #define OSP_JOB_MAGIC 0x26112005 +struct osp_job_req_args { + /** bytes reserved for ptlrpc_replay_req() */ + struct ptlrpc_replay_async_args jra_raa; + struct list_head jra_link; + __u32 jra_magic; +}; + static inline int osp_sync_running(struct osp_device *d) { return !!(d->opd_syn_thread.t_flags & SVC_RUNNING); } +/** + * Check status: whether OSP thread has stopped + * + * \param[in] d OSP device + * + * \retval 0 still running + * \retval 1 stopped + */ static inline int osp_sync_stopped(struct osp_device *d) { return !!(d->opd_syn_thread.t_flags & SVC_STOPPED); } +/* + ** Check for new changes to sync + * + * \param[in] d OSP device + * + * \retval 1 there are changes + * \retval 0 there are no changes + */ static inline int osp_sync_has_new_job(struct osp_device *d) { return ((d->opd_syn_last_processed_id < d->opd_syn_last_used_id) && @@ -115,11 +140,27 @@ static inline int osp_sync_low_in_progress(struct osp_device *d) return d->opd_syn_rpc_in_progress < d->opd_syn_max_rpc_in_progress; } +/** + * Check for room in the network pipe to OST + * + * \param[in] d OSP device + * + * \retval 1 there is room + * \retval 0 no room, the pipe is full + */ static inline int osp_sync_low_in_flight(struct osp_device *d) { return d->opd_syn_rpc_in_flight < d->opd_syn_max_rpc_in_flight; } +/** + * Wake up check for the main sync thread + * + * \param[in] d OSP device + * + * \retval 1 time to wake up + * \retval 0 no need to wake up + */ static inline int osp_sync_has_work(struct osp_device *d) { /* has new/old changes and low in-progress? */ @@ -128,7 +169,7 @@ static inline int osp_sync_has_work(struct osp_device *d) return 1; /* has remotely committed? */ - if (!cfs_list_empty(&d->opd_syn_committed_there)) + if (!list_empty(&d->opd_syn_committed_there)) return 1; return 0; @@ -146,11 +187,27 @@ void __osp_sync_check_for_work(struct osp_device *d) osp_sync_check_for_work(d); } +/** + * Check and return ready-for-new status. + * + * The thread processing llog record uses this function to check whether + * it's time to take another record and process it. The number of conditions + * must be met: the connection should be ready, RPCs in flight not exceeding + * the limit, the record is committed locally, etc (see the lines below). 
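+ *
+ * For illustration, the processing loop in osp_sync_process_queues()
+ * below essentially just waits until this check passes (a simplified
+ * sketch of that loop, not additional code):
+ * \code
+ *	if (!osp_sync_can_process_new(d, rec))
+ *		l_wait_event(d->opd_syn_waitq,
+ *			     !osp_sync_running(d) ||
+ *			     osp_sync_can_process_new(d, rec) ||
+ *			     !list_empty(&d->opd_syn_committed_there),
+ *			     &lwi);
+ * \endcode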
+ * + * \param[in] d OSP device + * \param[in] rec next llog record to process + * + * \retval 0 not ready + * \retval 1 ready + */ static inline int osp_sync_can_process_new(struct osp_device *d, struct llog_rec_hdr *rec) { LASSERT(d); + if (unlikely(atomic_read(&d->opd_syn_barrier) > 0)) + return 0; if (!osp_sync_low_in_progress(d)) return 0; if (!osp_sync_low_in_flight(d)) @@ -166,19 +223,38 @@ static inline int osp_sync_can_process_new(struct osp_device *d, return 0; } +/** + * Declare intention to add a new change. + * + * With regard to OSD API, we have to declare any changes ahead. In this + * case we declare an intention to add a llog record representing the + * change on the local storage. + * + * \param[in] env LU environment provided by the caller + * \param[in] o OSP object + * \param[in] type type of change: MDS_UNLINK64_REC or MDS_SETATTR64_REC + * \param[in] th transaction handle (local) + * + * \retval 0 on success + * \retval negative negated errno on error + */ int osp_sync_declare_add(const struct lu_env *env, struct osp_object *o, llog_op_type type, struct thandle *th) { struct osp_thread_info *osi = osp_env_info(env); struct osp_device *d = lu2osp_dev(o->opo_obj.do_lu.lo_dev); struct llog_ctxt *ctxt; + struct thandle *storage_th; int rc; ENTRY; /* it's a layering violation, to access internals of th, * but we can do this as a sanity check, for a while */ - LASSERT(th->th_dev == d->opd_storage); + LASSERT(th->th_top != NULL); + storage_th = thandle_get_sub_by_dt(env, th->th_top, d->opd_storage); + if (IS_ERR(storage_th)) + RETURN(PTR_ERR(storage_th)); switch (type) { case MDS_UNLINK64_REC: @@ -192,17 +268,39 @@ int osp_sync_declare_add(const struct lu_env *env, struct osp_object *o, } /* we want ->dt_trans_start() to allocate per-thandle structure */ - th->th_tags |= LCT_OSP_THREAD; + storage_th->th_tags |= LCT_OSP_THREAD; ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT); LASSERT(ctxt); - rc = llog_declare_add(env, ctxt->loc_handle, &osi->osi_hdr, th); + rc = llog_declare_add(env, ctxt->loc_handle, &osi->osi_hdr, + storage_th); llog_ctxt_put(ctxt); RETURN(rc); } +/** + * Generate a llog record for a given change. + * + * Generates a llog record for the change passed. The change can be of two + * types: unlink and setattr. The record gets an ID which later will be + * used to track commit status of the change. For unlink changes, the caller + * can supply a starting FID and the count of the objects to destroy. For + * setattr the caller should apply attributes to apply. 
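+ *
+ * Records of this kind are normally created through the
+ * osp_sync_declare_add()/osp_sync_add() pair under one transaction,
+ * roughly as follows (a sketch only; see osp_internal.h for the exact
+ * prototypes):
+ * \code
+ *	rc = osp_sync_declare_add(env, o, MDS_UNLINK64_REC, th);
+ *	...
+ *	rc = osp_sync_add(env, o, MDS_UNLINK64_REC, th, NULL);
+ * \endcode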
+ * + * + * \param[in] env LU environment provided by the caller + * \param[in] d OSP device + * \param[in] fid fid of the object the change should be applied to + * \param[in] type type of change: MDS_UNLINK64_REC or MDS_SETATTR64_REC + * \param[in] count count of objects to destroy + * \param[in] th transaction handle (local) + * \param[in] attr attributes for setattr + * + * \retval 0 on success + * \retval negative negated errno on error + */ static int osp_sync_add_rec(const struct lu_env *env, struct osp_device *d, const struct lu_fid *fid, llog_op_type type, int count, struct thandle *th, @@ -211,13 +309,17 @@ static int osp_sync_add_rec(const struct lu_env *env, struct osp_device *d, struct osp_thread_info *osi = osp_env_info(env); struct llog_ctxt *ctxt; struct osp_txn_info *txn; + struct thandle *storage_th; int rc; ENTRY; /* it's a layering violation, to access internals of th, * but we can do this as a sanity check, for a while */ - LASSERT(th->th_dev == d->opd_storage); + LASSERT(th->th_top != NULL); + storage_th = thandle_get_sub_by_dt(env, th->th_top, d->opd_storage); + if (IS_ERR(storage_th)) + RETURN(PTR_ERR(storage_th)); switch (type) { case MDS_UNLINK64_REC: @@ -235,12 +337,15 @@ static int osp_sync_add_rec(const struct lu_env *env, struct osp_device *d, LASSERT(attr); osi->osi_setattr.lsr_uid = attr->la_uid; osi->osi_setattr.lsr_gid = attr->la_gid; + osi->osi_setattr.lsr_valid = + ((attr->la_valid & LA_UID) ? OBD_MD_FLUID : 0) | + ((attr->la_valid & LA_GID) ? OBD_MD_FLGID : 0); break; default: LBUG(); } - txn = osp_txn_info(&th->th_ctx); + txn = osp_txn_info(&storage_th->th_ctx); LASSERT(txn); txn->oti_current_id = osp_sync_id_get(d, txn->oti_current_id); @@ -249,25 +354,23 @@ static int osp_sync_add_rec(const struct lu_env *env, struct osp_device *d, ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT); if (ctxt == NULL) RETURN(-ENOMEM); + rc = llog_add(env, ctxt->loc_handle, &osi->osi_hdr, &osi->osi_cookie, - NULL, th); + storage_th); llog_ctxt_put(ctxt); - CDEBUG(D_OTHER, "%s: new record "DOSTID":%lu/%lu: %d\n", - d->opd_obd->obd_name, POSTID(&osi->osi_cookie.lgc_lgl.lgl_oi), - (unsigned long) osi->osi_cookie.lgc_lgl.lgl_ogen, - (unsigned long) osi->osi_cookie.lgc_index, rc); - - if (rc > 0) - rc = 0; - - if (likely(rc == 0)) { + if (likely(rc >= 0)) { + CDEBUG(D_OTHER, "%s: new record "DOSTID":%lu/%lu: %d\n", + d->opd_obd->obd_name, + POSTID(&osi->osi_cookie.lgc_lgl.lgl_oi), + (unsigned long)osi->osi_cookie.lgc_lgl.lgl_ogen, + (unsigned long)osi->osi_cookie.lgc_index, rc); spin_lock(&d->opd_syn_lock); d->opd_syn_changes++; spin_unlock(&d->opd_syn_lock); } - - RETURN(rc); + /* return 0 always here, error case just cause no llog record */ + RETURN(0); } int osp_sync_add(const struct lu_env *env, struct osp_object *o, @@ -280,7 +383,7 @@ int osp_sync_add(const struct lu_env *env, struct osp_object *o, } int osp_sync_gap(const struct lu_env *env, struct osp_device *d, - struct lu_fid *fid, int lost, struct thandle *th) + struct lu_fid *fid, int lost, struct thandle *th) { return osp_sync_add_rec(env, d, fid, MDS_UNLINK64_REC, lost, th, NULL); } @@ -298,18 +401,23 @@ int osp_sync_gap(const struct lu_env *env, struct osp_device *d, * subsequent commit callback (at the most) */ -/* - * called for each atomic on-disk change (not once per transaction batch) - * and goes over the list - * XXX: should be optimized? - */ - /** - * called for each RPC reported committed + * ptlrpc commit callback. 
+ * + * The callback is called by PTLRPC when a RPC is reported committed by the + * target (OST). We register the callback for the every RPC applying a change + * from the llog. This way we know then the llog records can be cancelled. + * Notice the callback can be called when OSP is finishing. We can detect this + * checking that actual transno in the request is less or equal of known + * committed transno (see osp_sync_process_committed() for the details). + * XXX: this is pretty expensive and can be improved later using batching. + * + * \param[in] req request */ static void osp_sync_request_commit_cb(struct ptlrpc_request *req) { struct osp_device *d = req->rq_cb_data; + struct osp_job_req_args *jra; CDEBUG(D_HA, "commit req %p, transno "LPU64"\n", req, req->rq_transno); @@ -318,29 +426,49 @@ static void osp_sync_request_commit_cb(struct ptlrpc_request *req) /* do not do any opd_dyn_rpc_* accounting here * it's done in osp_sync_interpret sooner or later */ - LASSERT(d); - LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC); - LASSERT(cfs_list_empty(&req->rq_exp_list)); + + jra = ptlrpc_req_async_args(req); + LASSERT(jra->jra_magic == OSP_JOB_MAGIC); + LASSERT(list_empty(&jra->jra_link)); ptlrpc_request_addref(req); spin_lock(&d->opd_syn_lock); - cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there); + list_add(&jra->jra_link, &d->opd_syn_committed_there); spin_unlock(&d->opd_syn_lock); /* XXX: some batching wouldn't hurt */ wake_up(&d->opd_syn_waitq); } +/** + * RPC interpretation callback. + * + * The callback is called by ptlrpc when RPC is replied. Now we have to decide + * whether we should: + * - put request on a special list to wait until it's committed by the target, + * if the request is successful + * - schedule llog record cancel if no target object is found + * - try later (essentially after reboot) in case of unexpected error + * + * \param[in] env LU environment provided by the caller + * \param[in] req request replied + * \param[in] aa callback data + * \param[in] rc result of RPC + * + * \retval 0 always + */ static int osp_sync_interpret(const struct lu_env *env, struct ptlrpc_request *req, void *aa, int rc) { struct osp_device *d = req->rq_cb_data; + struct osp_job_req_args *jra = aa; - if (req->rq_svc_thread != (void *) OSP_JOB_MAGIC) - DEBUG_REQ(D_ERROR, req, "bad magic %p\n", req->rq_svc_thread); - LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC); + if (jra->jra_magic != OSP_JOB_MAGIC) { + DEBUG_REQ(D_ERROR, req, "bad magic %u\n", jra->jra_magic); + LBUG(); + } LASSERT(d); CDEBUG(D_HA, "reply req %p/%d, rc %d, transno %u\n", req, @@ -354,12 +482,12 @@ static int osp_sync_interpret(const struct lu_env *env, * but object doesn't exist anymore - cancell llog record */ LASSERT(req->rq_transno == 0); - LASSERT(cfs_list_empty(&req->rq_exp_list)); + LASSERT(list_empty(&jra->jra_link)); ptlrpc_request_addref(req); spin_lock(&d->opd_syn_lock); - cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there); + list_add(&jra->jra_link, &d->opd_syn_committed_there); spin_unlock(&d->opd_syn_lock); wake_up(&d->opd_syn_waitq); @@ -398,6 +526,8 @@ static int osp_sync_interpret(const struct lu_env *env, spin_lock(&d->opd_syn_lock); d->opd_syn_rpc_in_flight--; spin_unlock(&d->opd_syn_lock); + if (unlikely(atomic_read(&d->opd_syn_barrier) > 0)) + wake_up(&d->opd_syn_barrier_waitq); CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n", d->opd_obd->obd_name, d->opd_syn_rpc_in_flight, d->opd_syn_rpc_in_progress); @@ -408,18 +538,45 @@ static int osp_sync_interpret(const 
struct lu_env *env, } /* - * the function walks through list of committed locally changes - * and send them to RPC until the pipe is full + ** Add request to ptlrpc queue. + * + * This is just a tiny helper function to put the request on the sending list + * + * \param[in] d OSP device + * \param[in] req request */ static void osp_sync_send_new_rpc(struct osp_device *d, struct ptlrpc_request *req) { + struct osp_job_req_args *jra; + LASSERT(d->opd_syn_rpc_in_flight <= d->opd_syn_max_rpc_in_flight); - LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC); - ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1); + jra = ptlrpc_req_async_args(req); + jra->jra_magic = OSP_JOB_MAGIC; + INIT_LIST_HEAD(&jra->jra_link); + + ptlrpcd_add_req(req); } + +/** + * Allocate and prepare RPC for a new change. + * + * The function allocates and initializes an RPC which will be sent soon to + * apply the change to the target OST. The request is initialized from the + * llog record passed. Notice only the fields common to all type of changes + * are initialized. + * + * \param[in] d OSP device + * \param[in] llh llog handle where the record is stored + * \param[in] h llog record + * \param[in] op type of the change + * \param[in] format request format to be used + * + * \retval pointer new request on success + * \retval ERR_PTR(errno) on error + */ static struct ptlrpc_request *osp_sync_new_job(struct osp_device *d, struct llog_handle *llh, struct llog_rec_hdr *h, @@ -455,8 +612,6 @@ static struct ptlrpc_request *osp_sync_new_job(struct osp_device *d, body->oa.o_lcookie.lgc_lgl = llh->lgh_id; body->oa.o_lcookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT; body->oa.o_lcookie.lgc_index = h->lrh_index; - CFS_INIT_LIST_HEAD(&req->rq_exp_list); - req->rq_svc_thread = (void *) OSP_JOB_MAGIC; req->rq_interpret_reply = osp_sync_interpret; req->rq_commit_cb = osp_sync_request_commit_cb; @@ -467,6 +622,19 @@ static struct ptlrpc_request *osp_sync_new_job(struct osp_device *d, return req; } +/** + * Generate a request for setattr change. + * + * The function prepares a new RPC, initializes it with setattr specific + * bits and send the RPC. + * + * \param[in] d OSP device + * \param[in] llh llog handle where the record is stored + * \param[in] h llog record + * + * \retval 0 on success + * \retval negative negated errno on error + */ static int osp_sync_new_setattr_job(struct osp_device *d, struct llog_handle *llh, struct llog_rec_hdr *h) @@ -478,6 +646,16 @@ static int osp_sync_new_setattr_job(struct osp_device *d, ENTRY; LASSERT(h->lrh_type == MDS_SETATTR64_REC); + /* lsr_valid can only be 0 or have OBD_MD_{FLUID,FLGID} set, + * so no bits other than these should be set. */ + if ((rec->lsr_valid & ~(OBD_MD_FLUID | OBD_MD_FLGID)) != 0) { + CERROR("%s: invalid setattr record, lsr_valid:"LPU64"\n", + d->opd_obd->obd_name, rec->lsr_valid); + /* return 0 so that sync thread can continue processing + * other records. */ + RETURN(0); + } + req = osp_sync_new_job(d, llh, h, OST_SETATTR, &RQF_OST_SETATTR); if (IS_ERR(req)) RETURN(PTR_ERR(req)); @@ -487,14 +665,33 @@ static int osp_sync_new_setattr_job(struct osp_device *d, body->oa.o_oi = rec->lsr_oi; body->oa.o_uid = rec->lsr_uid; body->oa.o_gid = rec->lsr_gid; - body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID | - OBD_MD_FLUID | OBD_MD_FLGID; + body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID; + /* old setattr record (prior 2.6.0) doesn't have 'valid' stored, + * we assume that both UID and GID are valid in that case. 
*/ + if (rec->lsr_valid == 0) + body->oa.o_valid |= (OBD_MD_FLUID | OBD_MD_FLGID); + else + body->oa.o_valid |= rec->lsr_valid; osp_sync_send_new_rpc(d, req); - RETURN(0); + RETURN(1); } -/* Old records may be in old format, so we handle that too */ +/** + * Generate a request for unlink change. + * + * The function prepares a new RPC, initializes it with unlink(destroy) + * specific bits and sends the RPC. The function is used to handle + * llog_unlink_rec which were used in the older versions of Lustre. + * Current version uses llog_unlink_rec64. + * + * \param[in] d OSP device + * \param[in] llh llog handle where the record is stored + * \param[in] h llog record + * + * \retval 0 on success + * \retval negative negated errno on error + */ static int osp_sync_new_unlink_job(struct osp_device *d, struct llog_handle *llh, struct llog_rec_hdr *h) @@ -520,72 +717,28 @@ static int osp_sync_new_unlink_job(struct osp_device *d, body->oa.o_valid |= OBD_MD_FLOBJCOUNT; osp_sync_send_new_rpc(d, req); - RETURN(0); -} - -static int osp_prep_unlink_update_req(const struct lu_env *env, - struct osp_device *osp, - struct llog_handle *llh, - struct llog_rec_hdr *h, - struct ptlrpc_request **reqp) -{ - struct llog_unlink64_rec *rec = (struct llog_unlink64_rec *)h; - struct dt_update_request *update = NULL; - struct ptlrpc_request *req; - const char *buf; - struct llog_cookie lcookie; - int size; - int rc; - ENTRY; - - update = out_create_update_req(&osp->opd_dt_dev); - if (IS_ERR(update)) - RETURN(PTR_ERR(update)); - - /* This can only happens for unlink slave directory, so decrease - * ref for ".." and "." */ - rc = out_insert_update(env, update, OUT_REF_DEL, &rec->lur_fid, 0, - NULL, NULL); - if (rc != 0) - GOTO(out, rc); - - rc = out_insert_update(env, update, OUT_REF_DEL, &rec->lur_fid, 0, - NULL, NULL); - if (rc != 0) - GOTO(out, rc); - - lcookie.lgc_lgl = llh->lgh_id; - lcookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT; - lcookie.lgc_index = h->lrh_index; - size = sizeof(lcookie); - buf = (const char *)&lcookie; - - rc = out_insert_update(env, update, OUT_DESTROY, &rec->lur_fid, 1, - &size, &buf); - if (rc != 0) - GOTO(out, rc); - - rc = out_prep_update_req(env, osp->opd_obd->u.cli.cl_import, - update->dur_req, &req); - if (rc != 0) - GOTO(out, rc); - - INIT_LIST_HEAD(&req->rq_exp_list); - req->rq_svc_thread = (void *)OSP_JOB_MAGIC; - - req->rq_interpret_reply = osp_sync_interpret; - req->rq_commit_cb = osp_sync_request_commit_cb; - req->rq_cb_data = osp; - - ptlrpc_request_set_replen(req); - *reqp = req; -out: - if (update != NULL) - out_destroy_update_req(update); - - RETURN(rc); + RETURN(1); } +/** + * Generate a request for unlink change. + * + * The function prepares a new RPC, initializes it with unlink(destroy) + * specific bits and sends the RPC. Depending on the target (MDT or OST) + * two different protocols are used. For MDT we use OUT (basically OSD API + * updates transferred via a network). For OST we still use the old + * protocol (OBD?), originally for compatibility. Later we can start to + * use OUT for OST as well, this will allow batching and better code + * unification. 
+ * + * \param[in] env LU environment provided by the caller + * \param[in] d OSP device + * \param[in] llh llog handle where the record is stored + * \param[in] h llog record + * + * \retval 0 on success + * \retval negative negated errno on error + */ static int osp_sync_new_unlink64_job(const struct lu_env *env, struct osp_device *d, struct llog_handle *llh, @@ -598,31 +751,43 @@ static int osp_sync_new_unlink64_job(const struct lu_env *env, ENTRY; LASSERT(h->lrh_type == MDS_UNLINK64_REC); + req = osp_sync_new_job(d, llh, h, OST_DESTROY, + &RQF_OST_DESTROY); + if (IS_ERR(req)) + RETURN(PTR_ERR(req)); - if (d->opd_connect_mdt) { - rc = osp_prep_unlink_update_req(env, d, llh, h, &req); - if (rc != 0) - RETURN(rc); - } else { - req = osp_sync_new_job(d, llh, h, OST_DESTROY, - &RQF_OST_DESTROY); - if (IS_ERR(req)) - RETURN(PTR_ERR(req)); - - body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) - RETURN(-EFAULT); - rc = fid_to_ostid(&rec->lur_fid, &body->oa.o_oi); - if (rc < 0) - RETURN(rc); - body->oa.o_misc = rec->lur_count; - body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID | - OBD_MD_FLOBJCOUNT; - } + body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); + if (body == NULL) + RETURN(-EFAULT); + rc = fid_to_ostid(&rec->lur_fid, &body->oa.o_oi); + if (rc < 0) + RETURN(rc); + body->oa.o_misc = rec->lur_count; + body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID | + OBD_MD_FLOBJCOUNT; osp_sync_send_new_rpc(d, req); - RETURN(0); + RETURN(1); } +/** + * Process llog records. + * + * This function is called to process the llog records committed locally. + * In the recovery model used by OSP we can apply a change to a remote + * target once corresponding transaction (like posix unlink) is committed + * locally so can't revert. + * Depending on the llog record type, a given handler is called that is + * responsible for preparing and sending the RPC to apply the change. + * Special record type LLOG_GEN_REC marking a reboot is cancelled right away. 
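+ *
+ * In a simplified form the dispatch is (the real code below also
+ * maintains the in-flight/in-progress accounting):
+ * \code
+ *	switch (rec->lrh_type) {
+ *	case MDS_UNLINK_REC:
+ *		rc = osp_sync_new_unlink_job(d, llh, rec);
+ *		break;
+ *	case MDS_UNLINK64_REC:
+ *		rc = osp_sync_new_unlink64_job(env, d, llh, rec);
+ *		break;
+ *	case MDS_SETATTR64_REC:
+ *		rc = osp_sync_new_setattr_job(d, llh, rec);
+ *		break;
+ *	}
+ * \endcode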
+ * + * \param[in] env LU environment provided by the caller + * \param[in] d OSP device + * \param[in] llh llog handle where the record is stored + * \param[in] rec llog record + * + * \retval 0 on success + * \retval negative negated errno on error + */ static int osp_sync_process_record(const struct lu_env *env, struct osp_device *d, struct llog_handle *llh, @@ -680,10 +845,10 @@ static int osp_sync_process_record(const struct lu_env *env, CERROR("%s: unknown record type: %x\n", d->opd_obd->obd_name, rec->lrh_type); /* we should continue processing */ - return 0; } - if (likely(rc == 0)) { + /* rc > 0 means sync RPC being added to the queue */ + if (likely(rc > 0)) { spin_lock(&d->opd_syn_lock); if (d->opd_syn_prev_done) { LASSERT(d->opd_syn_changes > 0); @@ -692,8 +857,10 @@ static int osp_sync_process_record(const struct lu_env *env, * NOTE: it's possible to meet same id if * OST stores few stripes of same file */ - if (rec->lrh_id > d->opd_syn_last_processed_id) + if (rec->lrh_id > d->opd_syn_last_processed_id) { d->opd_syn_last_processed_id = rec->lrh_id; + wake_up(&d->opd_syn_barrier_waitq); + } d->opd_syn_changes--; } @@ -701,6 +868,7 @@ static int osp_sync_process_record(const struct lu_env *env, d->opd_obd->obd_name, d->opd_syn_rpc_in_flight, d->opd_syn_rpc_in_progress); spin_unlock(&d->opd_syn_lock); + rc = 0; } else { spin_lock(&d->opd_syn_lock); d->opd_syn_rpc_in_flight--; @@ -713,21 +881,31 @@ static int osp_sync_process_record(const struct lu_env *env, return rc; } +/** + * Cancel llog records for the committed changes. + * + * The function walks through the list of the committed RPCs and cancels + * corresponding llog records. see osp_sync_request_commit_cb() for the + * details. + * + * \param[in] env LU environment provided by the caller + * \param[in] d OSP device + */ static void osp_sync_process_committed(const struct lu_env *env, struct osp_device *d) { struct obd_device *obd = d->opd_obd; struct obd_import *imp = obd->u.cli.cl_import; struct ost_body *body; - struct ptlrpc_request *req, *tmp; + struct ptlrpc_request *req; struct llog_ctxt *ctxt; struct llog_handle *llh; - cfs_list_t list; + struct list_head list; int rc, done = 0; ENTRY; - if (cfs_list_empty(&d->opd_syn_committed_there)) + if (list_empty(&d->opd_syn_committed_there)) return; /* @@ -752,39 +930,26 @@ static void osp_sync_process_committed(const struct lu_env *env, llh = ctxt->loc_handle; LASSERT(llh); - CFS_INIT_LIST_HEAD(&list); + INIT_LIST_HEAD(&list); spin_lock(&d->opd_syn_lock); - cfs_list_splice(&d->opd_syn_committed_there, &list); - CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there); + list_splice(&d->opd_syn_committed_there, &list); + INIT_LIST_HEAD(&d->opd_syn_committed_there); spin_unlock(&d->opd_syn_lock); - cfs_list_for_each_entry_safe(req, tmp, &list, rq_exp_list) { + while (!list_empty(&list)) { struct llog_cookie *lcookie = NULL; - - LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC); - cfs_list_del_init(&req->rq_exp_list); - - if (d->opd_connect_mdt) { - struct object_update_request *ureq; - struct object_update *update; - ureq = req_capsule_client_get(&req->rq_pill, - &RMF_OUT_UPDATE); - LASSERT(ureq != NULL && - ureq->ourq_magic == UPDATE_REQUEST_MAGIC); - - /* 1st/2nd is for decref . and .., 3rd one is for - * destroy, where the log cookie is stored. 
- * See osp_prep_unlink_update_req */ - update = object_update_request_get(ureq, 2, NULL); - LASSERT(update != NULL); - lcookie = object_update_param_get(update, 0, NULL); - LASSERT(lcookie != NULL); - } else { - body = req_capsule_client_get(&req->rq_pill, - &RMF_OST_BODY); - LASSERT(body); - lcookie = &body->oa.o_lcookie; - } + struct osp_job_req_args *jra; + + jra = list_entry(list.next, struct osp_job_req_args, jra_link); + LASSERT(jra->jra_magic == OSP_JOB_MAGIC); + list_del_init(&jra->jra_link); + + req = container_of((void *)jra, struct ptlrpc_request, + rq_async_args); + body = req_capsule_client_get(&req->rq_pill, + &RMF_OST_BODY); + LASSERT(body); + lcookie = &body->oa.o_lcookie; /* import can be closing, thus all commit cb's are * called we can check committness directly */ if (req->rq_transno <= imp->imp_peer_committed_transno) { @@ -820,8 +985,21 @@ static void osp_sync_process_committed(const struct lu_env *env, EXIT; } -/* - * this is where most of queues processing happens +/** + * The core of the syncing mechanism. + * + * This is a callback called by the llog processing function. Essentially it + * suspends llog processing until there is a record to process (it's supposed + * to be committed locally). The function handles RPCs committed by the target + * and cancels corresponding llog records. + * + * \param[in] env LU environment provided by the caller + * \param[in] llh llog handle we're processing + * \param[in] rec current llog record + * \param[in] data callback data containing a pointer to the device + * + * \retval 0 to ask the caller (llog_process()) to continue + * \retval LLOG_PROC_BREAK to ask the caller to break */ static int osp_sync_process_queues(const struct lu_env *env, struct llog_handle *llh, @@ -847,7 +1025,8 @@ static int osp_sync_process_queues(const struct lu_env *env, if (osp_sync_can_process_new(d, rec)) { if (llh == NULL) { /* ask llog for another record */ - CDEBUG(D_HA, "%lu changes, %u in progress, %u in flight\n", + CDEBUG(D_HA, "%lu changes, %u in progress," + " %u in flight\n", d->opd_syn_changes, d->opd_syn_rpc_in_progress, d->opd_syn_rpc_in_flight); @@ -884,13 +1063,15 @@ static int osp_sync_process_queues(const struct lu_env *env, l_wait_event(d->opd_syn_waitq, !osp_sync_running(d) || osp_sync_can_process_new(d, rec) || - !cfs_list_empty(&d->opd_syn_committed_there), + !list_empty(&d->opd_syn_committed_there), &lwi); } while (1); } -/* - * this thread runs llog_cat_process() scanner calling our callback +/** + * OSP sync thread. + * + * This thread runs llog_cat_process() scanner calling our callback * to process llog records. in the callback we implement tricky * state machine as we don't want to start scanning of the llog again * and again, also we don't want to process too many records and send @@ -898,9 +1079,14 @@ static int osp_sync_process_queues(const struct lu_env *env, * being synced to OST) the callback can suspend awaiting for some * new conditions, like syncs completed. * - * in order to process llog records left by previous boots and to allow + * In order to process llog records left by previous boots and to allow * llog_process_thread() to find something (otherwise it'd just exit * immediately) we add a special GENERATATION record on each boot. 
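+ *
+ * The core loop of the thread is, in a simplified form (error handling
+ * and the final drain of in-flight RPCs omitted):
+ * \code
+ *	do {
+ *		rc = llog_cat_process(env, llh, osp_sync_process_queues,
+ *				      d, 0, 0);
+ *	} while (osp_sync_running(d));
+ * \endcode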
+ * + * \param[in] _arg a pointer to thread's arguments + * + * \retval 0 on success + * \retval negative negated errno on error */ static int osp_sync_thread(void *_arg) { @@ -966,7 +1152,7 @@ static int osp_sync_thread(void *_arg) LASSERTF(count < 10, "%s: %d %d %sempty\n", d->opd_obd->obd_name, d->opd_syn_rpc_in_progress, d->opd_syn_rpc_in_flight, - cfs_list_empty(&d->opd_syn_committed_there) ? "" :"!"); + list_empty(&d->opd_syn_committed_there) ? "" : "!"); } @@ -979,7 +1165,7 @@ out: "%s: %d %d %sempty\n", d->opd_obd->obd_name, d->opd_syn_rpc_in_progress, d->opd_syn_rpc_in_flight, - cfs_list_empty(&d->opd_syn_committed_there) ? "" : "!"); + list_empty(&d->opd_syn_committed_there) ? "" : "!"); thread->t_flags = SVC_STOPPED; @@ -990,6 +1176,20 @@ out: RETURN(0); } +/** + * Initialize llog. + * + * Initializes the llog. Specific llog to be used depends on the type of the + * target OSP represents (OST or MDT). The function adds appends a new llog + * record to mark the place where the records associated with this boot + * start. + * + * \param[in] env LU environment provided by the caller + * \param[in] d OSP device + * + * \retval 0 on success + * \retval negative negated errno on error + */ static int osp_sync_llog_init(const struct lu_env *env, struct osp_device *d) { struct osp_thread_info *osi = osp_env_info(env); @@ -1009,17 +1209,23 @@ static int osp_sync_llog_init(const struct lu_env *env, struct osp_device *d) OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt); obd->obd_lvfs_ctxt.dt = d->opd_storage; - if (d->opd_connect_mdt) - lu_local_obj_fid(fid, SLAVE_LLOG_CATALOGS_OID); - else - lu_local_obj_fid(fid, LLOG_CATALOGS_OID); + lu_local_obj_fid(fid, LLOG_CATALOGS_OID); rc = llog_osd_get_cat_list(env, d->opd_storage, d->opd_index, 1, &osi->osi_cid, fid); - if (rc) { - CERROR("%s: can't get id from catalogs: rc = %d\n", - obd->obd_name, rc); - RETURN(rc); + if (rc < 0) { + if (rc != -EFAULT) { + CERROR("%s: can't get id from catalogs: rc = %d\n", + obd->obd_name, rc); + RETURN(rc); + } + + /* After sparse OST indices is supported, the CATALOG file + * may become a sparse file that results in failure on + * reading. Skip this error as the llog will be created + * later */ + memset(&osi->osi_cid, 0, sizeof(osi->osi_cid)); + rc = 0; } CDEBUG(D_INFO, "%s: Init llog for %d - catid "DOSTID":%x\n", @@ -1027,7 +1233,8 @@ static int osp_sync_llog_init(const struct lu_env *env, struct osp_device *d) POSTID(&osi->osi_cid.lci_logid.lgl_oi), osi->osi_cid.lci_logid.lgl_ogen); - rc = llog_setup(env, obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, obd, + rc = llog_setup(env, obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, + d->opd_storage->dd_lu_dev.ld_obd, &osp_mds_ost_orig_logops); if (rc) RETURN(rc); @@ -1077,8 +1284,7 @@ static int osp_sync_llog_init(const struct lu_env *env, struct osp_device *d) memcpy(&osi->osi_gen.lgr_gen, &d->opd_syn_generation, sizeof(osi->osi_gen.lgr_gen)); - rc = llog_cat_add(env, lgh, &osi->osi_gen.lgr_hdr, &osi->osi_cookie, - NULL); + rc = llog_cat_add(env, lgh, &osi->osi_gen.lgr_hdr, &osi->osi_cookie); if (rc < 0) GOTO(out_close, rc); llog_ctxt_put(ctxt); @@ -1090,6 +1296,15 @@ out_cleanup: RETURN(rc); } +/** + * Cleanup llog used for syncing. + * + * Closes and cleanups the llog. The function is called when the device is + * shutting down. 
+ * + * \param[in] env LU environment provided by the caller + * \param[in] d OSP device + */ static void osp_sync_llog_fini(const struct lu_env *env, struct osp_device *d) { struct llog_ctxt *ctxt; @@ -1100,12 +1315,22 @@ static void osp_sync_llog_fini(const struct lu_env *env, struct osp_device *d) llog_cleanup(env, ctxt); } -/* - * initializes sync component of OSP +/** + * Initialization of the sync component of OSP. + * + * Initializes the llog and starts a new thread to handle the changes to + * the remote target (OST or MDT). + * + * \param[in] env LU environment provided by the caller + * \param[in] d OSP device + * + * \retval 0 on success + * \retval negative negated errno on error */ int osp_sync_init(const struct lu_env *env, struct osp_device *d) { struct l_wait_info lwi = { 0 }; + struct task_struct *task; int rc; ENTRY; @@ -1131,13 +1356,15 @@ int osp_sync_init(const struct lu_env *env, struct osp_device *d) d->opd_syn_max_rpc_in_progress = OSP_MAX_IN_PROGRESS; spin_lock_init(&d->opd_syn_lock); init_waitqueue_head(&d->opd_syn_waitq); + init_waitqueue_head(&d->opd_syn_barrier_waitq); init_waitqueue_head(&d->opd_syn_thread.t_ctl_waitq); - CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there); + INIT_LIST_HEAD(&d->opd_syn_committed_there); - rc = PTR_ERR(kthread_run(osp_sync_thread, d, - "osp-syn-%u-%u", d->opd_index, d->opd_group)); - if (IS_ERR_VALUE(rc)) { - CERROR("%s: can't start sync thread: rc = %d\n", + task = kthread_run(osp_sync_thread, d, "osp-syn-%u-%u", + d->opd_index, d->opd_group); + if (IS_ERR(task)) { + rc = PTR_ERR(task); + CERROR("%s: cannot start sync thread: rc = %d\n", d->opd_obd->obd_name, rc); GOTO(err_llog, rc); } @@ -1153,6 +1380,15 @@ err_id: return rc; } +/** + * Stop the syncing thread. + * + * Asks the syncing thread to stop and wait until it's stopped. + * + * \param[in] d OSP device + * + * \retval 0 + */ int osp_sync_fini(struct osp_device *d) { struct ptlrpc_thread *thread = &d->opd_syn_thread; @@ -1173,8 +1409,18 @@ int osp_sync_fini(struct osp_device *d) } static DEFINE_MUTEX(osp_id_tracker_sem); -static CFS_LIST_HEAD(osp_id_tracker_list); +static struct list_head osp_id_tracker_list = + LIST_HEAD_INIT(osp_id_tracker_list); +/** + * OSD commit callback. + * + * The function is used as a local OSD commit callback to track the highest + * committed llog record id. see osp_sync_id_traction_init() for the details. + * + * \param[in] th local transaction handle committed + * \param[in] cookie commit callback data (our private structure) + */ static void osp_sync_tracker_commit_cb(struct thandle *th, void *cookie) { struct osp_id_tracker *tr = cookie; @@ -1193,8 +1439,8 @@ static void osp_sync_tracker_commit_cb(struct thandle *th, void *cookie) tr->otr_committed_id, txn->oti_current_id); tr->otr_committed_id = txn->oti_current_id; - cfs_list_for_each_entry(d, &tr->otr_wakeup_list, - opd_syn_ontrack) { + list_for_each_entry(d, &tr->otr_wakeup_list, + opd_syn_ontrack) { d->opd_syn_last_committed_id = tr->otr_committed_id; wake_up(&d->opd_syn_waitq); } @@ -1202,6 +1448,25 @@ static void osp_sync_tracker_commit_cb(struct thandle *th, void *cookie) spin_unlock(&tr->otr_lock); } +/** + * Initialize commit tracking mechanism. + * + * Some setups may have thousands of OSTs and each will be represented by OSP. + * Meaning order of magnitute many more changes to apply every second. In order + * to keep the number of commit callbacks low this mechanism was introduced. 
+ * The mechanism is very similar to transno used by MDT service: it's an single + * ID stream which can be assigned by any OSP to its llog records. The tricky + * part is that ID is stored in per-transaction data and re-used by all the OSPs + * involved in that transaction. Then all these OSPs are woken up utilizing a single OSD commit callback. + * + * The function initializes the data used by the tracker described above. + * A singler tracker per OSD device is created. + * + * \param[in] d OSP device + * + * \retval 0 on success + * \retval negative negated errno on error + */ static int osp_sync_id_traction_init(struct osp_device *d) { struct osp_id_tracker *tr, *found = NULL; @@ -1210,10 +1475,10 @@ static int osp_sync_id_traction_init(struct osp_device *d) LASSERT(d); LASSERT(d->opd_storage); LASSERT(d->opd_syn_tracker == NULL); - CFS_INIT_LIST_HEAD(&d->opd_syn_ontrack); + INIT_LIST_HEAD(&d->opd_syn_ontrack); mutex_lock(&osp_id_tracker_sem); - cfs_list_for_each_entry(tr, &osp_id_tracker_list, otr_list) { + list_for_each_entry(tr, &osp_id_tracker_list, otr_list) { if (tr->otr_dev == d->opd_storage) { LASSERT(atomic_read(&tr->otr_refcount)); atomic_inc(&tr->otr_refcount); @@ -1233,8 +1498,8 @@ static int osp_sync_id_traction_init(struct osp_device *d) tr->otr_next_id = 1; tr->otr_committed_id = 0; atomic_set(&tr->otr_refcount, 1); - CFS_INIT_LIST_HEAD(&tr->otr_wakeup_list); - cfs_list_add(&tr->otr_list, &osp_id_tracker_list); + INIT_LIST_HEAD(&tr->otr_wakeup_list); + list_add(&tr->otr_list, &osp_id_tracker_list); tr->otr_tx_cb.dtc_txn_commit = osp_sync_tracker_commit_cb; tr->otr_tx_cb.dtc_cookie = tr; @@ -1248,6 +1513,14 @@ static int osp_sync_id_traction_init(struct osp_device *d) return rc; } +/** + * Release commit tracker. + * + * Decrease a refcounter on the tracker used by the given OSP device \a d. + * If no more users left, then the tracker is released. + * + * \param[in] d OSP device + */ static void osp_sync_id_traction_fini(struct osp_device *d) { struct osp_id_tracker *tr; @@ -1266,8 +1539,8 @@ static void osp_sync_id_traction_fini(struct osp_device *d) mutex_lock(&osp_id_tracker_sem); if (atomic_dec_and_test(&tr->otr_refcount)) { dt_txn_callback_del(d->opd_storage, &tr->otr_tx_cb); - LASSERT(cfs_list_empty(&tr->otr_wakeup_list)); - cfs_list_del(&tr->otr_list); + LASSERT(list_empty(&tr->otr_wakeup_list)); + list_del(&tr->otr_list); OBD_FREE_PTR(tr); d->opd_syn_tracker = NULL; } @@ -1276,8 +1549,18 @@ static void osp_sync_id_traction_fini(struct osp_device *d) EXIT; } -/* - * generates id for the tracker +/** + * Generate a new ID on a tracker. + * + * Generates a new ID using the tracker associated with the given OSP device + * \a d, if the given ID \a id is non-zero. Unconditially adds OSP device to + * the wakeup list, so OSP won't miss when a transaction using the ID is + * committed. Notice ID is 32bit, but llog doesn't support >2^32 records anyway. 
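+ *
+ * The typical use, as in osp_sync_add_rec() earlier in this file, is to
+ * keep the ID in the per-transaction data so all OSPs participating in
+ * the transaction share it:
+ * \code
+ *	txn = osp_txn_info(&storage_th->th_ctx);
+ *	txn->oti_current_id = osp_sync_id_get(d, txn->oti_current_id);
+ *	osi->osi_hdr.lrh_id = txn->oti_current_id;
+ * \endcode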
+ *
+ * \param[in] d OSP device
+ * \param[in] id 0 or ID generated previously
+ *
+ * \retval ID the caller should use
 */
 static __u32 osp_sync_id_get(struct osp_device *d, __u32 id)
 {
@@ -1300,14 +1583,23 @@ static __u32 osp_sync_id_get(struct osp_device *d, __u32 id)
 	id = tr->otr_next_id++;
 	if (id > d->opd_syn_last_used_id)
 		d->opd_syn_last_used_id = id;
-	if (cfs_list_empty(&d->opd_syn_ontrack))
-		cfs_list_add(&d->opd_syn_ontrack, &tr->otr_wakeup_list);
+	if (list_empty(&d->opd_syn_ontrack))
+		list_add(&d->opd_syn_ontrack, &tr->otr_wakeup_list);
 	spin_unlock(&tr->otr_lock);
 	CDEBUG(D_OTHER, "new id %u\n", (unsigned) id);
 	return id;
 }
+/**
+ * Stop propagating commit status to the OSP.
+ *
+ * If the OSP has no llog records it is waiting to commit, it can use this
+ * method to unsubscribe from wakeups from the tracker.
+ *
+ * \param[in] d OSP device that no longer needs wakeups
+ */
 static void osp_sync_remove_from_tracker(struct osp_device *d)
 {
 	struct osp_id_tracker *tr;
@@ -1315,11 +1607,11 @@ static void osp_sync_remove_from_tracker(struct osp_device *d)
 	tr = d->opd_syn_tracker;
 	LASSERT(tr);
-	if (cfs_list_empty(&d->opd_syn_ontrack))
+	if (list_empty(&d->opd_syn_ontrack))
 		return;
 	spin_lock(&tr->otr_lock);
-	cfs_list_del_init(&d->opd_syn_ontrack);
+	list_del_init(&d->opd_syn_ontrack);
 	spin_unlock(&tr->otr_lock);
 }
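Note: the per-request state introduced above replaces the earlier use of
rq_exp_list/rq_svc_thread for OSP bookkeeping. The round trip, pieced
together from the hunks in this patch (a sketch, not additional patch
content), looks like:

	struct osp_job_req_args *jra;

	/* attach: the args area is embedded in the request itself */
	jra = ptlrpc_req_async_args(req);
	jra->jra_magic = OSP_JOB_MAGIC;
	INIT_LIST_HEAD(&jra->jra_link);

	/* later, recover the request from the committed-list entry */
	jra = list_entry(list.next, struct osp_job_req_args, jra_link);
	req = container_of((void *)jra, struct ptlrpc_request, rq_async_args);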