* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2013, Intel Corporation.
+ * Copyright (c) 2012, 2015, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_MDS
+#include <linux/kthread.h>
#include <lustre_log.h>
#include <lustre_update.h>
#include "osp_internal.h"
#define OSP_JOB_MAGIC 0x26112005
-/**
- * Return status: whether OSP thread should keep running
- *
- * \param[in] d OSP device
- *
- * \retval 1 should keep running
- * \retval 0 should stop
- */
+/*
+ * Per-request bookkeeping for OSP sync jobs, carried in the RPC's
+ * rq_async_args area (retrieved via ptlrpc_req_async_args()).
+ */
+struct osp_job_req_args {
+ /** bytes reserved for ptlrpc_replay_req() */
+ struct ptlrpc_replay_async_args jra_raa;
+ /** linkage on opd_syn_committed_there once the target commits */
+ struct list_head jra_committed_link;
+ /** linkage on opd_syn_inflight_list while the RPC is in flight */
+ struct list_head jra_inflight_link;
+ /** sanity marker, set to OSP_JOB_MAGIC when the job is queued */
+ __u32 jra_magic;
+};
+
/**
 * Return status: whether the OSP sync thread should keep running.
 *
 * \param[in] d	OSP device
 *
 * \retval 1	should keep running
 * \retval 0	should stop
 */
static inline int osp_sync_running(struct osp_device *d)
{
	/* The `|| (d->opd_syn_prev_done == 0)` fragment that followed the
	 * return statement was unreachable (the return already ended with
	 * ';'), and a leading `||` is not a valid statement — it was a
	 * spliced remnant of a different predicate and is removed here. */
	return !!(d->opd_syn_thread.t_flags & SVC_RUNNING);
}
+/*
+ * Check whether llog record @h targets the same OST object as any RPC
+ * currently on opd_syn_inflight_list.  An ost_id is built from the
+ * record payload (MDS_UNLINK_REC / MDS_UNLINK64_REC / MDS_SETATTR64_REC
+ * only; any other type LBUG()s), then the in-flight list is scanned
+ * under opd_syn_lock comparing against each request's ost_body oa.o_oi.
+ * NOTE(review): callers appear to use a conflict to defer processing of
+ * the new record, presumably to keep dependent changes ordered — confirm.
+ *
+ * Returns 1 on conflict, 0 otherwise (also 0 for NULL records,
+ * generation records, or an empty in-flight list).
+ */
+static inline int osp_sync_inflight_conflict(struct osp_device *d,
+ struct llog_rec_hdr *h)
+{
+ struct osp_job_req_args *jra;
+ struct ost_id ostid;
+ int conflict = 0;
+
+ if (h == NULL || h->lrh_type == LLOG_GEN_REC ||
+ list_empty(&d->opd_syn_inflight_list))
+ return conflict;
+
+ memset(&ostid, 0, sizeof(ostid));
+ switch (h->lrh_type) {
+ case MDS_UNLINK_REC:
+ ostid_set_seq(&ostid, ((struct llog_unlink_rec *)h)->lur_oseq);
+ ostid_set_id(&ostid, ((struct llog_unlink_rec *)h)->lur_oid);
+ break;
+ case MDS_UNLINK64_REC:
+ fid_to_ostid(&((struct llog_unlink64_rec *)h)->lur_fid, &ostid);
+ break;
+ case MDS_SETATTR64_REC:
+ ostid = ((struct llog_setattr64_rec *)h)->lsr_oi;
+ break;
+ default:
+ LBUG();
+ }
+
+ spin_lock(&d->opd_syn_lock);
+ list_for_each_entry(jra, &d->opd_syn_inflight_list, jra_inflight_link) {
+ struct ptlrpc_request *req;
+ struct ost_body *body;
+
+ LASSERT(jra->jra_magic == OSP_JOB_MAGIC);
+
+ /* jra lives in rq_async_args, so back out to the request */
+ req = container_of((void *)jra, struct ptlrpc_request,
+ rq_async_args);
+ body = req_capsule_client_get(&req->rq_pill,
+ &RMF_OST_BODY);
+ LASSERT(body);
+
+ if (memcmp(&ostid, &body->oa.o_oi, sizeof(ostid)) == 0) {
+ conflict = 1;
+ break;
+ }
+ }
+ spin_unlock(&d->opd_syn_lock);
+
+ return conflict;
+}
+
static inline int osp_sync_low_in_progress(struct osp_device *d)
{
return d->opd_syn_rpc_in_progress < d->opd_syn_max_rpc_in_progress;
if (unlikely(atomic_read(&d->opd_syn_barrier) > 0))
return 0;
+ if (unlikely(osp_sync_inflight_conflict(d, rec)))
+ return 0;
if (!osp_sync_low_in_progress(d))
return 0;
if (!osp_sync_low_in_flight(d))
struct osp_thread_info *osi = osp_env_info(env);
struct osp_device *d = lu2osp_dev(o->opo_obj.do_lu.lo_dev);
struct llog_ctxt *ctxt;
+ struct thandle *storage_th;
int rc;
ENTRY;
/* it's a layering violation, to access internals of th,
* but we can do this as a sanity check, for a while */
- LASSERT(th->th_dev == d->opd_storage);
+ LASSERT(th->th_top != NULL);
+ storage_th = thandle_get_sub_by_dt(env, th->th_top, d->opd_storage);
+ if (IS_ERR(storage_th))
+ RETURN(PTR_ERR(storage_th));
switch (type) {
case MDS_UNLINK64_REC:
}
/* we want ->dt_trans_start() to allocate per-thandle structure */
- th->th_tags |= LCT_OSP_THREAD;
+ storage_th->th_tags |= LCT_OSP_THREAD;
ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT);
LASSERT(ctxt);
- rc = llog_declare_add(env, ctxt->loc_handle, &osi->osi_hdr, th);
+ rc = llog_declare_add(env, ctxt->loc_handle, &osi->osi_hdr,
+ storage_th);
llog_ctxt_put(ctxt);
RETURN(rc);
struct osp_thread_info *osi = osp_env_info(env);
struct llog_ctxt *ctxt;
struct osp_txn_info *txn;
+ struct thandle *storage_th;
int rc;
ENTRY;
/* it's a layering violation, to access internals of th,
* but we can do this as a sanity check, for a while */
- LASSERT(th->th_dev == d->opd_storage);
+ LASSERT(th->th_top != NULL);
+ storage_th = thandle_get_sub_by_dt(env, th->th_top, d->opd_storage);
+ if (IS_ERR(storage_th))
+ RETURN(PTR_ERR(storage_th));
switch (type) {
case MDS_UNLINK64_REC:
LBUG();
}
- txn = osp_txn_info(&th->th_ctx);
+ txn = osp_txn_info(&storage_th->th_ctx);
LASSERT(txn);
txn->oti_current_id = osp_sync_id_get(d, txn->oti_current_id);
ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT);
if (ctxt == NULL)
RETURN(-ENOMEM);
+
rc = llog_add(env, ctxt->loc_handle, &osi->osi_hdr, &osi->osi_cookie,
- th);
+ storage_th);
llog_ctxt_put(ctxt);
- CDEBUG(D_OTHER, "%s: new record "DOSTID":%lu/%lu: %d\n",
- d->opd_obd->obd_name, POSTID(&osi->osi_cookie.lgc_lgl.lgl_oi),
- (unsigned long) osi->osi_cookie.lgc_lgl.lgl_ogen,
- (unsigned long) osi->osi_cookie.lgc_index, rc);
-
- if (rc > 0)
- rc = 0;
-
- if (likely(rc == 0)) {
+ if (likely(rc >= 0)) {
+ CDEBUG(D_OTHER, "%s: new record "DOSTID":%lu/%lu: %d\n",
+ d->opd_obd->obd_name,
+ POSTID(&osi->osi_cookie.lgc_lgl.lgl_oi),
+ (unsigned long)osi->osi_cookie.lgc_lgl.lgl_ogen,
+ (unsigned long)osi->osi_cookie.lgc_index, rc);
spin_lock(&d->opd_syn_lock);
d->opd_syn_changes++;
spin_unlock(&d->opd_syn_lock);
}
-
- RETURN(rc);
+ /* always return 0 here; an error just means no llog record is added */
+ RETURN(0);
}
int osp_sync_add(const struct lu_env *env, struct osp_object *o,
}
+/*
+ * Record a gap caused by lost objects: adds an MDS_UNLINK64_REC llog
+ * record for @fid via osp_sync_add_rec().
+ * NOTE(review): @lost is passed straight through; presumably the count
+ * of lost objects — confirm against osp_sync_add_rec().
+ */
int osp_sync_gap(const struct lu_env *env, struct osp_device *d,
- struct lu_fid *fid, int lost, struct thandle *th)
+ struct lu_fid *fid, int lost, struct thandle *th)
{
return osp_sync_add_rec(env, d, fid, MDS_UNLINK64_REC, lost, th, NULL);
}
static void osp_sync_request_commit_cb(struct ptlrpc_request *req)
{
struct osp_device *d = req->rq_cb_data;
+ struct osp_job_req_args *jra;
CDEBUG(D_HA, "commit req %p, transno "LPU64"\n", req, req->rq_transno);
/* do not do any opd_dyn_rpc_* accounting here
* it's done in osp_sync_interpret sooner or later */
-
LASSERT(d);
- LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
- LASSERT(list_empty(&req->rq_exp_list));
+
+ jra = ptlrpc_req_async_args(req);
+ LASSERT(jra->jra_magic == OSP_JOB_MAGIC);
+ LASSERT(list_empty(&jra->jra_committed_link));
ptlrpc_request_addref(req);
spin_lock(&d->opd_syn_lock);
- list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
+ list_add(&jra->jra_committed_link, &d->opd_syn_committed_there);
spin_unlock(&d->opd_syn_lock);
/* XXX: some batching wouldn't hurt */
* The callback is called by ptlrpc when RPC is replied. Now we have to decide
* whether we should:
* - put request on a special list to wait until it's committed by the target,
- * if the request is succesful
+ * if the request is successful
* - schedule llog record cancel if no target object is found
* - try later (essentially after reboot) in case of unexpected error
*
struct ptlrpc_request *req, void *aa, int rc)
{
struct osp_device *d = req->rq_cb_data;
+ struct osp_job_req_args *jra = aa;
- if (req->rq_svc_thread != (void *) OSP_JOB_MAGIC)
- DEBUG_REQ(D_ERROR, req, "bad magic %p\n", req->rq_svc_thread);
- LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
+ if (jra->jra_magic != OSP_JOB_MAGIC) {
+ DEBUG_REQ(D_ERROR, req, "bad magic %u\n", jra->jra_magic);
+ LBUG();
+ }
LASSERT(d);
CDEBUG(D_HA, "reply req %p/%d, rc %d, transno %u\n", req,
* but object doesn't exist anymore - cancell llog record
*/
LASSERT(req->rq_transno == 0);
- LASSERT(list_empty(&req->rq_exp_list));
+ LASSERT(list_empty(&jra->jra_committed_link));
ptlrpc_request_addref(req);
spin_lock(&d->opd_syn_lock);
- list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
+ list_add(&jra->jra_committed_link, &d->opd_syn_committed_there);
spin_unlock(&d->opd_syn_lock);
wake_up(&d->opd_syn_waitq);
LASSERT(d->opd_syn_rpc_in_flight > 0);
spin_lock(&d->opd_syn_lock);
d->opd_syn_rpc_in_flight--;
+ list_del_init(&jra->jra_inflight_link);
spin_unlock(&d->opd_syn_lock);
if (unlikely(atomic_read(&d->opd_syn_barrier) > 0))
wake_up(&d->opd_syn_barrier_waitq);
+/*
+ * Hand a prepared sync RPC to ptlrpcd: stamp the per-request job args
+ * with OSP_JOB_MAGIC, initialize the committed link, add the job to
+ * opd_syn_inflight_list under opd_syn_lock, then queue the request.
+ */
static void osp_sync_send_new_rpc(struct osp_device *d,
struct ptlrpc_request *req)
{
+ struct osp_job_req_args *jra;
+
LASSERT(d->opd_syn_rpc_in_flight <= d->opd_syn_max_rpc_in_flight);
- LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
- ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+ jra = ptlrpc_req_async_args(req);
+ jra->jra_magic = OSP_JOB_MAGIC;
+ INIT_LIST_HEAD(&jra->jra_committed_link);
+ spin_lock(&d->opd_syn_lock);
+ list_add_tail(&jra->jra_inflight_link, &d->opd_syn_inflight_list);
+ spin_unlock(&d->opd_syn_lock);
+
+ ptlrpcd_add_req(req);
}
/* Prepare the request */
imp = d->opd_obd->u.cli.cl_import;
LASSERT(imp);
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSP_CHECK_ENOMEM))
+ RETURN(ERR_PTR(-ENOMEM));
+
req = ptlrpc_request_alloc(imp, format);
if (req == NULL)
RETURN(ERR_PTR(-ENOMEM));
body->oa.o_lcookie.lgc_lgl = llh->lgh_id;
body->oa.o_lcookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
body->oa.o_lcookie.lgc_index = h->lrh_index;
- INIT_LIST_HEAD(&req->rq_exp_list);
- req->rq_svc_thread = (void *) OSP_JOB_MAGIC;
req->rq_interpret_reply = osp_sync_interpret;
req->rq_commit_cb = osp_sync_request_commit_cb;
* \param[in] h llog record
*
* \retval 0 on success
+ * \retval 1 on invalid record
* \retval negative negated errno on error
*/
static int osp_sync_new_setattr_job(struct osp_device *d,
ENTRY;
LASSERT(h->lrh_type == MDS_SETATTR64_REC);
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSP_CHECK_INVALID_REC))
+ RETURN(1);
/* lsr_valid can only be 0 or have OBD_MD_{FLUID,FLGID} set,
* so no bits other than these should be set. */
if ((rec->lsr_valid & ~(OBD_MD_FLUID | OBD_MD_FLGID)) != 0) {
CERROR("%s: invalid setattr record, lsr_valid:"LPU64"\n",
d->opd_obd->obd_name, rec->lsr_valid);
- /* return 0 so that sync thread can continue processing
- * other records. */
- RETURN(0);
+ /* return 1 on invalid record */
+ RETURN(1);
}
req = osp_sync_new_job(d, llh, h, OST_SETATTR, &RQF_OST_SETATTR);
body->oa.o_valid |= rec->lsr_valid;
osp_sync_send_new_rpc(d, req);
- RETURN(1);
+ RETURN(0);
}
/**
body->oa.o_valid |= OBD_MD_FLOBJCOUNT;
osp_sync_send_new_rpc(d, req);
- RETURN(1);
-}
-
-/**
- * Prepare OUT-based object destroy RPC.
- *
- * The function allocates a new RPC with OUT format. Then initializes the RPC
- * to contain OUT_DESTROY update against the object specified in the llog
- * record provided by the caller.
- *
- * \param[in] env LU environment provided by the caller
- * \param[in] osp OSP device
- * \param[in] llh llog handle where the record is stored
- * \param[in] h llog record
- * \param[out] reqp request prepared
- *
- * \retval 0 on success
- * \retval negative negated errno on error
- */
-static int osp_prep_unlink_update_req(const struct lu_env *env,
- struct osp_device *osp,
- struct llog_handle *llh,
- struct llog_rec_hdr *h,
- struct ptlrpc_request **reqp)
-{
- struct llog_unlink64_rec *rec = (struct llog_unlink64_rec *)h;
- struct dt_update_request *update = NULL;
- struct ptlrpc_request *req;
- struct llog_cookie lcookie;
- const void *buf;
- __u16 size;
- int rc;
- ENTRY;
-
- update = dt_update_request_create(&osp->opd_dt_dev);
- if (IS_ERR(update))
- RETURN(PTR_ERR(update));
-
- /* This can only happens for unlink slave directory, so decrease
- * ref for ".." and "." */
- rc = out_update_pack(env, &update->dur_buf, OUT_REF_DEL, &rec->lur_fid,
- 0, NULL, NULL, 0);
- if (rc != 0)
- GOTO(out, rc);
-
- rc = out_update_pack(env, &update->dur_buf, OUT_REF_DEL, &rec->lur_fid,
- 0, NULL, NULL, 0);
- if (rc != 0)
- GOTO(out, rc);
-
- lcookie.lgc_lgl = llh->lgh_id;
- lcookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
- lcookie.lgc_index = h->lrh_index;
- size = sizeof(lcookie);
- buf = &lcookie;
-
- rc = out_update_pack(env, &update->dur_buf, OUT_DESTROY, &rec->lur_fid,
- 1, &size, &buf, 0);
- if (rc != 0)
- GOTO(out, rc);
-
- rc = out_prep_update_req(env, osp->opd_obd->u.cli.cl_import,
- update->dur_buf.ub_req, &req);
- if (rc != 0)
- GOTO(out, rc);
-
- INIT_LIST_HEAD(&req->rq_exp_list);
- req->rq_svc_thread = (void *)OSP_JOB_MAGIC;
-
- req->rq_interpret_reply = osp_sync_interpret;
- req->rq_commit_cb = osp_sync_request_commit_cb;
- req->rq_cb_data = osp;
-
- ptlrpc_request_set_replen(req);
- *reqp = req;
-out:
- if (update != NULL)
- dt_update_request_destroy(update);
-
- RETURN(rc);
+ RETURN(0);
}
/**
* use OUT for OST as well, this will allow batching and better code
* unification.
*
- * \param[in] env LU environment provided by the caller
* \param[in] d OSP device
* \param[in] llh llog handle where the record is stored
* \param[in] h llog record
* \retval 0 on success
* \retval negative negated errno on error
*/
-static int osp_sync_new_unlink64_job(const struct lu_env *env,
- struct osp_device *d,
+static int osp_sync_new_unlink64_job(struct osp_device *d,
struct llog_handle *llh,
struct llog_rec_hdr *h)
{
ENTRY;
LASSERT(h->lrh_type == MDS_UNLINK64_REC);
+ req = osp_sync_new_job(d, llh, h, OST_DESTROY,
+ &RQF_OST_DESTROY);
+ if (IS_ERR(req))
+ RETURN(PTR_ERR(req));
- if (d->opd_connect_mdt) {
- rc = osp_prep_unlink_update_req(env, d, llh, h, &req);
- if (rc != 0)
- RETURN(rc);
- } else {
- req = osp_sync_new_job(d, llh, h, OST_DESTROY,
- &RQF_OST_DESTROY);
- if (IS_ERR(req))
- RETURN(PTR_ERR(req));
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
- if (body == NULL)
- RETURN(-EFAULT);
- rc = fid_to_ostid(&rec->lur_fid, &body->oa.o_oi);
- if (rc < 0)
- RETURN(rc);
- body->oa.o_misc = rec->lur_count;
- body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID |
- OBD_MD_FLOBJCOUNT;
- }
+ body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+ if (body == NULL)
+ RETURN(-EFAULT);
+ rc = fid_to_ostid(&rec->lur_fid, &body->oa.o_oi);
+ if (rc < 0)
+ RETURN(rc);
+ body->oa.o_misc = rec->lur_count;
+ body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID |
+ OBD_MD_FLOBJCOUNT;
osp_sync_send_new_rpc(d, req);
- RETURN(1);
+ RETURN(0);
}
/**
* \param[in] d OSP device
* \param[in] llh llog handle where the record is stored
* \param[in] rec llog record
- *
- * \retval 0 on success
- * \retval negative negated errno on error
*/
-static int osp_sync_process_record(const struct lu_env *env,
- struct osp_device *d,
- struct llog_handle *llh,
- struct llog_rec_hdr *rec)
+static void osp_sync_process_record(const struct lu_env *env,
+ struct osp_device *d,
+ struct llog_handle *llh,
+ struct llog_rec_hdr *rec)
{
+ struct llog_handle *cathandle = llh->u.phd.phd_cat_handle;
struct llog_cookie cookie;
int rc = 0;
+ ENTRY;
+
cookie.lgc_lgl = llh->lgh_id;
cookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
cookie.lgc_index = rec->lrh_index;
}
/* cancel any generation record */
- rc = llog_cat_cancel_records(env, llh->u.phd.phd_cat_handle,
- 1, &cookie);
+ rc = llog_cat_cancel_records(env, cathandle, 1, &cookie);
- return rc;
+ RETURN_EXIT;
}
/*
rc = osp_sync_new_unlink_job(d, llh, rec);
break;
case MDS_UNLINK64_REC:
- rc = osp_sync_new_unlink64_job(env, d, llh, rec);
+ rc = osp_sync_new_unlink64_job(d, llh, rec);
break;
case MDS_SETATTR64_REC:
rc = osp_sync_new_setattr_job(d, llh, rec);
default:
CERROR("%s: unknown record type: %x\n", d->opd_obd->obd_name,
rec->lrh_type);
- /* we should continue processing */
+ /* treat "unknown record type" as "invalid" */
+ rc = 1;
+ break;
}
- /* rc > 0 means sync RPC being added to the queue */
- if (likely(rc > 0)) {
- spin_lock(&d->opd_syn_lock);
- if (d->opd_syn_prev_done) {
- LASSERT(d->opd_syn_changes > 0);
- LASSERT(rec->lrh_id <= d->opd_syn_last_committed_id);
- /*
- * NOTE: it's possible to meet same id if
- * OST stores few stripes of same file
- */
- if (rec->lrh_id > d->opd_syn_last_processed_id) {
- d->opd_syn_last_processed_id = rec->lrh_id;
- wake_up(&d->opd_syn_barrier_waitq);
- }
+ spin_lock(&d->opd_syn_lock);
- d->opd_syn_changes--;
+ /* For all kinds of records, no matter whether they were successful
+ * or not, we should decrease changes and bump last_processed_id.
+ */
+ if (d->opd_syn_prev_done) {
+ LASSERT(d->opd_syn_changes > 0);
+ LASSERT(rec->lrh_id <= d->opd_syn_last_committed_id);
+ /* NOTE: it's possible to meet same id if
+ * OST stores few stripes of same file
+ */
+ if (rec->lrh_id > d->opd_syn_last_processed_id) {
+ d->opd_syn_last_processed_id = rec->lrh_id;
+ wake_up(&d->opd_syn_barrier_waitq);
}
- CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
- d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
- d->opd_syn_rpc_in_progress);
- spin_unlock(&d->opd_syn_lock);
- rc = 0;
- } else {
- spin_lock(&d->opd_syn_lock);
+ d->opd_syn_changes--;
+ }
+ if (rc != 0) {
d->opd_syn_rpc_in_flight--;
d->opd_syn_rpc_in_progress--;
- spin_unlock(&d->opd_syn_lock);
}
+ CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
+ d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
+ d->opd_syn_rpc_in_progress);
- CDEBUG(D_HA, "found record %x, %d, idx %u, id %u: %d\n",
- rec->lrh_type, rec->lrh_len, rec->lrh_index, rec->lrh_id, rc);
- return rc;
+ spin_unlock(&d->opd_syn_lock);
+
+ /* Delete the invalid record */
+ if (rc == 1) {
+ rc = llog_cat_cancel_records(env, cathandle, 1, &cookie);
+ if (rc != 0)
+ CERROR("%s: can't delete invalid record: "
+ "fid = "DFID", rec_id = %u, rc = %d\n",
+ d->opd_obd->obd_name,
+ PFID(lu_object_fid(&cathandle->lgh_obj->do_lu)),
+ rec->lrh_id, rc);
+ }
+
+ CDEBUG(D_HA, "found record %x, %d, idx %u, id %u\n",
+ rec->lrh_type, rec->lrh_len, rec->lrh_index, rec->lrh_id);
+
+ RETURN_EXIT;
}
/**
struct obd_device *obd = d->opd_obd;
struct obd_import *imp = obd->u.cli.cl_import;
struct ost_body *body;
- struct ptlrpc_request *req, *tmp;
+ struct ptlrpc_request *req;
struct llog_ctxt *ctxt;
struct llog_handle *llh;
struct list_head list;
INIT_LIST_HEAD(&d->opd_syn_committed_there);
spin_unlock(&d->opd_syn_lock);
- list_for_each_entry_safe(req, tmp, &list, rq_exp_list) {
- struct llog_cookie *lcookie = NULL;
-
- LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
- list_del_init(&req->rq_exp_list);
-
- if (d->opd_connect_mdt) {
- struct object_update_request *ureq;
- struct object_update *update;
- ureq = req_capsule_client_get(&req->rq_pill,
- &RMF_OUT_UPDATE);
- LASSERT(ureq != NULL &&
- ureq->ourq_magic == UPDATE_REQUEST_MAGIC);
-
- /* 1st/2nd is for decref . and .., 3rd one is for
- * destroy, where the log cookie is stored.
- * See osp_prep_unlink_update_req */
- update = object_update_request_get(ureq, 2, NULL);
- LASSERT(update != NULL);
- lcookie = object_update_param_get(update, 0, NULL);
- LASSERT(lcookie != NULL);
- } else {
- body = req_capsule_client_get(&req->rq_pill,
- &RMF_OST_BODY);
- LASSERT(body);
- lcookie = &body->oa.o_lcookie;
- }
+ while (!list_empty(&list)) {
+ struct osp_job_req_args *jra;
+
+ jra = list_entry(list.next, struct osp_job_req_args,
+ jra_committed_link);
+ LASSERT(jra->jra_magic == OSP_JOB_MAGIC);
+ list_del_init(&jra->jra_committed_link);
+
+ req = container_of((void *)jra, struct ptlrpc_request,
+ rq_async_args);
+ body = req_capsule_client_get(&req->rq_pill,
+ &RMF_OST_BODY);
+ LASSERT(body);
/* import can be closing, thus all commit cb's are
* called we can check committness directly */
- if (req->rq_transno <= imp->imp_peer_committed_transno) {
- rc = llog_cat_cancel_records(env, llh, 1, lcookie);
+ if (req->rq_import_generation == imp->imp_generation) {
+ rc = llog_cat_cancel_records(env, llh, 1,
+ &body->oa.o_lcookie);
if (rc)
CERROR("%s: can't cancel record: %d\n",
obd->obd_name, rc);
} else {
- DEBUG_REQ(D_HA, req, "not committed");
+ DEBUG_REQ(D_OTHER, req, "imp_committed = "LPU64,
+ imp->imp_peer_committed_transno);
}
-
ptlrpc_req_finished(req);
done++;
}
void *data)
{
struct osp_device *d = data;
- int rc;
do {
struct l_wait_info lwi = { 0 };
d->opd_syn_rpc_in_flight);
return 0;
}
-
- /*
- * try to send, in case of disconnection, suspend
- * processing till we can send this request
- */
- do {
- rc = osp_sync_process_record(env, d, llh, rec);
- /*
- * XXX: probably different handling is needed
- * for some bugs, like immediate exit or if
- * OSP gets inactive
- */
- if (rc) {
- CERROR("can't send: %d\n", rc);
- l_wait_event(d->opd_syn_waitq,
- !osp_sync_running(d) ||
- osp_sync_has_work(d),
- &lwi);
- }
- } while (rc != 0 && osp_sync_running(d));
-
+ osp_sync_process_record(env, d, llh, rec);
llh = NULL;
rec = NULL;
}
OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt);
obd->obd_lvfs_ctxt.dt = d->opd_storage;
- if (d->opd_connect_mdt)
- lu_local_obj_fid(fid, SLAVE_LLOG_CATALOGS_OID);
- else
- lu_local_obj_fid(fid, LLOG_CATALOGS_OID);
+ lu_local_obj_fid(fid, LLOG_CATALOGS_OID);
rc = llog_osd_get_cat_list(env, d->opd_storage, d->opd_index, 1,
&osi->osi_cid, fid);
POSTID(&osi->osi_cid.lci_logid.lgl_oi),
osi->osi_cid.lci_logid.lgl_ogen);
- rc = llog_setup(env, obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, obd,
+ rc = llog_setup(env, obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT,
+ d->opd_storage->dd_lu_dev.ld_obd,
&osp_mds_ost_orig_logops);
if (rc)
RETURN(rc);
init_waitqueue_head(&d->opd_syn_waitq);
init_waitqueue_head(&d->opd_syn_barrier_waitq);
init_waitqueue_head(&d->opd_syn_thread.t_ctl_waitq);
+ INIT_LIST_HEAD(&d->opd_syn_inflight_list);
INIT_LIST_HEAD(&d->opd_syn_committed_there);
task = kthread_run(osp_sync_thread, d, "osp-syn-%u-%u",