* GPL HEADER END
*/
/*
- * Copyright (c) 2014, Intel Corporation.
+ * Copyright (c) 2014, 2016, Intel Corporation.
*/
/*
* lustre/osp/osp_trans.c
atomic_t *oaua_count;
wait_queue_head_t *oaua_waitq;
bool oaua_flow_control;
+ const struct lu_env *oaua_update_env;
};
/**
ours->ours_req_size = size;
INIT_LIST_HEAD(&ours->ours_list);
list_add_tail(&ours->ours_list, &our->our_req_list);
+ our->our_req_nr++;
return 0;
}
struct osp_update_request *osp_update_request_create(struct dt_device *dt)
{
struct osp_update_request *our;
+ int rc;
OBD_ALLOC_PTR(our);
if (our == NULL)
INIT_LIST_HEAD(&our->our_req_list);
INIT_LIST_HEAD(&our->our_cb_items);
INIT_LIST_HEAD(&our->our_list);
+ INIT_LIST_HEAD(&our->our_invalidate_cb_list);
+ spin_lock_init(&our->our_list_lock);
- osp_object_update_request_create(our, OUT_UPDATE_INIT_BUFFER_SIZE);
+ rc = osp_object_update_request_create(our, OUT_UPDATE_INIT_BUFFER_SIZE);
+ if (rc != 0) {
+ OBD_FREE_PTR(our);
+ return ERR_PTR(rc);
+ }
return our;
}
+/**
+ * Destroy an update request
+ *
+ * Free every sub-request buffer on the request list, drop the references
+ * held on objects queued for invalidation (initializing a temporary env
+ * when the caller passed none), then free the request itself.
+ *
+ * \param[in] env execution environment; may be NULL, in which case a
+ * temporary env is initialized and finalized locally
+ * \param[in] our update request to destroy
+ */
-void osp_update_request_destroy(struct osp_update_request *our)
+void osp_update_request_destroy(const struct lu_env *env,
+ struct osp_update_request *our)
{
struct osp_update_request_sub *ours;
struct osp_update_request_sub *tmp;
list_for_each_entry_safe(ours, tmp, &our->our_req_list, ours_list) {
list_del(&ours->ours_list);
if (ours->ours_req != NULL)
- OBD_FREE(ours->ours_req, ours->ours_req_size);
+ OBD_FREE_LARGE(ours->ours_req, ours->ours_req_size);
OBD_FREE_PTR(ours);
}
+
+ if (!list_empty(&our->our_invalidate_cb_list)) {
+ struct lu_env lenv;
+ struct osp_object *obj;
+ struct osp_object *next;
+
+ if (env == NULL) {
+ /* NOTE(review): lu_env_init() result is not checked —
+ * confirm failure is impossible in this context */
+ lu_env_init(&lenv, LCT_MD_THREAD | LCT_DT_THREAD);
+ env = &lenv;
+ }
+
+ list_for_each_entry_safe(obj, next,
+ &our->our_invalidate_cb_list,
+ opo_invalidate_cb_list) {
+ spin_lock(&obj->opo_lock);
+ list_del_init(&obj->opo_invalidate_cb_list);
+ spin_unlock(&obj->opo_lock);
+
+ lu_object_put(env, &obj->opo_obj.do_lu);
+ }
+
+ if (env == &lenv)
+ lu_env_fini(&lenv);
+ }
+
OBD_FREE_PTR(our);
}
update = object_update_request_get(ourq, i, &size);
LASSERT(update != NULL);
- CDEBUG(mask, "i = %u fid = "DFID" op = %s master = %u"
- "params = %d batchid = "LPU64" size = %zu\n",
+ CDEBUG(mask, "i = %u fid = "DFID" op = %s "
+ "params = %d batchid = %llu size = %zu repsize %u\n",
i, PFID(&update->ou_fid),
update_op_str(update->ou_type),
- update->ou_master_index, update->ou_params_count,
- update->ou_batchid, size);
+ update->ou_params_count,
+ update->ou_batchid, size,
+ (unsigned)update->ou_result_size);
total_size += size;
}
}
/**
+ * Prepare inline update request
+ *
+ * Prepare OUT update ptlrpc inline request, and the request usually includes
+ * one update buffer, which does not need bulk transfer.
+ *
+ * \param[in] env execution environment
+ * \param[in] req ptlrpc request
+ * \param[in] our osp_update_request whose single sub-request buffer
+ * will be copied inline into the request message
+ * \param[in] repsize size in bytes to reserve for the server reply
+ * buffer
+ *
+ * \retval 0 if packing succeeds
+ * \retval negative errno if packing fails
+ */
+int osp_prep_inline_update_req(const struct lu_env *env,
+ struct ptlrpc_request *req,
+ struct osp_update_request *our,
+ int repsize)
+{
+ struct osp_update_request_sub *ours;
+ struct out_update_header *ouh;
+ __u32 update_req_size;
+ int rc;
+
+ /* inline requests carry exactly one sub-request buffer */
+ ours = list_entry(our->our_req_list.next,
+ struct osp_update_request_sub, ours_list);
+ update_req_size = object_update_request_size(ours->ours_req);
+ req_capsule_set_size(&req->rq_pill, &RMF_OUT_UPDATE_HEADER, RCL_CLIENT,
+ update_req_size + sizeof(*ouh));
+
+ rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, OUT_UPDATE);
+ if (rc != 0)
+ RETURN(rc);
+
+ ouh = req_capsule_client_get(&req->rq_pill, &RMF_OUT_UPDATE_HEADER);
+ ouh->ouh_magic = OUT_UPDATE_HEADER_MAGIC;
+ ouh->ouh_count = 1;
+ ouh->ouh_inline_length = update_req_size;
+ ouh->ouh_reply_size = repsize;
+
+ memcpy(ouh->ouh_inline_data, ours->ours_req, update_req_size);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_OUT_UPDATE_REPLY,
+ RCL_SERVER, repsize);
+
+ ptlrpc_request_set_replen(req);
+ req->rq_request_portal = OUT_PORTAL;
+ req->rq_reply_portal = OSC_REPLY_PORTAL;
+
+ RETURN(rc);
+}
+
+/**
* Prepare update request.
*
* Prepare OUT update ptlrpc request, and the request usually includes
struct ptlrpc_request *req;
struct ptlrpc_bulk_desc *desc;
struct osp_update_request_sub *ours;
+ const struct object_update_request *ourq;
struct out_update_header *ouh;
struct out_update_buffer *oub;
__u32 buf_count = 0;
- int rc;
+ int repsize = 0;
+ struct object_update_reply *reply;
+ int rc, i;
+ int total = 0;
ENTRY;
list_for_each_entry(ours, &our->our_req_list, ours_list) {
object_update_request_dump(ours->ours_req, D_INFO);
+
+ ourq = ours->ours_req;
+ for (i = 0; i < ourq->ourq_count; i++) {
+ struct object_update *update;
+ size_t size = 0;
+
+
+ /* XXX: it's very inefficient to lookup update
+ * this way, iterating from the beginning
+ * each time */
+ update = object_update_request_get(ourq, i, &size);
+ LASSERT(update != NULL);
+
+ repsize += sizeof(reply->ourp_lens[0]);
+ repsize += sizeof(struct object_update_result);
+ repsize += update->ou_result_size;
+ }
+
buf_count++;
}
+ repsize += sizeof(*reply);
+ repsize = (repsize + OUT_UPDATE_REPLY_SIZE - 1) &
+ ~(OUT_UPDATE_REPLY_SIZE - 1);
+ LASSERT(buf_count > 0);
req = ptlrpc_request_alloc(imp, &RQF_OUT_UPDATE);
if (req == NULL)
RETURN(-ENOMEM);
+ if (buf_count == 1) {
+ ours = list_entry(our->our_req_list.next,
+ struct osp_update_request_sub, ours_list);
+
+ /* Let's check if it can be packed inline */
+ if (object_update_request_size(ours->ours_req) +
+ sizeof(struct out_update_header) <
+ OUT_UPDATE_MAX_INLINE_SIZE) {
+ rc = osp_prep_inline_update_req(env, req, our, repsize);
+ if (rc == 0)
+ *reqp = req;
+ GOTO(out_req, rc);
+ }
+ }
+
+ req_capsule_set_size(&req->rq_pill, &RMF_OUT_UPDATE_HEADER, RCL_CLIENT,
+ sizeof(struct osp_update_request));
+
req_capsule_set_size(&req->rq_pill, &RMF_OUT_UPDATE_BUF, RCL_CLIENT,
buf_count * sizeof(*oub));
ouh = req_capsule_client_get(&req->rq_pill, &RMF_OUT_UPDATE_HEADER);
ouh->ouh_magic = OUT_UPDATE_HEADER_MAGIC;
ouh->ouh_count = buf_count;
-
+ ouh->ouh_inline_length = 0;
+ ouh->ouh_reply_size = repsize;
oub = req_capsule_client_get(&req->rq_pill, &RMF_OUT_UPDATE_BUF);
list_for_each_entry(ours, &our->our_req_list, ours_list) {
oub->oub_size = ours->ours_req_size;
GOTO(out_req, rc = -ENOMEM);
/* NB req now owns desc and will free it when it gets freed */
- list_for_each_entry(ours, &our->our_req_list, ours_list)
+ list_for_each_entry(ours, &our->our_req_list, ours_list) {
desc->bd_frag_ops->add_iov_frag(desc, ours->ours_req,
ours->ours_req_size);
+ total += ours->ours_req_size;
+ }
+ CDEBUG(D_OTHER, "total %d in %u\n", total, our->our_update_nr);
req_capsule_set_size(&req->rq_pill, &RMF_OUT_UPDATE_REPLY,
- RCL_SERVER, OUT_UPDATE_REPLY_SIZE);
+ RCL_SERVER, repsize);
ptlrpc_request_set_replen(req);
req->rq_request_portal = OUT_PORTAL;
* \retval 0 if RPC succeeds.
* \retval negative errno if RPC fails.
*/
-
int osp_remote_sync(const struct lu_env *env, struct osp_device *osp,
struct osp_update_request *our,
struct ptlrpc_request **reqp)
if (rc != 0)
RETURN(rc);
- /* This will only be called with read-only update, and these updates
- * might be used to retrieve update log during recovery process, so
- * it will be allowed to send during recovery process */
- req->rq_allow_replay = 1;
+ osp_set_req_replay(osp, req);
+ req->rq_allow_intr = 1;
/* Note: some dt index api might return non-zero result here, like
* osd_index_ea_lookup, so we should only check rc < 0 here */
RETURN(rc);
}
-static void osp_trans_stop_cb(struct osp_thandle *oth, int result)
+/**
+ * Invalidate all objects in the osp thandle
+ *
+ * Walk the objects attached to the thandle's update request: invalidate
+ * each one only when the transaction failed (\a result < 0), then unlink
+ * it from the invalidate list and drop the reference held on it.  Called
+ * when the transaction is stopped or aborted.
+ *
+ * \param[in] env execution environment
+ * \param[in] oth osp thandle holding the update request
+ * \param[in] result transaction result; objects are invalidated only
+ * when it is negative
+ */
+static void osp_thandle_invalidate_object(const struct lu_env *env,
+ struct osp_thandle *oth,
+ int result)
+{
+ struct osp_update_request *our = oth->ot_our;
+ struct osp_object *obj;
+ struct osp_object *next;
+
+ if (our == NULL)
+ return;
+
+ list_for_each_entry_safe(obj, next, &our->our_invalidate_cb_list,
+ opo_invalidate_cb_list) {
+ if (result < 0)
+ osp_invalidate(env, &obj->opo_obj);
+
+ spin_lock(&obj->opo_lock);
+ list_del_init(&obj->opo_invalidate_cb_list);
+ spin_unlock(&obj->opo_lock);
+
+ lu_object_put(env, &obj->opo_obj.do_lu);
+ }
+}
+
+static void osp_trans_stop_cb(const struct lu_env *env,
+ struct osp_thandle *oth, int result)
{
struct dt_txn_commit_cb *dcb;
struct dt_txn_commit_cb *tmp;
list_del_init(&dcb->dcb_linkage);
dcb->dcb_func(NULL, &oth->ot_super, dcb, result);
}
+
+ osp_thandle_invalidate_object(env, oth, result);
}
/**
if (our == NULL)
RETURN(0);
+ /* Sigh, env might be NULL in some cases — see this call path:
+ * osp_send_update_thread()
+ * ptlrpc_set_wait() ----> null env.
+ * ptlrpc_check_set()
+ * osp_update_interpret()
+ * Use the env saved in oaua for this case.
+ */
+ if (env == NULL)
+ env = oaua->oaua_update_env;
+
oaua->oaua_update = NULL;
oth = our->our_th;
if (oaua->oaua_flow_control) {
}
/* Unpack the results from the reply message. */
- if (req->rq_repmsg != NULL) {
+ if (req->rq_repmsg != NULL && req->rq_replied) {
reply = req_capsule_server_sized_get(&req->rq_pill,
&RMF_OUT_UPDATE_REPLY,
OUT_UPDATE_REPLY_SIZE);
- if (reply == NULL || reply->ourp_magic != UPDATE_REPLY_MAGIC)
- rc1 = -EPROTO;
- else
+ if (reply == NULL || reply->ourp_magic != UPDATE_REPLY_MAGIC) {
+ if (rc == 0)
+ rc = -EPROTO;
+ } else {
count = reply->ourp_count;
- } else {
- rc1 = rc;
+ }
}
list_for_each_entry_safe(ouc, next, &our->our_cb_items, ouc_list) {
/* The peer may only have handled some requests (indicated
* by the 'count') in the packaged OUT RPC, we can only get
* results for the handled part. */
- if (index < count && reply->ourp_lens[index] > 0) {
+ if (index < count && reply->ourp_lens[index] > 0 && rc >= 0) {
struct object_update_result *result;
result = object_update_result_get(reply, index, NULL);
if (result == NULL)
- rc1 = -EPROTO;
+ rc1 = rc = -EPROTO;
else
- rc1 = result->our_rc;
- } else {
- rc1 = rc;
- if (unlikely(rc1 == 0))
+ rc1 = rc = result->our_rc;
+ } else if (rc1 >= 0) {
+ /* The peer did not handle these requests, so return
+ * -EINVAL to the update interpreter for now */
+ if (rc >= 0)
rc1 = -EINVAL;
+ else
+ rc1 = rc;
}
if (ouc->ouc_interpreter != NULL)
if (oth != NULL) {
/* oth and osp_update_requests will be destroyed in
 * osp_thandle_put */
- osp_trans_stop_cb(oth, rc);
- osp_thandle_put(oth);
+ osp_trans_stop_cb(env, oth, rc);
+ osp_thandle_put(env, oth);
} else {
- osp_update_request_destroy(our);
+ osp_update_request_destroy(env, our);
}
- RETURN(0);
+ RETURN(rc);
}
/**
ouc->ouc_data, 0, rc);
osp_update_callback_fini(env, ouc);
}
- osp_update_request_destroy(our);
+ osp_update_request_destroy(env, our);
} else {
args = ptlrpc_req_async_args(req);
args->oaua_update = our;
args->oaua_count = NULL;
args->oaua_waitq = NULL;
+ /* Note: this is an asynchronous call for the request, so the
+ * interpret cb and the current function may run in different
+ * threads, hence a different env must be used */
+ args->oaua_update_env = NULL;
args->oaua_flow_control = false;
req->rq_interpret_reply = osp_update_interpret;
- ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
+ ptlrpcd_add_req(req);
}
return rc;
* \param[in] lens buffer length array for the subsequent \a bufs
* \param[in] bufs the buffers to compose the request
* \param[in] data pointer to the data used by the interpreter
+ * \param[in] repsize expected size in bytes of the update result (reply)
* \param[in] interpreter pointer to the interpreter function
*
* \retval 0 for success
*/
int osp_insert_async_request(const struct lu_env *env, enum update_type op,
struct osp_object *obj, int count,
- __u16 *lens, const void **bufs, void *data,
+ __u16 *lens, const void **bufs,
+ void *data, __u32 repsize,
osp_update_interpreter_t interpreter)
{
struct osp_device *osp;
object_update = update_buffer_get_update(ureq, ureq->ourq_count);
rc = out_update_pack(env, object_update, &max_update_size, op,
- lu_object_fid(osp2lu_obj(obj)), count, lens, bufs);
+ lu_object_fid(osp2lu_obj(obj)), count, lens, bufs,
+ repsize);
/* The queue is full. */
if (rc == -E2BIG) {
osp->opd_async_requests = NULL;
RETURN(rc);
ureq->ourq_count++;
+ our->our_update_nr++;
}
rc = osp_insert_update_callback(env, our, obj, data, interpreter);
oth->ot_our = our;
our->our_th = oth;
- if (oth->ot_super.th_sync)
- oth->ot_our->our_flags |= UPDATE_FL_SYNC;
-
return 0;
}
+/**
+ * Destroy an osp thandle
+ *
+ * Verify the thandle magic and that no commit/stop callbacks remain,
+ * destroy the attached update request (if any) and free the thandle.
+ *
+ * \param[in] env execution environment; may be NULL — see
+ * osp_update_request_destroy()
+ * \param[in] oth osp thandle to destroy
+ */
-void osp_thandle_destroy(struct osp_thandle *oth)
+void osp_thandle_destroy(const struct lu_env *env,
+ struct osp_thandle *oth)
{
LASSERT(oth->ot_magic == OSP_THANDLE_MAGIC);
LASSERT(list_empty(&oth->ot_commit_dcb_list));
LASSERT(list_empty(&oth->ot_stop_dcb_list));
if (oth->ot_our != NULL)
- osp_update_request_destroy(oth->ot_our);
+ osp_update_request_destroy(env, oth->ot_our);
OBD_FREE_PTR(oth);
}
RETURN_EXIT;
oth = thandle_to_osp_thandle(th);
- if (lustre_msg_get_last_committed(req->rq_repmsg))
+ if (req->rq_repmsg != NULL &&
+ lustre_msg_get_last_committed(req->rq_repmsg))
last_committed_transno =
lustre_msg_get_last_committed(req->rq_repmsg);
last_committed_transno =
req->rq_import->imp_peer_committed_transno;
- CDEBUG(D_HA, "trans no "LPU64" committed transno "LPU64"\n",
+ CDEBUG(D_HA, "trans no %llu committed transno %llu\n",
req->rq_transno, last_committed_transno);
/* If the transaction is not really committed, mark result = 1 */
osp_trans_commit_cb(oth, result);
req->rq_committed = 1;
- osp_thandle_put(oth);
+ osp_thandle_put(NULL, oth);
EXIT;
}
osp_update_callback_fini(env, ouc);
}
}
- osp_trans_stop_cb(oth, rc);
+ osp_trans_stop_cb(env, oth, rc);
osp_trans_commit_cb(oth, rc);
}
{
struct osp_update_args *args;
struct ptlrpc_request *req;
- struct lu_device *top_device;
struct osp_thandle *oth = our->our_th;
int rc = 0;
ENTRY;
LASSERT(oth != NULL);
- LASSERT(our->our_req_sent == 0);
rc = osp_prep_update_req(env, osp->opd_obd->u.cli.cl_import,
our, &req);
if (rc != 0) {
args = ptlrpc_req_async_args(req);
args->oaua_update = our;
+ /* set env to NULL, in case the interrupt cb and current function
+ * are in different thread */
+ args->oaua_update_env = NULL;
osp_thandle_get(oth); /* hold for update interpret */
req->rq_interpret_reply = osp_update_interpret;
if (!oth->ot_super.th_wait_submit && !oth->ot_super.th_sync) {
if (!osp->opd_imp_active || !osp->opd_imp_connected) {
osp_trans_callback(env, oth, rc);
- osp_thandle_put(oth);
+ osp_thandle_put(env, oth);
GOTO(out, rc = -ENOTCONN);
}
rc = obd_get_request_slot(&osp->opd_obd->u.cli);
if (rc != 0) {
osp_trans_callback(env, oth, rc);
- osp_thandle_put(oth);
+ osp_thandle_put(env, oth);
GOTO(out, rc = -ENOTCONN);
}
args->oaua_flow_control = true;
atomic_inc(args->oaua_count);
}
- ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
+ ptlrpcd_add_req(req);
req = NULL;
} else {
osp_thandle_get(oth); /* hold for commit callback */
* status, in case the other target is being recovered
* at the same time, and if we wait here for the import
* to be recovered, it might cause deadlock */
- top_device = osp->opd_dt_dev.dd_lu_dev.ld_site->ls_top_dev;
- if (top_device->ld_obd->obd_recovering)
- req->rq_allow_replay = 1;
-
- osp_get_rpc_lock(osp);
+ osp_set_req_replay(osp, req);
+
+ /* Because this req will be synchronous, i.e. it will be called
+ * in the same thread, it is safe to use the current
+ * env */
+ args->oaua_update_env = env;
+ if (osp->opd_connect_mdt)
+ osp_get_rpc_lock(osp);
rc = ptlrpc_queue_wait(req);
- osp_put_rpc_lock(osp);
+ if (osp->opd_connect_mdt)
+ osp_put_rpc_lock(osp);
if ((rc == -ENOMEM && req->rq_set == NULL) ||
(req->rq_transno == 0 && !req->rq_committed)) {
if (args->oaua_update != NULL) {
/* If osp_update_interpret is not being called,
* release the osp_thandle */
args->oaua_update = NULL;
- osp_thandle_put(oth);
+ osp_thandle_put(env, oth);
}
req->rq_cb_data = NULL;
rc = rc == 0 ? req->rq_status : rc;
osp_trans_callback(env, oth, rc);
- osp_thandle_put(oth);
+ osp_thandle_put(env, oth);
GOTO(out, rc);
}
}
/**
 * Set version for the transaction
 *
- * Set the version for the transaction, then the osp RPC will be
- * sent in the order of version, i.e. the transaction with lower
- * version will be sent first.
+ * Set the version for the transaction and add the request to
+ * the sending list, then after transaction stop, the request
+ * will be sent in the order of version by the sending thread.
 *
 * \param [in] oth osp thandle to be set version.
+ * \param [in] obj object being updated; if it has already been marked
+ *  stale no version is assigned and the request is not queued
 *
 * \retval 0 if set version succeeds
+ * \retval -ESTALE if \a obj is stale
 *  negative errno if set version fails.
 */
-int osp_check_and_set_rpc_version(struct osp_thandle *oth)
+int osp_check_and_set_rpc_version(struct osp_thandle *oth,
+ struct osp_object *obj)
{
struct osp_device *osp = dt2osp_dev(oth->ot_super.th_dev);
struct osp_updates *ou = osp->opd_update;
if (ou == NULL)
return -EIO;
- if (oth->ot_version != 0)
+ if (oth->ot_our->our_version != 0)
return 0;
spin_lock(&ou->ou_lock);
- oth->ot_version = ou->ou_version++;
+ spin_lock(&oth->ot_our->our_list_lock);
+ if (obj->opo_stale) {
+ spin_unlock(&oth->ot_our->our_list_lock);
+ spin_unlock(&ou->ou_lock);
+ return -ESTALE;
+ }
+
+ /* Assign the version and add it to the sending list */
+ osp_thandle_get(oth); /* released by the sending thread */
+ oth->ot_our->our_version = ou->ou_version++;
+ oth->ot_our->our_generation = ou->ou_generation;
+ list_add_tail(&oth->ot_our->our_list,
+ &osp->opd_update->ou_list);
+ oth->ot_our->our_req_ready = 0;
+ spin_unlock(&oth->ot_our->our_list_lock);
spin_unlock(&ou->ou_lock);
- CDEBUG(D_INFO, "%s: version "LPU64" oth:version %p:"LPU64"\n",
- osp->opd_obd->obd_name, ou->ou_version, oth, oth->ot_version);
+ LASSERT(oth->ot_super.th_wait_submit == 1);
+ CDEBUG(D_INFO, "%s: version %llu gen %llu oth:version %p:%llu\n",
+ osp->opd_obd->obd_name, ou->ou_version, ou->ou_generation, oth,
+ oth->ot_our->our_version);
return 0;
}
spin_lock(&ou->ou_lock);
list_for_each_entry_safe(our, tmp, &ou->ou_list, our_list) {
LASSERT(our->our_th != NULL);
- CDEBUG(D_INFO, "our %p version "LPU64" rpc_version "LPU64"\n",
- our, our->our_th->ot_version, ou->ou_rpc_version);
- if (our->our_th->ot_version == 0) {
- list_del_init(&our->our_list);
- *ourp = our;
- got_req = true;
- break;
- }
-
+ CDEBUG(D_HA, "ou %p version %llu rpc_version %llu\n",
+ ou, our->our_version, ou->ou_rpc_version);
+ spin_lock(&our->our_list_lock);
/* Find next osp_update_request in the list */
- if (our->our_th->ot_version == ou->ou_rpc_version) {
+ if (our->our_version == ou->ou_rpc_version &&
+ our->our_req_ready) {
list_del_init(&our->our_list);
+ spin_unlock(&our->our_list_lock);
*ourp = our;
got_req = true;
break;
}
+ spin_unlock(&our->our_list_lock);
}
spin_unlock(&ou->ou_lock);
return got_req;
}
-static void osp_update_rpc_version(struct osp_updates *ou,
- struct osp_thandle *oth)
+/**
+ * Invalidate update requests
+ *
+ * Invalidate every update request on the OSP sending list, so all of
+ * the requests on that list will complete with an error.  This happens
+ * when one update (llog write) request fails or when the OSP is evicted
+ * by the remote target, see osp_send_update_thread().
+ *
+ * \param[in] osp OSP device whose update requests will be
+ * invalidated.
+ **/
+void osp_invalidate_request(struct osp_device *osp)
{
- if (oth->ot_version == 0)
+ struct lu_env env;
+ struct osp_updates *ou = osp->opd_update;
+ struct osp_update_request *our;
+ struct osp_update_request *tmp;
+ LIST_HEAD(list);
+ int rc;
+ ENTRY;
+
+ if (ou == NULL)
return;
- LASSERT(oth->ot_version == ou->ou_rpc_version);
+ rc = lu_env_init(&env, osp->opd_dt_dev.dd_lu_dev.ld_type->ldt_ctx_tags);
+ if (rc < 0) {
+ CERROR("%s: init env error: rc = %d\n", osp->opd_obd->obd_name,
+ rc);
+
+ spin_lock(&ou->ou_lock);
+ ou->ou_generation++;
+ spin_unlock(&ou->ou_lock);
+
+ return;
+ }
+
+ INIT_LIST_HEAD(&list);
+
spin_lock(&ou->ou_lock);
- ou->ou_rpc_version++;
+ /* fail every request currently queued on the sending list; ready
+ * ones are collected locally so they can be completed after the
+ * lock is dropped */
+ list_for_each_entry_safe(our, tmp, &ou->ou_list, our_list) {
+ spin_lock(&our->our_list_lock);
+ if (our->our_req_ready)
+ list_move(&our->our_list, &list);
+ else
+ list_del_init(&our->our_list);
+
+ if (our->our_th->ot_super.th_result == 0)
+ our->our_th->ot_super.th_result = -EIO;
+
+ if (our->our_version >= ou->ou_rpc_version)
+ ou->ou_rpc_version = our->our_version + 1;
+ spin_unlock(&our->our_list_lock);
+
+ CDEBUG(D_HA, "%s invalidate our %p\n", osp->opd_obd->obd_name,
+ our);
+ }
+
+ /* Increase the generation, then update requests carrying the old
+ * generation will fail with -EIO. */
+ ou->ou_generation++;
spin_unlock(&ou->ou_lock);
+
+ /* complete the ready requests collected above, outside ou_lock */
+ list_for_each_entry_safe(our, tmp, &list, our_list) {
+ spin_lock(&our->our_list_lock);
+ list_del_init(&our->our_list);
+ spin_unlock(&our->our_list_lock);
+ osp_trans_callback(&env, our->our_th,
+ our->our_th->ot_super.th_result);
+ osp_thandle_put(&env, our->our_th);
+ }
+ lu_env_fini(&env);
}
/**
our = NULL;
l_wait_event(ou->ou_waitq,
!osp_send_update_thread_running(osp) ||
- osp_get_next_request(ou, &our),
- &lwi);
+ osp_get_next_request(ou, &our), &lwi);
if (!osp_send_update_thread_running(osp)) {
- if (our != NULL && our->our_th != NULL) {
+ if (our != NULL) {
osp_trans_callback(&env, our->our_th, -EINTR);
- osp_thandle_put(our->our_th);
+ osp_thandle_put(&env, our->our_th);
}
break;
}
- if (our->our_req_sent == 0) {
- if (our->our_th != NULL &&
- our->our_th->ot_super.th_result != 0)
- osp_trans_callback(&env, our->our_th,
- our->our_th->ot_super.th_result);
- else
- rc = osp_send_update_req(&env, osp, our);
+ LASSERT(our->our_th != NULL);
+ if (our->our_th->ot_super.th_result != 0) {
+ osp_trans_callback(&env, our->our_th,
+ our->our_th->ot_super.th_result);
+ rc = our->our_th->ot_super.th_result;
+ } else if (ou->ou_generation != our->our_generation ||
+ OBD_FAIL_CHECK(OBD_FAIL_INVALIDATE_UPDATE)) {
+ rc = -EIO;
+ osp_trans_callback(&env, our->our_th, rc);
+ } else {
+ rc = osp_send_update_req(&env, osp, our);
}
- if (our->our_th != NULL) {
- /* Update the rpc version */
- osp_update_rpc_version(ou, our->our_th);
- /* Balanced for thandle_get in osp_trans_trigger() */
- osp_thandle_put(our->our_th);
- }
+ /* Update the rpc version */
+ spin_lock(&ou->ou_lock);
+ if (our->our_version == ou->ou_rpc_version)
+ ou->ou_rpc_version++;
+ spin_unlock(&ou->ou_lock);
+
+ /* If one update request fails, fail all of the requests
+ * in the sending list, because the requests in the sending
+ * list are dependent on each other; continuing to send these
+ * requests might cause llog or filesystem corruption */
+ if (rc < 0)
+ osp_invalidate_request(osp);
+
+ /* Balanced for thandle_get in osp_check_and_set_rpc_version */
+ osp_thandle_put(&env, our->our_th);
}
thread->t_flags = SVC_STOPPED;
}
/**
- * Trigger the request for remote updates.
- *
- * Add the request to the sending list, and wake up osp update
- * sending thread.
- *
- * \param[in] env pointer to the thread context
- * \param[in] osp pointer to the OSP device
- * \param[in] oth pointer to the transaction handler
- *
- */
-static void osp_trans_trigger(const struct lu_env *env,
- struct osp_device *osp,
- struct osp_thandle *oth)
-{
-
- CDEBUG(D_INFO, "%s: add oth %p with version "LPU64"\n",
- osp->opd_obd->obd_name, oth, oth->ot_version);
-
- LASSERT(oth->ot_magic == OSP_THANDLE_MAGIC);
- osp_thandle_get(oth);
- LASSERT(oth->ot_our != NULL);
- spin_lock(&osp->opd_update->ou_lock);
- list_add_tail(&oth->ot_our->our_list,
- &osp->opd_update->ou_list);
- spin_unlock(&osp->opd_update->ou_lock);
-
- wake_up(&osp->opd_update->ou_waitq);
-}
-
-/**
* The OSP layer dt_device_operations::dt_trans_start() interface
* to start the transaction.
*
{
struct osp_thandle *oth = thandle_to_osp_thandle(th);
+ if (oth->ot_super.th_sync)
+ oth->ot_our->our_flags |= UPDATE_FL_SYNC;
/* For remote thandle, if there are local thandle, start it here*/
if (is_only_remote_trans(th) && oth->ot_storage_th != NULL)
return dt_trans_start(env, oth->ot_storage_th->th_dev,
* to stop the transaction.
*
* If the transaction is a remote transaction, related remote
- * updates will be triggered here via osp_trans_trigger().
+ * updates will be triggered at the end of this function.
*
* For synchronous mode update or any failed update, the request
* will be destroyed explicitly when the osp_trans_stop().
}
if (!osp->opd_connect_mdt) {
+ osp_trans_callback(env, oth, th->th_result);
rc = osp_send_update_req(env, osp, oth->ot_our);
GOTO(out, rc);
}
GOTO(out, rc = -EIO);
}
- if (th->th_sync) {
- /* if th_sync is set, then it needs to be sent
- * right away. Note: even thought the RPC has been
- * sent, it still needs to be added to the sending
- * list (see osp_trans_trigger()), so ou_rpc_version
- * can be updated correctly. */
+ CDEBUG(D_HA, "%s: add oth %p with version %llu\n",
+ osp->opd_obd->obd_name, oth, our->our_version);
+
+ LASSERT(our->our_req_ready == 0);
+ spin_lock(&our->our_list_lock);
+ if (likely(!list_empty(&our->our_list))) {
+ /* notify sending thread */
+ our->our_req_ready = 1;
+ wake_up(&osp->opd_update->ou_waitq);
+ spin_unlock(&our->our_list_lock);
+ } else if (th->th_result == 0) {
+ /* if the request does not needs to be serialized,
+ * read-only request etc, let's send it right away */
+ spin_unlock(&our->our_list_lock);
rc = osp_send_update_req(env, osp, our);
- our->our_req_sent = 1;
+ } else {
+ spin_unlock(&our->our_list_lock);
+ osp_trans_callback(env, oth, th->th_result);
}
-
- osp_trans_trigger(env, osp, oth);
out:
- osp_thandle_put(oth);
+ osp_thandle_put(env, oth);
RETURN(rc);
}