Since csi_end_io is empty, remove it.
Provide an initializer that supports a custom end_io handler.
Cray-bug-id: LUS-7330
Signed-off-by: Shaun Tancheff <stancheff@cray.com>
Change-Id: If62c2536772136bfafc6e99ad1dbd9b466735201
Reviewed-on: https://review.whamcloud.com/35400
Tested-by: jenkins <devops@whamcloud.com>
Reviewed-by: Neil Brown <neilb@suse.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
/** \defgroup cl_sync_io cl_sync_io
* @{ */
+struct cl_sync_io;
+
+typedef void (cl_sync_io_end_t)(const struct lu_env *, struct cl_sync_io *);
+
+void cl_sync_io_init_notify(struct cl_sync_io *anchor, int nr,
+ cl_sync_io_end_t *end);
+
+int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
+ long timeout);
+void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
+ int ioret);
+static inline void cl_sync_io_init(struct cl_sync_io *anchor, int nr)
+{
+ cl_sync_io_init_notify(anchor, nr, NULL);
+}
+
/**
* Anchor for synchronous transfer. This is allocated on a stack by thread
* doing synchronous transfer, and a pointer to this structure is set up in
/** completion to be signaled when transfer is complete. */
wait_queue_head_t csi_waitq;
/** callback to invoke when this IO is finished */
- void (*csi_end_io)(const struct lu_env *,
- struct cl_sync_io *);
+ cl_sync_io_end_t *csi_end_io;
};
-void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
- void (*end)(const struct lu_env *, struct cl_sync_io *));
-int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
- long timeout);
-void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
- int ioret);
-void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor);
-
/** @} cl_sync_io */
/** \defgroup cl_env cl_env
cl_page_disown(env, io, page);
} else {
anchor = &vvp_env_info(env)->vti_anchor;
- cl_sync_io_init(anchor, 1, &cl_sync_io_end);
+ cl_sync_io_init(anchor, 1);
page->cp_sync_io = anchor;
cl_2queue_add(queue, page);
pg->cp_sync_io = anchor;
}
- cl_sync_io_init(anchor, queue->c2_qin.pl_nr, &cl_sync_io_end);
+ cl_sync_io_init(anchor, queue->c2_qin.pl_nr);
rc = cl_io_submit_rw(env, io, iot, queue);
if (rc == 0) {
/*
}
EXPORT_SYMBOL(cl_req_attr_set);
-/*
- * cl_sync_io_end callback is issued as cl_sync_io completes and before
- * control of anchor reverts to model used by the caller of cl_sync_io_init()
- *
- * NOTE: called with spinlock on anchor->csi_waitq.lock
- */
-void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor)
-{
- /* deprecated pending future removal */
-}
-EXPORT_SYMBOL(cl_sync_io_end);
-
/**
- * Initialize synchronous io wait anchor
+ * Initialize synchronous io wait \a anchor for \a nr pages with optional
+ * \a end handler.
+ * \param anchor owned by caller, initialized here.
+ * \param nr number of pages initially pending in sync.
+ * \param end optional callback called on sync_io completion, can be
+ *  used to trigger erasure coding, integrity, dedupe, or similar operation.
+ * \a end is called with a spinlock on anchor->csi_waitq.lock
*/
-void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
- void (*end)(const struct lu_env *, struct cl_sync_io *))
+
+void cl_sync_io_init_notify(struct cl_sync_io *anchor, int nr,
+ cl_sync_io_end_t *end)
{
ENTRY;
memset(anchor, 0, sizeof(*anchor));
atomic_set(&anchor->csi_sync_nr, nr);
anchor->csi_sync_rc = 0;
anchor->csi_end_io = end;
- LASSERT(end != NULL);
EXIT;
}
-EXPORT_SYMBOL(cl_sync_io_init);
+EXPORT_SYMBOL(cl_sync_io_init_notify);
/**
* Wait until all IO completes. Transfer completion routine has to call
LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
if (atomic_dec_and_lock(&anchor->csi_sync_nr,
&anchor->csi_waitq.lock)) {
+ cl_sync_io_end_t *end_io = anchor->csi_end_io;
+
/*
* Holding the lock across both the decrement and
* the wakeup ensures cl_sync_io_wait() doesn't complete
* completes.
*/
wake_up_all_locked(&anchor->csi_waitq);
- anchor->csi_end_io(env, anchor);
+ if (end_io)
+ end_io(env, anchor);
spin_unlock(&anchor->csi_waitq.lock);
/* Can't access anchor any more */
if ((enq_flags & CEF_GLIMPSE) && !(enq_flags & CEF_SPECULATIVE)) {
anchor = &cl_env_info(env)->clt_anchor;
- cl_sync_io_init(anchor, 1, cl_sync_io_end);
+ cl_sync_io_init(anchor, 1);
}
rc = cl_lock_enqueue(env, io, lock, anchor);
continue;
/* wait for conflicting lock to be canceled */
- cl_sync_io_init(waiter, 1, cl_sync_io_end);
+ cl_sync_io_init(waiter, 1);
oscl->ols_owner = waiter;
spin_lock(&tmp_oscl->ols_lock);