Remove the barrier and rely on the wait queue lock for wake
synchronization.
Leave the csi_end_io callback empty but available for future customization.
Inspired by commit c3973b4aca6df794c492f6856ffbf02f2f8a9592.
Cray-bug-id: LUS-7330
Signed-off-by: Shaun Tancheff <stancheff@cray.com>
Change-Id: Idc632140256cccfa6046a52cbd1c6432955e2b11
Reviewed-on: https://review.whamcloud.com/35381
Tested-by: jenkins <devops@whamcloud.com>
Reviewed-by: Patrick Farrell <pfarrell@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
atomic_t csi_sync_nr;
/** error code. */
int csi_sync_rc;
atomic_t csi_sync_nr;
/** error code. */
int csi_sync_rc;
- /** barrier of destroy this structure */
- atomic_t csi_barrier;
/** completion to be signaled when transfer is complete. */
wait_queue_head_t csi_waitq;
/** callback to invoke when this IO is finished */
/** completion to be signaled when transfer is complete. */
wait_queue_head_t csi_waitq;
/** callback to invoke when this IO is finished */
}
EXPORT_SYMBOL(cl_req_attr_set);
}
EXPORT_SYMBOL(cl_req_attr_set);
/*
 * cl_sync_io_end callback is issued as cl_sync_io completes and before
 * control of anchor reverts to the model used by the caller of
 * cl_sync_io_init().
 *
 * NOTE: called with the spinlock on anchor->csi_waitq.lock held, so the
 * implementation must not sleep or re-take that lock.
 */
void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor)
{
	/*
	 * Intentionally empty: wake-up and barrier handling now live in
	 * cl_sync_io_note() under the waitq lock.  This default no-op is
	 * deprecated and kept only for future customization / pending
	 * removal.
	 */
}
EXPORT_SYMBOL(cl_sync_io_end);
memset(anchor, 0, sizeof(*anchor));
init_waitqueue_head(&anchor->csi_waitq);
atomic_set(&anchor->csi_sync_nr, nr);
memset(anchor, 0, sizeof(*anchor));
init_waitqueue_head(&anchor->csi_waitq);
atomic_set(&anchor->csi_sync_nr, nr);
- atomic_set(&anchor->csi_barrier, nr > 0);
anchor->csi_sync_rc = 0;
anchor->csi_end_io = end;
LASSERT(end != NULL);
anchor->csi_sync_rc = 0;
anchor->csi_end_io = end;
LASSERT(end != NULL);
} else {
rc = anchor->csi_sync_rc;
}
} else {
rc = anchor->csi_sync_rc;
}
+ /* We take the lock to ensure that cl_sync_io_note() has finished */
+ spin_lock(&anchor->csi_waitq.lock);
LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
+ spin_unlock(&anchor->csi_waitq.lock);
- /* wait until cl_sync_io_note() has done wakeup */
- while (unlikely(atomic_read(&anchor->csi_barrier) != 0)) {
- cpu_relax();
- }
RETURN(rc);
}
EXPORT_SYMBOL(cl_sync_io_wait);
RETURN(rc);
}
EXPORT_SYMBOL(cl_sync_io_wait);
* IO.
*/
LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
* IO.
*/
LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
- if (atomic_dec_and_test(&anchor->csi_sync_nr)) {
- LASSERT(anchor->csi_end_io != NULL);
+ if (atomic_dec_and_lock(&anchor->csi_sync_nr,
+ &anchor->csi_waitq.lock)) {
+ /*
+ * Holding the lock across both the decrement and
+ * the wakeup ensures cl_sync_io_wait() doesn't complete
+ * before the wakeup completes and the contents of
+ * of anchor become unsafe to access as the owner is free
+ * to immediately reclaim anchor when cl_sync_io_wait()
+ * completes.
+ */
+ wake_up_all_locked(&anchor->csi_waitq);
anchor->csi_end_io(env, anchor);
anchor->csi_end_io(env, anchor);
+ spin_unlock(&anchor->csi_waitq.lock);
+
/* Can't access anchor any more */
}
EXIT;
/* Can't access anchor any more */
}
EXIT;