*/
void cl_sync_io_init_notify(struct cl_sync_io *anchor, int nr,
- cl_sync_io_end_t *end)
+ struct cl_dio_aio *aio, cl_sync_io_end_t *end)
{
ENTRY;
memset(anchor, 0, sizeof(*anchor));
init_waitqueue_head(&anchor->csi_waitq);
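+ /* expect nr completion events; each cl_sync_io_note() call drops the count */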
atomic_set(&anchor->csi_sync_nr, nr);
anchor->csi_sync_rc = 0;
anchor->csi_end_io = end;
+ anchor->csi_aio = aio;
EXIT;
}
EXPORT_SYMBOL(cl_sync_io_init_notify);
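/**
* Wait until all IO completes. Transfer completion routine has to call
* cl_sync_io_note() for every entity.
*/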
int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
long timeout)
{
- struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
- NULL, NULL, NULL);
- int rc;
+ int rc = 0;
ENTRY;
LASSERT(timeout >= 0);
- rc = l_wait_event(anchor->csi_waitq,
- atomic_read(&anchor->csi_sync_nr) == 0,
- &lwi);
- if (rc < 0) {
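+ /*
+ * Bounded wait first: wait_event_idle_timeout() returns 0 only when the
+ * timeout expires with csi_sync_nr still non-zero.
+ */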
+ if (timeout > 0 &&
+ wait_event_idle_timeout(anchor->csi_waitq,
+ atomic_read(&anchor->csi_sync_nr) == 0,
+ cfs_time_seconds(timeout)) == 0) {
+ rc = -ETIMEDOUT;
CERROR("IO failed: %d, still wait for %d remaining entries\n",
rc, atomic_read(&anchor->csi_sync_nr));
+ }

- wait_event_idle(anchor->csi_waitq,
- atomic_read(&anchor->csi_sync_nr) == 0);
- } else {
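+ /*
+ * Even after a timeout the remaining entries must be waited for:
+ * the outstanding transfers still reference the anchor and the
+ * pages under IO, so returning early would be unsafe.
+ */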
+ wait_event_idle(anchor->csi_waitq,
+ atomic_read(&anchor->csi_sync_nr) == 0);
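+ /* a timeout error takes precedence over the recorded IO result */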
+ if (!rc)
rc = anchor->csi_sync_rc;
- }
+
/* We take the lock to ensure that cl_sync_io_note() has finished */
spin_lock(&anchor->csi_waitq.lock);
LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
spin_unlock(&anchor->csi_waitq.lock);
RETURN(rc);
}
EXPORT_SYMBOL(cl_sync_io_wait);
/**
* Indicate that transfer of a single page completed.
*/
void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
int ioret)
{
ENTRY;
if (anchor->csi_sync_rc == 0 && ioret < 0)
anchor->csi_sync_rc = ioret;
LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
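+ /*
+ * atomic_dec_and_lock() acquires csi_waitq.lock only when the counter
+ * drops to zero, so the cleanup below runs exactly once, for the
+ * final completion.
+ */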
if (atomic_dec_and_lock(&anchor->csi_sync_nr,
&anchor->csi_waitq.lock)) {
+ struct cl_dio_aio *aio = NULL;
+
cl_sync_io_end_t *end_io = anchor->csi_end_io;
/*
* Holding the lock across both the decrement and the wakeup ensures
* cl_sync_io_wait() does not return, and the anchor is not freed,
* before the wakeup completes.
*/
wake_up_all_locked(&anchor->csi_waitq);
if (end_io)
end_io(env, anchor);
- spin_unlock(&anchor->csi_waitq.lock);
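+ /*
+ * Sample csi_aio while still holding the lock; once the lock is
+ * dropped the anchor may be freed and must not be touched.
+ */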
+ if (anchor->csi_aio)
+ aio = anchor->csi_aio;
+
spin_unlock(&anchor->csi_waitq.lock);
- /* Can't access anchor any more */
+ /**
+ * If anchor->csi_aio is set, we are responsible for freeing
+ * memory here rather than when cl_sync_io_wait() completes.
+ */
+ if (aio)
+ OBD_FREE_PTR(aio);
}
EXIT;
}
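+ /*
+ * Illustrative caller pattern (a sketch, not part of this patch;
+ * "submit" stands for whatever mechanism queues the transfers):
+ *
+ *   cl_sync_io_init_notify(&anchor, nr, NULL, NULL);
+ *   submit nr transfers, each ending in cl_sync_io_note();
+ *   rc = cl_sync_io_wait(env, &anchor, timeout);
+ */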