On linux, EWOULDBLOCK has always been defined as an alias for
EAGAIN. In the interest of readability we should not use two names for
the same thing. So change the remaining uses of EWOULDBLOCK to EAGAIN
and add EWOULDBLOCK||EAGAIN to spelling.txt.
Signed-off-by: John L. Hammond <jhammond@whamcloud.com>
Change-Id: Ib48b8a1e58bfa961d2a4ba411c038c476bfc300d
Reviewed-on: https://review.whamcloud.com/40307
Tested-by: jenkins <devops@whamcloud.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Yingjin Qian <qian@ddn.com>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
17 files changed:
CLASSERT||BUILD_BUG_ON()
msecs_to_jiffies||cfs_time_seconds
DEFINE_TIMER||CFS_DEFINE_TIMER
CLASSERT||BUILD_BUG_ON()
msecs_to_jiffies||cfs_time_seconds
DEFINE_TIMER||CFS_DEFINE_TIMER
container_of0||container_of_safe
DN_MAX_BONUSLEN||DN_BONUS_SIZE(dnodesize)
DN_OLD_MAX_BONUSLEN||DN_BONUS_SIZE(DNODE_MIN_SIZE)
container_of0||container_of_safe
DN_MAX_BONUSLEN||DN_BONUS_SIZE(dnodesize)
DN_OLD_MAX_BONUSLEN||DN_BONUS_SIZE(DNODE_MIN_SIZE)
.UNINDENT
.sp
\fBllapi_hsm_copytool_recv\fP returns 0 when a message is available. If
.UNINDENT
.sp
\fBllapi_hsm_copytool_recv\fP returns 0 when a message is available. If
-the copytool was set to non\-blocking operation, \-EWOULDBLOCK is
+the copytool was set to non\-blocking operation, \-EAGAIN is
immediately returned if no message is available. On error, a negative
errno is returned.
.SH ERRORS
immediately returned if no message is available. On error, a negative
errno is returned.
.SH ERRORS
.sp
\fB\-EPROTO\fP Lustre protocol error.
.sp
.sp
\fB\-EPROTO\fP Lustre protocol error.
.sp
-\fB\-EWOULDBLOCK\fP No HSM message is available, and the copytool was set
+\fB\-EAGAIN\fP No HSM message is available, and the copytool was set
to not block on receives.
.SH SEE ALSO
.sp
to not block on receives.
.SH SEE ALSO
.sp
if (OBD_FAIL_CHECK(OBD_FAIL_FLD_QUERY_REQ && req->rq_no_delay)) {
/* the same error returned by ptlrpc_import_delay_req */
if (OBD_FAIL_CHECK(OBD_FAIL_FLD_QUERY_REQ && req->rq_no_delay)) {
/* the same error returned by ptlrpc_import_delay_req */
req->rq_status = rc;
} else {
obd_get_request_slot(&exp->exp_obd->u.cli);
req->rq_status = rc;
} else {
obd_get_request_slot(&exp->exp_obd->u.cli);
* \ingroup cl_lock
*/
enum cl_enq_flags {
* \ingroup cl_lock
*/
enum cl_enq_flags {
- /**
- * instruct server to not block, if conflicting lock is found. Instead
- * -EWOULDBLOCK is returned immediately.
- */
- CEF_NONBLOCK = 0x00000001,
+ /**
+ * instruct server to not block, if conflicting lock is found. Instead
+ * -EAGAIN is returned immediately.
+ */
+ CEF_NONBLOCK = 0x00000001,
/**
* Tell lower layers this is a glimpse request, translated to
* LDLM_FL_HAS_INTENT at LDLM layer.
/**
* Tell lower layers this is a glimpse request, translated to
* LDLM_FL_HAS_INTENT at LDLM layer.
* \retval 1 if the lock is compatible
* \retval 2 if \a req is a group lock and it is compatible and requires
* no further checking
* \retval 1 if the lock is compatible
* \retval 2 if \a req is a group lock and it is compatible and requires
* no further checking
- * \retval negative error, such as EWOULDBLOCK for group locks
+ * \retval negative error, such as EAGAIN for group locks
*/
static int
ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
*/
static int
ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
if (tree->lit_mode == LCK_GROUP) {
if (*flags & (LDLM_FL_BLOCK_NOWAIT |
LDLM_FL_SPECULATIVE)) {
if (tree->lit_mode == LCK_GROUP) {
if (*flags & (LDLM_FL_BLOCK_NOWAIT |
LDLM_FL_SPECULATIVE)) {
if (!work_list) {
RETURN(0);
} else {
if (!work_list) {
RETURN(0);
} else {
* immediately */
if (*flags & (LDLM_FL_BLOCK_NOWAIT
| LDLM_FL_SPECULATIVE)) {
* immediately */
if (*flags & (LDLM_FL_BLOCK_NOWAIT
| LDLM_FL_SPECULATIVE)) {
goto destroylock;
}
/* If this group lock is compatible with another
goto destroylock;
}
/* If this group lock is compatible with another
* range does not matter */
if (*flags & (LDLM_FL_BLOCK_NOWAIT
| LDLM_FL_SPECULATIVE)) {
* range does not matter */
if (*flags & (LDLM_FL_BLOCK_NOWAIT
| LDLM_FL_SPECULATIVE)) {
goto destroylock;
}
} else if (lock->l_policy_data.l_extent.end < req_start ||
goto destroylock;
}
} else if (lock->l_policy_data.l_extent.end < req_start ||
RETURN(0);
if (*flags & LDLM_FL_SPECULATIVE) {
RETURN(0);
if (*flags & LDLM_FL_SPECULATIVE) {
*err = ELDLM_OK;
if (intention == LDLM_PROCESS_RESCAN) {
*err = ELDLM_OK;
if (intention == LDLM_PROCESS_RESCAN) {
- /* Careful observers will note that we don't handle -EWOULDBLOCK
+ /* Careful observers will note that we don't handle -EAGAIN
* here, but it's ok for a non-obvious reason -- compat_queue
* here, but it's ok for a non-obvious reason -- compat_queue
- * can only return -EWOULDBLOCK if (flags & BLOCK_NOWAIT |
+ * can only return -EAGAIN if (flags & BLOCK_NOWAIT |
* SPECULATIVE). flags should always be zero here, and if that
* ever stops being true, we want to find out. */
LASSERT(*flags == 0);
* SPECULATIVE). flags should always be zero here, and if that
* ever stops being true, we want to find out. */
LASSERT(*flags == 0);
} else if (result == 0) {
result = cl_glimpse_lock(env, io, inode, io->ci_obj,
agl);
} else if (result == 0) {
result = cl_glimpse_lock(env, io, inode, io->ci_obj,
agl);
- if (!agl && result == -EWOULDBLOCK)
+ if (!agl && result == -EAGAIN)
io->ci_need_restart = 1;
}
io->ci_need_restart = 1;
}
cl_page_export(env, page, 1);
} else if (vpg->vpg_defer_uptodate) {
vpg->vpg_defer_uptodate = 0;
cl_page_export(env, page, 1);
} else if (vpg->vpg_defer_uptodate) {
vpg->vpg_defer_uptodate = 0;
- if (ioret == -EWOULDBLOCK) {
+ if (ioret == -EAGAIN) {
/* mirror read failed, it needs to destroy the page
* because subpage would be from wrong osc when trying
* to read from a new mirror
/* mirror read failed, it needs to destroy the page
* because subpage would be from wrong osc when trying
* to read from a new mirror
if (rc && !result)
result = rc;
if (rc && !result)
result = rc;
- if (result == -EWOULDBLOCK && io->ci_ndelay) {
+ if (result == -EAGAIN && io->ci_ndelay) {
io->ci_need_restart = 1;
result = 0;
}
io->ci_need_restart = 1;
result = 0;
}
*/
if (ios->cis_io->ci_type == CIT_READ && ios->cis_io->ci_ndelay &&
!ios->cis_io->ci_tried_all_mirrors && osc_import_not_healthy(imp)) {
*/
if (ios->cis_io->ci_type == CIT_READ && ios->cis_io->ci_ndelay &&
!ios->cis_io->ci_tried_all_mirrors && osc_import_not_healthy(imp)) {
} else if (likely(!imp->imp_invalid)) {
atomic_inc(&osc->oo_nr_ios);
oio->oi_is_active = 1;
} else if (likely(!imp->imp_invalid)) {
atomic_inc(&osc->oo_nr_ios);
oio->oi_is_active = 1;
/* Hide the error. */
rc = 0;
} else if (rc < 0 && oscl->ols_flags & LDLM_FL_NDELAY) {
/* Hide the error. */
rc = 0;
} else if (rc < 0 && oscl->ols_flags & LDLM_FL_NDELAY) {
}
if (oscl->ols_owner != NULL)
}
if (oscl->ols_owner != NULL)
list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
list_del_init(&ext->oe_link);
osc_extent_finish(env, ext, 1,
list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
list_del_init(&ext->oe_link);
osc_extent_finish(env, ext, 1,
- rc && req->rq_no_delay ? -EWOULDBLOCK : rc);
+ rc && req->rq_no_delay ? -EAGAIN : rc);
}
LASSERT(list_empty(&aa->aa_exts));
LASSERT(list_empty(&aa->aa_oaps));
}
LASSERT(list_empty(&aa->aa_exts));
LASSERT(list_empty(&aa->aa_oaps));
* XXX: decide how do we do here with resend
* if we don't resend, then client may see wrong file size
* if we do resend, then MDS thread can get stuck for quite long
* XXX: decide how do we do here with resend
* if we don't resend, then client may see wrong file size
* if we do resend, then MDS thread can get stuck for quite long
- * and if we don't resend, then client will also get -EWOULDBLOCK !!
+ * and if we don't resend, then client will also get -EAGAIN !!
* (see LU-7975 and sanity/test_27F use cases)
* but let's decide not to resend/delay this truncate request to OST
* and allow Client to decide to resend, in a less aggressive way from
* after_reply(), by returning -EINPROGRESS instead of
* (see LU-7975 and sanity/test_27F use cases)
* but let's decide not to resend/delay this truncate request to OST
* and allow Client to decide to resend, in a less aggressive way from
* after_reply(), by returning -EINPROGRESS instead of
- * -EAGAIN/-EWOULDBLOCK upon return from ptlrpc_queue_wait() at the
+ * -EAGAIN upon return from ptlrpc_queue_wait() at the
* end of this routine
*/
req->rq_no_resend = req->rq_no_delay = 1;
* end of this routine
*/
req->rq_no_resend = req->rq_no_delay = 1;
rc = ptlrpc_queue_wait(req);
if (rc) {
rc = ptlrpc_queue_wait(req);
if (rc) {
- /* -EWOULDBLOCK/-EAGAIN means OST is unreachable at the moment
+ /* -EAGAIN/-EWOULDBLOCK means OST is unreachable at the moment
* since we have decided not to resend/delay, but this could
* lead to wrong size to be seen at Client side and even process
* trying to open to exit/fail if not itself handling -EAGAIN.
* So it should be better to return -EINPROGRESS instead and
* leave the decision to resend at Client side in after_reply()
*/
* since we have decided not to resend/delay, but this could
* lead to wrong size to be seen at Client side and even process
* trying to open to exit/fail if not itself handling -EAGAIN.
* So it should be better to return -EINPROGRESS instead and
* leave the decision to resend at Client side in after_reply()
*/
- if (rc == -EWOULDBLOCK) {
rc = -EINPROGRESS;
CDEBUG(D_HA, "returning -EINPROGRESS instead of "
"-EWOULDBLOCK/-EAGAIN to allow Client to "
rc = -EINPROGRESS;
CDEBUG(D_HA, "returning -EINPROGRESS instead of "
"-EWOULDBLOCK/-EAGAIN to allow Client to "
} else if (req->rq_no_delay &&
imp->imp_generation != imp->imp_initiated_at) {
/* ignore nodelay for requests initiating connections */
} else if (req->rq_no_delay &&
imp->imp_generation != imp->imp_initiated_at) {
/* ignore nodelay for requests initiating connections */
- *status = -EWOULDBLOCK;
} else if (req->rq_allow_replay &&
(imp->imp_state == LUSTRE_IMP_REPLAY ||
imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS ||
} else if (req->rq_allow_replay &&
(imp->imp_state == LUSTRE_IMP_REPLAY ||
imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS ||
* The two translation tables below must define a one-to-one mapping between
* host and network errnos.
*
* The two translation tables below must define a one-to-one mapping between
* host and network errnos.
*
- * EWOULDBLOCK is equal to EAGAIN on all architectures except for parisc, which
- * appears irrelevant. Thus, existing references to EWOULDBLOCK are fine.
+ * EWOULDBLOCK is equal to EAGAIN on all architectures except for parisc, which
+ * appears irrelevant. Thus, existing references to EAGAIN are fine.
*
* EDEADLOCK is equal to EDEADLK on x86 but not on sparc, at least. A sparc
* host has no context-free way to determine if a LUSTRE_EDEADLK represents an
*
* EDEADLOCK is equal to EDEADLK on x86 but not on sparc, at least. A sparc
* host has no context-free way to determine if a LUSTRE_EDEADLK represents an
spin_unlock(&ctx->cc_lock);
if (timeout == 0)
spin_unlock(&ctx->cc_lock);
if (timeout == 0)
/* Clear any flags that may be present from previous sends */
LASSERT(req->rq_receiving_reply == 0);
/* Clear any flags that may be present from previous sends */
LASSERT(req->rq_receiving_reply == 0);
/* Hopefully there is nothing lingering */
for (i = 0; i < 1000; i++) {
rc = llapi_hsm_copytool_recv(ctdata, &hal, &msgsize);
/* Hopefully there is nothing lingering */
for (i = 0; i < 1000; i++) {
rc = llapi_hsm_copytool_recv(ctdata, &hal, &msgsize);
- ASSERTF(rc == -EWOULDBLOCK, "llapi_hsm_copytool_recv error: %s",
+ ASSERTF(rc == -EAGAIN, "llapi_hsm_copytool_recv error: %s",
strerror(-rc));
rc = llapi_hsm_copytool_recv(ctdata, &hal, &msgsize);
rc, strerror(errno));
rc = llapi_hsm_copytool_recv(ctdata, &hal, &msgsize);
- ASSERTF(rc == -EWOULDBLOCK, "llapi_hsm_copytool_recv error: %s",
+ ASSERTF(rc == -EAGAIN, "llapi_hsm_copytool_recv error: %s",
strerror(-rc));
fds[0].fd = fd;
strerror(-rc));
fds[0].fd = fd;
/*
* task0 attempts GR(gid=1) -- granted
/*
* task0 attempts GR(gid=1) -- granted
- * task1 attempts PR on non-blocking fd -> should return -EWOULDBLOCK
- * task2 attempts PW on non-blocking fd -> should return -EWOULDBLOCK
- * task3 attempts GR(gid=2) on non-blocking fd -> should return -EWOULDBLOCK
+ * task1 attempts PR on non-blocking fd -> should return -EAGAIN
+ * task2 attempts PW on non-blocking fd -> should return -EAGAIN
+ * task3 attempts GR(gid=2) on non-blocking fd -> should return -EAGAIN
*/
void grouplock_nonblock_test(char *filename, int fd)
{
*/
void grouplock_nonblock_test(char *filename, int fd)
{
switch (rank) {
case 1:
rc = read(fd, buf, sizeof(buf));
switch (rank) {
case 1:
rc = read(fd, buf, sizeof(buf));
- if ((rc != -1) || (errno != EWOULDBLOCK))
+ if ((rc != -1) || (errno != EAGAIN))
FAIL("PR lock succeeded while incompatible GROUP LOCK (gid=1) is still held\n");
MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
break;
case 2:
rc = write(fd, buf, sizeof(buf));
FAIL("PR lock succeeded while incompatible GROUP LOCK (gid=1) is still held\n");
MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
break;
case 2:
rc = write(fd, buf, sizeof(buf));
- if ((rc != -1) || (errno != EWOULDBLOCK))
+ if ((rc != -1) || (errno != EAGAIN))
FAIL("PW lock succeeded while incompatible GROUP LOCK (gid=1) is still held\n");
MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
FAIL("PW lock succeeded while incompatible GROUP LOCK (gid=1) is still held\n");
MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
case 3:
gid = 2;
rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid);
case 3:
gid = 2;
rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid);
- if ((rc != -1) || (errno != EWOULDBLOCK))
+ if ((rc != -1) || (errno != EAGAIN))
FAIL("GROUP_LOCK (gid=2) succeeded while incompatible GROUP LOCK (gid=1) is still held.\n");
MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
FAIL("GROUP_LOCK (gid=2) succeeded while incompatible GROUP LOCK (gid=1) is still held.\n");
MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);