struct bio_vec md_kiov[LNET_MAX_IOV];
};
-#define LNET_MD_FLAG_ZOMBIE (1 << 0)
-#define LNET_MD_FLAG_AUTO_UNLINK (1 << 1)
-#define LNET_MD_FLAG_ABORTED (1 << 2)
+#define LNET_MD_FLAG_ZOMBIE BIT(0)
+#define LNET_MD_FLAG_AUTO_UNLINK BIT(1)
+#define LNET_MD_FLAG_ABORTED BIT(2)
struct lnet_test_peer {
/* info about peers we are trying to fail */
};
/* Preferred path added due to traffic on non-MR peer_ni */
-#define LNET_PEER_NI_NON_MR_PREF (1 << 0)
+#define LNET_PEER_NI_NON_MR_PREF BIT(0)
/* peer is being recovered. */
-#define LNET_PEER_NI_RECOVERY_PENDING (1 << 1)
+#define LNET_PEER_NI_RECOVERY_PENDING BIT(1)
/* recovery ping failed */
-#define LNET_PEER_NI_RECOVERY_FAILED (1 << 2)
+#define LNET_PEER_NI_RECOVERY_FAILED BIT(2)
/* peer is being deleted */
-#define LNET_PEER_NI_DELETING (1 << 3)
+#define LNET_PEER_NI_DELETING BIT(3)
struct lnet_peer {
/* chain on pt_peer_list */
*
* A peer is marked ROUTER if it indicates so in the feature bit.
*/
-#define LNET_PEER_MULTI_RAIL (1 << 0) /* Multi-rail aware */
-#define LNET_PEER_NO_DISCOVERY (1 << 1) /* Peer disabled discovery */
-#define LNET_PEER_ROUTER_ENABLED (1 << 2) /* router feature enabled */
+#define LNET_PEER_MULTI_RAIL BIT(0) /* Multi-rail aware */
+#define LNET_PEER_NO_DISCOVERY BIT(1) /* Peer disabled discovery */
+#define LNET_PEER_ROUTER_ENABLED BIT(2) /* router feature enabled */
/*
* A peer is marked CONFIGURED if it was configured by DLC.
* A peer that was created as the result of inbound traffic will not
* be marked at all.
*/
-#define LNET_PEER_CONFIGURED (1 << 3) /* Configured via DLC */
-#define LNET_PEER_DISCOVERED (1 << 4) /* Peer was discovered */
-#define LNET_PEER_REDISCOVER (1 << 5) /* Discovery was disabled */
+#define LNET_PEER_CONFIGURED BIT(3) /* Configured via DLC */
+#define LNET_PEER_DISCOVERED BIT(4) /* Peer was discovered */
+#define LNET_PEER_REDISCOVER BIT(5) /* Discovery was disabled */
/*
* A peer is marked DISCOVERING when discovery is in progress.
* The other flags below correspond to stages of discovery.
*/
-#define LNET_PEER_DISCOVERING (1 << 6) /* Discovering */
-#define LNET_PEER_DATA_PRESENT (1 << 7) /* Remote peer data present */
-#define LNET_PEER_NIDS_UPTODATE (1 << 8) /* Remote peer info uptodate */
-#define LNET_PEER_PING_SENT (1 << 9) /* Waiting for REPLY to Ping */
-#define LNET_PEER_PUSH_SENT (1 << 10) /* Waiting for ACK of Push */
-#define LNET_PEER_PING_FAILED (1 << 11) /* Ping send failure */
-#define LNET_PEER_PUSH_FAILED (1 << 12) /* Push send failure */
+#define LNET_PEER_DISCOVERING BIT(6) /* Discovering */
+#define LNET_PEER_DATA_PRESENT BIT(7) /* Remote peer data present */
+#define LNET_PEER_NIDS_UPTODATE BIT(8) /* Remote peer info uptodate */
+#define LNET_PEER_PING_SENT BIT(9) /* Waiting for REPLY to Ping */
+#define LNET_PEER_PUSH_SENT BIT(10) /* Waiting for ACK of Push */
+#define LNET_PEER_PING_FAILED BIT(11) /* Ping send failure */
+#define LNET_PEER_PUSH_FAILED BIT(12) /* Push send failure */
/*
* A ping can be forced as a way to fix up state, or as a manual
* intervention by an admin.
* A push can be forced in circumstances that would normally not
* allow for one to happen.
*/
-#define LNET_PEER_FORCE_PING (1 << 13) /* Forced Ping */
-#define LNET_PEER_FORCE_PUSH (1 << 14) /* Forced Push */
+#define LNET_PEER_FORCE_PING BIT(13) /* Forced Ping */
+#define LNET_PEER_FORCE_PUSH BIT(14) /* Forced Push */
/* force delete even if router */
-#define LNET_PEER_RTR_NI_FORCE_DEL (1 << 15)
+#define LNET_PEER_RTR_NI_FORCE_DEL BIT(15)
/* gw undergoing alive discovery */
-#define LNET_PEER_RTR_DISCOVERY (1 << 16)
+#define LNET_PEER_RTR_DISCOVERY BIT(16)
/* gw has undergone discovery (does not indicate success or failure) */
-#define LNET_PEER_RTR_DISCOVERED (1 << 17)
+#define LNET_PEER_RTR_DISCOVERED BIT(17)
/* peer is marked for deletion */
-#define LNET_PEER_MARK_DELETION (1 << 18)
+#define LNET_PEER_MARK_DELETION BIT(18)
struct lnet_peer_net {
/* chain on lp_peer_nets */
enum lnet_match_flags {
/* Didn't match anything */
- LNET_MATCHMD_NONE = (1 << 0),
+ LNET_MATCHMD_NONE = BIT(0),
/* Matched OK */
- LNET_MATCHMD_OK = (1 << 1),
+ LNET_MATCHMD_OK = BIT(1),
/* Must be discarded */
- LNET_MATCHMD_DROP = (1 << 2),
+ LNET_MATCHMD_DROP = BIT(2),
/* match and buffer is exhausted */
- LNET_MATCHMD_EXHAUSTED = (1 << 3),
+ LNET_MATCHMD_EXHAUSTED = BIT(3),
/* match or drop */
LNET_MATCHMD_FINISH = (LNET_MATCHMD_OK | LNET_MATCHMD_DROP),
};
/* Options for struct lnet_portal::ptl_options */
-#define LNET_PTL_LAZY (1 << 0)
-#define LNET_PTL_MATCH_UNIQUE (1 << 1) /* unique match, for RDMA */
-#define LNET_PTL_MATCH_WILDCARD (1 << 2) /* wildcard match, request portal */
+#define LNET_PTL_LAZY BIT(0)
+#define LNET_PTL_MATCH_UNIQUE BIT(1) /* unique match, for RDMA */
+#define LNET_PTL_MATCH_WILDCARD BIT(2) /* wildcard match, request portal */
/* parameter for matching operations (GET, PUT) */
struct lnet_match_info {
/* Error Event Categories */
/* WARNING: DO NOT CHANGE THESE UNLESS YOU CHANGE ghal_err_cat.h */
-#define GNI_ERRMASK_CORRECTABLE_MEMORY (1 << 0)
-#define GNI_ERRMASK_CRITICAL (1 << 1)
-#define GNI_ERRMASK_TRANSACTION (1 << 2)
-#define GNI_ERRMASK_ADDRESS_TRANSLATION (1 << 3)
-#define GNI_ERRMASK_TRANSIENT (1 << 4)
-#define GNI_ERRMASK_INFORMATIONAL (1 << 5)
-#define GNI_ERRMASK_DIAG_ONLY (1 << 6)
-#define GNI_ERRMASK_UNKNOWN_TRANSACTION (1 << 7)
+#define GNI_ERRMASK_CORRECTABLE_MEMORY BIT(0)
+#define GNI_ERRMASK_CRITICAL BIT(1)
+#define GNI_ERRMASK_TRANSACTION BIT(2)
+#define GNI_ERRMASK_ADDRESS_TRANSLATION BIT(3)
+#define GNI_ERRMASK_TRANSIENT BIT(4)
+#define GNI_ERRMASK_INFORMATIONAL BIT(5)
+#define GNI_ERRMASK_DIAG_ONLY BIT(6)
+#define GNI_ERRMASK_UNKNOWN_TRANSACTION BIT(7)
/* RDMA mode */
/* local_addr is a physical address (kernel only) */
/* Post CE modes, used during GNI_PostFma(...) */
/* Use two operands (only meaningful for single operand collective operations).
* Single operand CE operations are all variations of AND, OR, XOR and ADD. */
-#define GNI_CEMODE_TWO_OP (1 << 0)
+#define GNI_CEMODE_TWO_OP BIT(0)
/* The provided operands are an intermediate result that has experienced an
* invalid operation floating point exception. */
-#define GNI_CEMODE_FPE_OP_INVAL (1 << 1)
+#define GNI_CEMODE_FPE_OP_INVAL BIT(1)
/* The provided operands are an intermediate result that has experienced an
* overflow floating point exception */
-#define GNI_CEMODE_FPE_OFLOW (1 << 2)
+#define GNI_CEMODE_FPE_OFLOW BIT(2)
/* The provided operands are an intermediate result that has experienced an
* underflow floating point exception. */
-#define GNI_CEMODE_FPE_UFLOW (1 << 3)
+#define GNI_CEMODE_FPE_UFLOW BIT(3)
/* The provided operands are an intermediate result that has experienced an
* inexact result floating point exception. */
-#define GNI_CEMODE_FPE_PRECISION (1 << 4)
+#define GNI_CEMODE_FPE_PRECISION BIT(4)
/* Maximum length in bytes of a datagram transaction */
#define GNI_DATAGRAM_MAXSIZE 128
if (route != NULL) {
/* dissociate conn from route... */
LASSERT(!route->ksnr_deleted);
- LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
+ LASSERT((route->ksnr_connected & BIT(conn->ksnc_type)) != 0);
conn2 = NULL;
list_for_each(tmp, &peer_ni->ksnp_conns) {
conn2 = NULL;
}
if (conn2 == NULL)
- route->ksnr_connected &= ~(1 << conn->ksnc_type);
+ route->ksnr_connected &= ~BIT(conn->ksnc_type);
conn->ksnc_route = NULL;
static inline int
ksocknal_route_mask(void)
{
- if (!*ksocknal_tunables.ksnd_typed_conns)
- return (1 << SOCKLND_CONN_ANY);
+ if (!*ksocknal_tunables.ksnd_typed_conns)
+ return BIT(SOCKLND_CONN_ANY);
- return ((1 << SOCKLND_CONN_CONTROL) |
- (1 << SOCKLND_CONN_BULK_IN) |
- (1 << SOCKLND_CONN_BULK_OUT));
+ return (BIT(SOCKLND_CONN_CONTROL) |
+ BIT(SOCKLND_CONN_BULK_IN) |
+ BIT(SOCKLND_CONN_BULK_OUT));
}
static inline void
route->ksnr_connecting = 1;
- for (;;) {
- wanted = ksocknal_route_mask() & ~route->ksnr_connected;
+ for (;;) {
+ wanted = ksocknal_route_mask() & ~route->ksnr_connected;
- /* stop connecting if peer_ni/route got closed under me, or
- * route got connected while queued */
- if (peer_ni->ksnp_closing || route->ksnr_deleted ||
- wanted == 0) {
- retry_later = 0;
- break;
- }
+ /* stop connecting if peer_ni/route got closed under me, or
+ * route got connected while queued */
+ if (peer_ni->ksnp_closing || route->ksnr_deleted ||
+ wanted == 0) {
+ retry_later = 0;
+ break;
+ }
- /* reschedule if peer_ni is connecting to me */
- if (peer_ni->ksnp_accepting > 0) {
- CDEBUG(D_NET,
- "peer_ni %s(%d) already connecting to me, retry later.\n",
- libcfs_nid2str(peer_ni->ksnp_id.nid), peer_ni->ksnp_accepting);
- retry_later = 1;
- }
+ /* reschedule if peer_ni is connecting to me */
+ if (peer_ni->ksnp_accepting > 0) {
+ CDEBUG(D_NET,
+ "peer_ni %s(%d) already connecting to me, retry later.\n",
+ libcfs_nid2str(peer_ni->ksnp_id.nid), peer_ni->ksnp_accepting);
+ retry_later = 1;
+ }
- if (retry_later) /* needs reschedule */
- break;
+ if (retry_later) /* needs reschedule */
+ break;
- if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) {
- type = SOCKLND_CONN_ANY;
- } else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) {
- type = SOCKLND_CONN_CONTROL;
- } else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) {
- type = SOCKLND_CONN_BULK_IN;
- } else {
- LASSERT ((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
- type = SOCKLND_CONN_BULK_OUT;
- }
+ if ((wanted & BIT(SOCKLND_CONN_ANY)) != 0) {
+ type = SOCKLND_CONN_ANY;
+ } else if ((wanted & BIT(SOCKLND_CONN_CONTROL)) != 0) {
+ type = SOCKLND_CONN_CONTROL;
+ } else if ((wanted & BIT(SOCKLND_CONN_BULK_IN)) != 0) {
+ type = SOCKLND_CONN_BULK_IN;
+ } else {
+ LASSERT((wanted & BIT(SOCKLND_CONN_BULK_OUT)) != 0);
+ type = SOCKLND_CONN_BULK_OUT;
+ }
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
if (ktime_get_seconds() >= deadline) {
- rc = -ETIMEDOUT;
- lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
- route->ksnr_ipaddr,
- route->ksnr_port);
- goto failed;
- }
+ rc = -ETIMEDOUT;
+ lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
+ route->ksnr_ipaddr,
+ route->ksnr_port);
+ goto failed;
+ }
sock = lnet_connect(peer_ni->ksnp_id.nid,
route->ksnr_myiface,
goto failed;
}
- rc = ksocknal_create_conn(peer_ni->ksnp_ni, route, sock, type);
- if (rc < 0) {
- lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
- route->ksnr_ipaddr,
- route->ksnr_port);
- goto failed;
- }
+ rc = ksocknal_create_conn(peer_ni->ksnp_ni, route, sock, type);
+ if (rc < 0) {
+ lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
+ route->ksnr_ipaddr,
+ route->ksnr_port);
+ goto failed;
+ }
- /* A +ve RC means I have to retry because I lost the connection
- * race or I have to renegotiate protocol version */
- retry_later = (rc != 0);
- if (retry_later)
- CDEBUG(D_NET, "peer_ni %s: conn race, retry later.\n",
- libcfs_nid2str(peer_ni->ksnp_id.nid));
+ /* A +ve RC means I have to retry because I lost the connection
+ * race or I have to renegotiate protocol version */
+ retry_later = (rc != 0);
+ if (retry_later)
+ CDEBUG(D_NET, "peer_ni %s: conn race, retry later.\n",
+ libcfs_nid2str(peer_ni->ksnp_id.nid));
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- }
+ }
route->ksnr_scheduled = 0;
route->ksnr_connecting = 0;
!lnet_fault_nid_match(attr->fa_local_nid, local_nid))
return false;
- if (!(attr->fa_msg_mask & (1 << type)))
+ if (!(attr->fa_msg_mask & BIT(type)))
return false;
/* NB: ACK and REPLY have no portal, but they should have been
return;
}
- if (mask & (1 << choice)) {
+ if (mask & BIT(choice)) {
*hstatus = choice;
return;
}
i = HSTATUS_END;
best_delta = HSTATUS_END;
while (i > 0) {
- if (mask & (1 << i)) {
+ if (mask & BIT(i)) {
delta = choice - i;
if (delta < 0)
delta *= -1;
int
lnet_fault_init(void)
{
- BUILD_BUG_ON(LNET_PUT_BIT != 1 << LNET_MSG_PUT);
- BUILD_BUG_ON(LNET_ACK_BIT != 1 << LNET_MSG_ACK);
- BUILD_BUG_ON(LNET_GET_BIT != 1 << LNET_MSG_GET);
- BUILD_BUG_ON(LNET_REPLY_BIT != 1 << LNET_MSG_REPLY);
+ BUILD_BUG_ON(LNET_PUT_BIT != BIT(LNET_MSG_PUT));
+ BUILD_BUG_ON(LNET_ACK_BIT != BIT(LNET_MSG_ACK));
+ BUILD_BUG_ON(LNET_GET_BIT != BIT(LNET_MSG_GET));
+ BUILD_BUG_ON(LNET_REPLY_BIT != BIT(LNET_MSG_REPLY));
mutex_init(&delay_dd.dd_mutex);
spin_lock_init(&delay_dd.dd_lock);