* for success and do NOT give back a receive credit; that has to wait
* until lnd_recv() gets called. On failure return < 0 and
* release resources; lnd_recv() will not be called. */
- int (*lnd_eager_recv)(struct lnet_ni *ni, void *private, lnet_msg_t *msg,
+ int (*lnd_eager_recv)(struct lnet_ni *ni, void *private, lnet_msg_t *msg,
void **new_privatep);
/* notification of peer health */
__swab32s(&msg->ibm_u.putack.ibpam_rd.rd_key);
__swab32s(&msg->ibm_u.putack.ibpam_rd.rd_nfrags);
}
-
+
n = msg->ibm_u.putack.ibpam_rd.rd_nfrags;
if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) {
CERROR("Bad PUT_ACK nfrags: %d, should be 0 < n <= %d\n",
n, IBLND_MAX_RDMA_FRAGS);
return -EPROTO;
}
-
+
if (msg_nob < offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[n])) {
CERROR("Short PUT_ACK: %d(%d)\n", msg_nob,
(int)offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[n]));
/* always called with a ref on ni, which prevents ni being shutdown */
LASSERT (net->ibn_shutdown == 0);
-
+
/* npeers only grows with the global lock held */
atomic_inc(&net->ibn_npeers);
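/* Illustrative note, not part of the patch: ibn_npeers only ever grows here,
 * under kib_global_lock, and is only decremented with plain atomic ops
 * elsewhere.  Assuming the usual kiblnd shutdown path, this lets shutdown
 * take the lock once, observe the count, and know no new peer can appear
 * behind its back while it waits for the count to drain to zero. */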
CERROR("Can't request completion notificiation: %d\n", rc);
goto failed_2;
}
-
+
memset(init_qp_attr, 0, sizeof(*init_qp_attr));
init_qp_attr->event_handler = kiblnd_qp_event;
init_qp_attr->qp_context = conn;
kiblnd_get_conn_locked (kib_peer_t *peer)
{
LASSERT (!list_empty(&peer->ibp_conns));
-
+
/* just return the first connection */
return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
}
static inline int
-kiblnd_send_keepalive(kib_conn_t *conn)
+kiblnd_send_keepalive(kib_conn_t *conn)
{
return (*kiblnd_tunables.kib_keepalive > 0) &&
time_after(jiffies, conn->ibc_last_send +
LASSERT (net != NULL);
LASSERT (rx->rx_nob < 0); /* was posted */
rx->rx_nob = 0; /* isn't now */
-
+
if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
goto ignore;
dstfrag++;
dstidx++;
}
-
+
tx->tx_nwrq++;
}
time_t last_alive = 0;
int error = 0;
unsigned long flags;
-
+
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
if (list_empty(&peer->ibp_conns) &&
peer->ibp_error != 0) {
error = peer->ibp_error;
peer->ibp_error = 0;
-
+
last_alive = cfs_time_current_sec() -
cfs_duration_sec(cfs_time_current() -
peer->ibp_last_alive);
}
-
+
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
+
if (error != 0)
lnet_notify(peer->ibp_ni,
peer->ibp_nid, 0, last_alive);
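/* Worked example, not part of the patch: ibp_last_alive is a jiffies-style
 * timestamp, so the wall-clock value handed to lnet_notify() is recovered as
 * "seconds now" minus "seconds elapsed since the peer was last heard from".
 * E.g. if cfs_time_current_sec() returns 1000000030 and the peer was last
 * alive 30 seconds' worth of jiffies ago, last_alive is 1000000000. */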
kiblnd_conn_decref(conn);
return 0;
- case RDMA_CM_EVENT_DEVICE_REMOVAL:
- LCONSOLE_ERROR_MSG(0x131,
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ LCONSOLE_ERROR_MSG(0x131,
"Received notification of device removal\n"
"Please shutdown LNET to allow this to proceed\n");
/* Can't remove network from underneath LNET for now, so I have
* to ignore this */
- return 0;
- }
+ return 0;
+ }
}
int
} else {
LASSERT (!tx->tx_queued);
LASSERT (tx->tx_waiting || tx->tx_sending != 0);
- }
+ }
if (time_after_eq (jiffies, tx->tx_deadline)) {
timed_out = 1;
#include <lnet/lib-lnet.h>
int
-LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
+LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
lnet_handle_eq_t *handle)
{
lnet_eq_t *eq;
LASSERT (the_lnet.ln_init);
LASSERT (the_lnet.ln_refcount > 0);
-
+
/* We need count to be a power of 2 so that when eq_{enq,deq}_seq
* overflow, they don't skip entries, so the queue has the same
* apparent capacity at all times */
if (count == 0) /* catch bad parameter / overflow on roundup */
return (-EINVAL);
-
+
eq = lnet_eq_alloc();
if (eq == NULL)
return (-ENOMEM);
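/* Illustrative sketch, not part of the patch: with a power-of-2 queue size
 * the slot for a given sequence number can be derived by masking, e.g.
 *
 *     slot = seq & (count - 1);
 *
 * and that mapping stays contiguous when the 32-bit enq/deq sequence
 * counters wrap.  A "seq % count" with a non-power-of-2 count would jump to
 * a different slot across the wrap and appear to skip entries, which is why
 * the requested count is rounded up (the "overflow on roundup" that the
 * count == 0 check above guards against). */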
LASSERT (the_lnet.ln_init);
LASSERT (the_lnet.ln_refcount > 0);
-
+
LNET_LOCK();
eq = lnet_handle2eq(&eqh);
{
int which;
- return LNetEQPoll(&eventq, 1, 0,
+ return LNetEQPoll(&eventq, 1, 0,
event, &which);
}
RETURN(rc);
}
}
-
+
#ifdef __KERNEL__
if (timeout_ms == 0) {
- LNET_UNLOCK ();
+ LNET_UNLOCK();
RETURN (0);
}
if (timeout_ms < 0) {
cfs_waitq_wait (&wl, CFS_TASK_INTERRUPTIBLE);
- } else {
+ } else {
struct timeval tv;
now = cfs_time_current();
cfs_waitq_timedwait(&wl, CFS_TASK_INTERRUPTIBLE,
cfs_time_seconds(timeout_ms)/1000);
- cfs_duration_usec(cfs_time_sub(cfs_time_current(), now),
- &tv);
+ cfs_duration_usec(cfs_time_sub(cfs_time_current(), now),
+ &tv);
timeout_ms -= tv.tv_sec * 1000 + tv.tv_usec / 1000;
if (timeout_ms < 0)
timeout_ms = 0;
}
-
+
LNET_LOCK();
cfs_waitq_del(&the_lnet.ln_waitq, &wl);
#else
gettimeofday(&then, NULL);
(eqwaitni->ni_lnd->lnd_wait)(eqwaitni, timeout_ms);
-
+
gettimeofday(&now, NULL);
timeout_ms -= (now.tv_sec - then.tv_sec) * 1000 +
(now.tv_usec - then.tv_usec) / 1000;
LBUG();
# else
if (timeout_ms < 0) {
- pthread_cond_wait(&the_lnet.ln_cond,
+ pthread_cond_wait(&the_lnet.ln_cond,
&the_lnet.ln_lock);
} else {
gettimeofday(&then, NULL);
-
+
ts.tv_sec = then.tv_sec + timeout_ms/1000;
ts.tv_nsec = then.tv_usec * 1000 +
(timeout_ms%1000) * 1000000;
ts.tv_sec++;
ts.tv_nsec -= 1000000000;
}
-
+
pthread_cond_timedwait(&the_lnet.ln_cond,
&the_lnet.ln_lock, &ts);
-
+
gettimeofday(&now, NULL);
timeout_ms -= (now.tv_sec - then.tv_sec) * 1000 +
(now.tv_usec - then.tv_usec) / 1000;
-
+
if (timeout_ms < 0)
timeout_ms = 0;
}
}
void
-lnet_do_notify (lnet_peer_t *lp)
+lnet_do_notify (lnet_peer_t *lp)
{
lnet_ni_t *ni = lp->lp_ni;
int alive;
int notifylnd;
-
+
LNET_LOCK();
-
+
/* Notify only in 1 thread at any time to ensure ordered notification.
* NB individual events can be missed; the only guarantee is that you
* always get the most recent news */
}
lp->lp_notifying = 1;
-
+
while (lp->lp_notify) {
alive = lp->lp_alive;
notifylnd = lp->lp_notifylnd;
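/* Illustrative note, not part of the patch: this is a "latest state wins"
 * scheme.  Updaters record the newest alive/notifylnd values and set
 * lp_notify; the first thread to find lp_notifying clear becomes the sole
 * notifier and keeps draining lp_notify under LNET_LOCK.  Notifications
 * therefore go out in order, and intermediate transitions may be coalesced
 * away, exactly as the comment above warns. */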
CDEBUG(D_NET, "Auto-down disabled\n");
return 0;
}
-
+
LNET_LOCK();
lp = lnet_find_peer_locked(nid);
lnet_notify_locked(lp, ni == NULL, alive, when);
LNET_UNLOCK();
-
+
lnet_do_notify(lp);
-
+
LNET_LOCK();
lnet_peer_decref_locked(lp);
else if (route->lr_gateway->lp_ni !=
route2->lr_gateway->lp_ni) {
LNET_UNLOCK();
-
+
CERROR("Routes to %s via %s and %s not supported\n",
libcfs_net2str(rnet->lrn_net),
libcfs_nid2str(route->lr_gateway->lp_nid),
}
}
}
-
+
LNET_UNLOCK();
return 0;
}
/* The router checker thread has unlinked the rc_md
* and exited. */
LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_UNLINKING);
- the_lnet.ln_rc_state = LNET_RC_STATE_UNLINKED;
- mutex_up(&the_lnet.ln_rc_signal);
+ the_lnet.ln_rc_state = LNET_RC_STATE_UNLINKED;
+ mutex_up(&the_lnet.ln_rc_signal);
return;
}
- LASSERT (event->type == LNET_EVENT_SEND ||
+ LASSERT (event->type == LNET_EVENT_SEND ||
event->type == LNET_EVENT_REPLY);
-
+
nid = (event->type == LNET_EVENT_SEND) ?
event->target.nid : event->initiator.nid;
if (lnet_isrouter(lp) && /* ignore if no longer a router */
(event->status != 0 ||
event->type == LNET_EVENT_REPLY)) {
-
+
/* A successful REPLY means the router is up. If _any_ comms
* to the router fail I assume it's down (this will happen if
* we ping alive routers to try to detect router death before
lnet_process_id_t rtr_id;
int secs;
- cfs_daemonize("router_checker");
- cfs_block_allsigs();
+ cfs_daemonize("router_checker");
+ cfs_block_allsigs();
rtr_id.pid = LUSTRE_SRV_LNET_PID;
the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING;
mutex_up(&the_lnet.ln_rc_signal); /* let my parent go */
- while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) {
+ while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) {
__u64 version;
LNET_LOCK();
}
if (secs <= 0)
secs = 0;
-
+
if (secs != 0 &&
!rtr->lp_ping_notsent &&
now > rtr->lp_ping_timestamp + secs) {
LNET_RESERVED_PORTAL,
LNET_PROTO_PING_MATCHBITS, 0);
}
-
+
LNET_LOCK();
lnet_peer_decref_locked(rtr);
set_current_state(CFS_TASK_INTERRUPTIBLE);
cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE,
cfs_time_seconds(1));
- }
+ }
LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_STOPTHREAD);
the_lnet.ln_rc_state = LNET_RC_STATE_UNLINKING;
-
+
rc = LNetMDUnlink(mdh);
LASSERT (rc == 0);
/* The unlink event callback will signal final completion */
-
- return 0;
+ return 0;
}
for (;;) {
LNET_LOCK();
-
+
all_known = 1;
list_for_each (entry, &the_lnet.ln_routers) {
rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
-
+
if (rtr->lp_alive_count == 0) {
all_known = 0;
break;
return;
the_lnet.ln_rc_state = LNET_RC_STATE_STOPTHREAD;
- /* block until event callback signals exit */
- mutex_down(&the_lnet.ln_rc_signal);
+ /* block until event callback signals exit */
+ mutex_down(&the_lnet.ln_rc_signal);
LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_UNLINKED);
rc = LNetEQFree(the_lnet.ln_rc_eqh);
LASSERT (rc == 0);
-
+
the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
}
"\n");
return -EINVAL;
}
-
+
if (live_router_check_interval <= 0 &&
dead_router_check_interval <= 0)
return 0;
- init_mutex_locked(&the_lnet.ln_rc_signal);
+ init_mutex_locked(&the_lnet.ln_rc_signal);
/* EQ size doesn't matter; the callback is guaranteed to get every
* event */
return -ENOMEM;
}
- rc = (int)cfs_kernel_thread(lnet_router_checker, NULL, 0);
- if (rc < 0) {
- CERROR("Can't start router checker thread: %d\n", rc);
+ rc = (int)cfs_kernel_thread(lnet_router_checker, NULL, 0);
+ if (rc < 0) {
+ CERROR("Can't start router checker thread: %d\n", rc);
goto failed;
- }
+ }
- mutex_down(&the_lnet.ln_rc_signal); /* wait for checker to startup */
+ mutex_down(&the_lnet.ln_rc_signal); /* wait for checker to startup */
rc = the_lnet.ln_rc_state;
if (rc < 0) {
the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
goto failed;
}
-
+
LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
if (check_routers_before_use) {
* may have to a previous instance of me. */
lnet_wait_known_routerstate();
}
-
+
return 0;
-
+
failed:
rc = LNetEQFree(the_lnet.ln_rc_eqh);
LASSERT (rc == 0);
LASSERT (rbp->rbp_nbuffers == nbufs);
return 0;
}
-
+
for (i = 0; i < nbufs; i++) {
rb = lnet_new_rtrbuf(rbp);
lnet_alloc_rtrpools(int im_a_router)
{
int rc;
-
+
if (!strcmp(forwarding, "")) {
/* not set either way */
if (!im_a_router)
"'enabled' or 'disabled'\n");
return -EINVAL;
}
-
+
if (tiny_router_buffers <= 0) {
LCONSOLE_ERROR_MSG(0x10c, "tiny_router_buffers=%d invalid when "
"routing enabled\n", tiny_router_buffers);
LNET_LOCK();
the_lnet.ln_routing = 1;
LNET_UNLOCK();
-
+
return 0;
failed:
/* this is kind of an abuse of the LNET_MD_OP_{PUT,GET} options.
* they're only meaningful for MDs attached to an ME (i.e. passive
* buffers... */
- if ((options & LNET_MD_OP_PUT) != 0) {
+ if ((options & LNET_MD_OP_PUT) != 0) {
rc = LNetPut(self, *mdh, LNET_NOACK_REQ, peer,
portal, matchbits, 0, 0);
} else {
- LASSERT ((options & LNET_MD_OP_GET) != 0);
+ LASSERT ((options & LNET_MD_OP_GET) != 0);
rc = LNetGet(self, *mdh, peer, portal, matchbits, 0);
}