/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
* vim:expandtab:shiftwidth=8:tabstop=8:
*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*
* This file is part of Portals
* http://sourceforge.net/projects/sandiaportals/
lp->lp_rtr_refcount++;
if (lp->lp_rtr_refcount == 1) {
- struct list_head *pos;
+ cfs_list_t *pos;
/* a simple insertion sort */
- list_for_each_prev(pos, &the_lnet.ln_routers) {
- lnet_peer_t *rtr = list_entry(pos, lnet_peer_t,
- lp_rtr_list);
+ cfs_list_for_each_prev(pos, &the_lnet.ln_routers) {
+ lnet_peer_t *rtr = cfs_list_entry(pos, lnet_peer_t,
+ lp_rtr_list);
if (rtr->lp_nid < lp->lp_nid)
break;
}
- list_add(&lp->lp_rtr_list, pos);
+ cfs_list_add(&lp->lp_rtr_list, pos);
/* addref for the_lnet.ln_routers */
lnet_peer_addref_locked(lp);
the_lnet.ln_routers_version++;
lp->lp_rtr_refcount--;
if (lp->lp_rtr_refcount == 0) {
if (lp->lp_rcd != NULL) {
- list_add(&lp->lp_rcd->rcd_list,
- &the_lnet.ln_zombie_rcd);
+ cfs_list_add(&lp->lp_rcd->rcd_list,
+ &the_lnet.ln_zombie_rcd);
lp->lp_rcd = NULL;
}
- list_del(&lp->lp_rtr_list);
+ cfs_list_del(&lp->lp_rtr_list);
/* decref for the_lnet.ln_routers */
lnet_peer_decref_locked(lp);
the_lnet.ln_routers_version++;
lnet_find_net_locked (__u32 net)
{
lnet_remotenet_t *rnet;
- struct list_head *tmp;
+ cfs_list_t *tmp;
LASSERT (!the_lnet.ln_shutdown);
- list_for_each (tmp, &the_lnet.ln_remote_nets) {
- rnet = list_entry(tmp, lnet_remotenet_t, lrn_list);
+ cfs_list_for_each (tmp, &the_lnet.ln_remote_nets) {
+ rnet = cfs_list_entry(tmp, lnet_remotenet_t, lrn_list);
if (rnet->lrn_net == net)
return rnet;
return NULL;
}
+/* Seed libcfs's PRNG (cfs_srand) exactly once per module lifetime.
+ * Mixes cfs_get_random_bytes() output with the local non-loopback NIDs
+ * and the current time so that nodes booted simultaneously still get
+ * distinct route-shuffle orderings (consumed via cfs_rand() in
+ * lnet_add_route_to_rnet).  Idempotent: 'seeded' latches after the
+ * first call.  NOTE(review): assumes callers are serialized under
+ * LNET_LOCK, since 'seeded' is unprotected — confirm at call sites. */
+static void lnet_shuffle_seed(void)
+{
+        static int seeded = 0;
+        int lnd_type, seed[2];
+        struct timeval tv;
+        lnet_ni_t *ni;
+        cfs_list_t *tmp;
+
+        if (seeded)
+                return;
+
+        cfs_get_random_bytes(seed, sizeof(seed));
+
+        /* Nodes with small feet have little entropy;
+         * the NID for this node gives the most entropy in the low bits */
+        cfs_list_for_each(tmp, &the_lnet.ln_nis) {
+                ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
+                lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
+
+                /* skip the loopback NI: its NID is constant, no entropy */
+                if (lnd_type != LOLND)
+                        seed[0] ^= (LNET_NIDADDR(ni->ni_nid) | lnd_type);
+        }
+
+        cfs_gettimeofday(&tv);
+        cfs_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);
+        seeded = 1;
+        return;
+}
+
/* NB expects LNET_LOCK held */
void
lnet_add_route_to_rnet (lnet_remotenet_t *rnet, lnet_route_t *route)
{
unsigned int len = 0;
unsigned int offset = 0;
- struct list_head *e;
- extern __u64 lnet_create_interface_cookie(void);
+ cfs_list_t *e;
+
+ lnet_shuffle_seed();
- list_for_each (e, &rnet->lrn_routes) {
+ cfs_list_for_each (e, &rnet->lrn_routes) {
len++;
}
- /* FIXME use Lustre random function when it's moved to libcfs.
- * See bug 18751 */
/* len+1 positions to add a new entry, also prevents division by 0 */
- offset = ((unsigned int) lnet_create_interface_cookie()) % (len + 1);
- list_for_each (e, &rnet->lrn_routes) {
+ offset = cfs_rand() % (len + 1);
+ cfs_list_for_each (e, &rnet->lrn_routes) {
if (offset == 0)
break;
offset--;
}
- list_add(&route->lr_list, e);
+ cfs_list_add(&route->lr_list, e);
the_lnet.ln_remote_nets_version++;
lnet_rtr_addref_locked(route->lr_gateway);
int
lnet_add_route (__u32 net, unsigned int hops, lnet_nid_t gateway)
{
- struct list_head *e;
+ cfs_list_t *e;
lnet_remotenet_t *rnet;
lnet_remotenet_t *rnet2;
lnet_route_t *route;
rnet2 = lnet_find_net_locked(net);
if (rnet2 == NULL) {
/* new network */
- list_add_tail(&rnet->lrn_list, &the_lnet.ln_remote_nets);
+ cfs_list_add_tail(&rnet->lrn_list, &the_lnet.ln_remote_nets);
rnet2 = rnet;
}
/* Search for a duplicate route (it's a NOOP if it is) */
add_route = 1;
- list_for_each (e, &rnet2->lrn_routes) {
- lnet_route_t *route2 = list_entry(e, lnet_route_t, lr_list);
+ cfs_list_for_each (e, &rnet2->lrn_routes) {
+ lnet_route_t *route2 = cfs_list_entry(e, lnet_route_t, lr_list);
if (route2->lr_gateway == route->lr_gateway) {
add_route = 0;
lnet_remotenet_t *rnet;
lnet_route_t *route;
lnet_route_t *route2;
- struct list_head *e1;
- struct list_head *e2;
+ cfs_list_t *e1;
+ cfs_list_t *e2;
LNET_LOCK();
- list_for_each (e1, &the_lnet.ln_remote_nets) {
- rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
+ cfs_list_for_each (e1, &the_lnet.ln_remote_nets) {
+ rnet = cfs_list_entry(e1, lnet_remotenet_t, lrn_list);
route2 = NULL;
- list_for_each (e2, &rnet->lrn_routes) {
- route = list_entry(e2, lnet_route_t, lr_list);
+ cfs_list_for_each (e2, &rnet->lrn_routes) {
+ route = cfs_list_entry(e2, lnet_route_t, lr_list);
if (route2 == NULL)
route2 = route;
{
lnet_remotenet_t *rnet;
lnet_route_t *route;
- struct list_head *e1;
- struct list_head *e2;
+ cfs_list_t *e1;
+ cfs_list_t *e2;
int rc = -ENOENT;
CDEBUG(D_NET, "Del route: net %s : gw %s\n",
again:
LNET_LOCK();
- list_for_each (e1, &the_lnet.ln_remote_nets) {
- rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
+ cfs_list_for_each (e1, &the_lnet.ln_remote_nets) {
+ rnet = cfs_list_entry(e1, lnet_remotenet_t, lrn_list);
if (!(net == LNET_NIDNET(LNET_NID_ANY) ||
net == rnet->lrn_net))
continue;
- list_for_each (e2, &rnet->lrn_routes) {
- route = list_entry(e2, lnet_route_t, lr_list);
+ cfs_list_for_each (e2, &rnet->lrn_routes) {
+ route = cfs_list_entry(e2, lnet_route_t, lr_list);
if (!(gw_nid == LNET_NID_ANY ||
gw_nid == route->lr_gateway->lp_nid))
continue;
- list_del(&route->lr_list);
+ cfs_list_del(&route->lr_list);
the_lnet.ln_remote_nets_version++;
- if (list_empty(&rnet->lrn_routes))
- list_del(&rnet->lrn_list);
+ if (cfs_list_empty(&rnet->lrn_routes))
+ cfs_list_del(&rnet->lrn_list);
else
rnet = NULL;
lnet_get_route (int idx, __u32 *net, __u32 *hops,
lnet_nid_t *gateway, __u32 *alive)
{
- struct list_head *e1;
- struct list_head *e2;
+ cfs_list_t *e1;
+ cfs_list_t *e2;
lnet_remotenet_t *rnet;
lnet_route_t *route;
LNET_LOCK();
- list_for_each (e1, &the_lnet.ln_remote_nets) {
- rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
+ cfs_list_for_each (e1, &the_lnet.ln_remote_nets) {
+ rnet = cfs_list_entry(e1, lnet_remotenet_t, lrn_list);
- list_for_each (e2, &rnet->lrn_routes) {
- route = list_entry(e2, lnet_route_t, lr_list);
+ cfs_list_for_each (e2, &rnet->lrn_routes) {
+ route = cfs_list_entry(e2, lnet_route_t, lr_list);
if (idx-- == 0) {
*net = rnet->lrn_net;
if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
lnet_swap_pinginfo(info);
} else if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
- CDEBUG(D_NETERROR, "%s: Unexpected magic %08x\n",
- libcfs_nid2str(rtr->lp_nid), info->pi_magic);
+ CNETERR("%s: Unexpected magic %08x\n",
+ libcfs_nid2str(rtr->lp_nid), info->pi_magic);
return -EPROTO;
}
return -ENOENT; /* v1 doesn't carry NI status info */
if (info->pi_version != LNET_PROTO_PING_VERSION) {
- CDEBUG(D_NETERROR, "%s: Unexpected version 0x%x\n",
- libcfs_nid2str(rtr->lp_nid), info->pi_version);
+ CNETERR("%s: Unexpected version 0x%x\n",
+ libcfs_nid2str(rtr->lp_nid), info->pi_version);
return -EPROTO;
}
lnet_nid_t nid = stat->ns_nid;
if (nid == LNET_NID_ANY) {
- CDEBUG(D_NETERROR, "%s: unexpected LNET_NID_ANY\n",
- libcfs_nid2str(rtr->lp_nid));
+ CNETERR("%s: unexpected LNET_NID_ANY\n",
+ libcfs_nid2str(rtr->lp_nid));
return -EPROTO;
}
}
if (stat->ns_status != LNET_NI_STATUS_UP) {
- CDEBUG(D_NETERROR, "%s: Unexpected status 0x%x\n",
- libcfs_nid2str(rtr->lp_nid), stat->ns_status);
+ CNETERR("%s: Unexpected status 0x%x\n",
+ libcfs_nid2str(rtr->lp_nid), stat->ns_status);
return -EPROTO;
}
lnet_wait_known_routerstate(void)
{
lnet_peer_t *rtr;
- struct list_head *entry;
+ cfs_list_t *entry;
int all_known;
LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
LNET_LOCK();
all_known = 1;
- list_for_each (entry, &the_lnet.ln_routers) {
- rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
+ cfs_list_for_each (entry, &the_lnet.ln_routers) {
+ rtr = cfs_list_entry(entry, lnet_peer_t, lp_rtr_list);
if (rtr->lp_alive_count == 0) {
all_known = 0;
LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_UNLINKING);
the_lnet.ln_rc_state = LNET_RC_STATE_UNLINKED;
#ifdef __KERNEL__
- mutex_up(&the_lnet.ln_rc_signal);
+ cfs_mutex_up(&the_lnet.ln_rc_signal);
#endif
return;
}
LNET_LOCK();
- list_for_each_entry (ni, &the_lnet.ln_nis, ni_list) {
+ cfs_list_for_each_entry (ni, &the_lnet.ln_nis, ni_list) {
lnet_ni_status_t *ns = ni->ni_status;
LASSERT (ns != NULL);
void
lnet_destroy_rc_data (lnet_rc_data_t *rcd)
{
- LASSERT (list_empty(&rcd->rcd_list));
+ LASSERT (cfs_list_empty(&rcd->rcd_list));
/* detached from network */
LASSERT (LNetHandleIsInvalid(rcd->rcd_mdh));
if (!lnet_isrouter(rtr)) {
lnet_peer_decref_locked(rtr);
if (rcd != NULL)
- list_add(&rcd->rcd_list, &the_lnet.ln_zombie_rcd);
+ cfs_list_add(&rcd->rcd_list, &the_lnet.ln_zombie_rcd);
return; /* router table changed! */
}
* outstanding events as it is allowed outstanding sends */
eqsz = 0;
version = the_lnet.ln_routers_version;
- list_for_each_entry(rtr, &the_lnet.ln_routers, lp_rtr_list) {
+ cfs_list_for_each_entry(rtr, &the_lnet.ln_routers, lp_rtr_list) {
lnet_ni_t *ni = rtr->lp_ni;
lnet_process_id_t id;
return 0;
#ifdef __KERNEL__
- init_mutex_locked(&the_lnet.ln_rc_signal);
+ cfs_init_mutex_locked(&the_lnet.ln_rc_signal);
/* EQ size doesn't matter; the callback is guaranteed to get every
* event */
eqsz = 1;
the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING;
#ifdef __KERNEL__
- rc = (int)cfs_kernel_thread(lnet_router_checker, NULL, 0);
+ rc = cfs_create_thread(lnet_router_checker, NULL, 0);
if (rc < 0) {
CERROR("Can't start router checker thread: %d\n", rc);
the_lnet.ln_rc_state = LNET_RC_STATE_UNLINKING;
rc = LNetMDUnlink(the_lnet.ln_rc_mdh);
LASSERT (rc == 0);
/* block until event callback signals exit */
- mutex_down(&the_lnet.ln_rc_signal);
+ cfs_mutex_down(&the_lnet.ln_rc_signal);
rc = LNetEQFree(the_lnet.ln_rc_eqh);
LASSERT (rc == 0);
the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
#ifdef __KERNEL__
/* block until event callback signals exit */
- mutex_down(&the_lnet.ln_rc_signal);
+ cfs_mutex_down(&the_lnet.ln_rc_signal);
#else
while (the_lnet.ln_rc_state != LNET_RC_STATE_UNLINKED) {
lnet_router_checker();
{
lnet_rc_data_t *rcd;
lnet_rc_data_t *tmp;
- struct list_head free_rcd;
+ cfs_list_t free_rcd;
int i;
__u64 version;
LNET_LOCK();
rescan:
version = the_lnet.ln_routers_version;
- list_for_each_entry_safe (rcd, tmp, &the_lnet.ln_zombie_rcd, rcd_list) {
+ cfs_list_for_each_entry_safe (rcd, tmp, &the_lnet.ln_zombie_rcd,
+ rcd_list) {
if (LNetHandleIsInvalid(rcd->rcd_mdh)) {
- list_del(&rcd->rcd_list);
- list_add(&rcd->rcd_list, &free_rcd);
+ cfs_list_del(&rcd->rcd_list);
+ cfs_list_add(&rcd->rcd_list, &free_rcd);
continue;
}
}
i = 2;
- while (wait_unlink && !list_empty(&the_lnet.ln_zombie_rcd)) {
- rcd = list_entry(the_lnet.ln_zombie_rcd.next,
- lnet_rc_data_t, rcd_list);
+ while (wait_unlink && !cfs_list_empty(&the_lnet.ln_zombie_rcd)) {
+ rcd = cfs_list_entry(the_lnet.ln_zombie_rcd.next,
+ lnet_rc_data_t, rcd_list);
if (LNetHandleIsInvalid(rcd->rcd_mdh)) {
- list_del(&rcd->rcd_list);
- list_add(&rcd->rcd_list, &free_rcd);
+ cfs_list_del(&rcd->rcd_list);
+ cfs_list_add(&rcd->rcd_list, &free_rcd);
continue;
}
LNET_UNLOCK();
- while (!list_empty(&free_rcd)) {
- rcd = list_entry(free_rcd.next, lnet_rc_data_t, rcd_list);
- list_del_init(&rcd->rcd_list);
+ while (!cfs_list_empty(&free_rcd)) {
+ rcd = cfs_list_entry(free_rcd.next, lnet_rc_data_t, rcd_list);
+ cfs_list_del_init(&rcd->rcd_list);
lnet_destroy_rc_data(rcd);
}
return;
{
int rc;
lnet_peer_t *rtr;
- struct list_head *entry;
- lnet_process_id_t rtr_id;
+ cfs_list_t *entry;
cfs_daemonize("router_checker");
cfs_block_allsigs();
- rtr_id.pid = LUSTRE_SRV_LNET_PID;
-
LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) {
rescan:
version = the_lnet.ln_routers_version;
- list_for_each (entry, &the_lnet.ln_routers) {
- rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
+ cfs_list_for_each (entry, &the_lnet.ln_routers) {
+ rtr = cfs_list_entry(entry, lnet_peer_t, lp_rtr_list);
lnet_ping_router_locked(rtr);
/* NB dropped lock */
/* Call cfs_pause() here always adds 1 to load average
* because kernel counts # active tasks as nr_running
* + nr_uninterruptible. */
- cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE,
- cfs_time_seconds(1));
+ cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
+ cfs_time_seconds(1));
}
LNET_LOCK();
- list_for_each (entry, &the_lnet.ln_routers) {
- rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
+ cfs_list_for_each (entry, &the_lnet.ln_routers) {
+ rtr = cfs_list_entry(entry, lnet_peer_t, lp_rtr_list);
if (rtr->lp_rcd == NULL)
continue;
- LASSERT (list_empty(&rtr->lp_rcd->rcd_list));
- list_add(&rtr->lp_rcd->rcd_list, &the_lnet.ln_zombie_rcd);
+ LASSERT (cfs_list_empty(&rtr->lp_rcd->rcd_list));
+ cfs_list_add(&rtr->lp_rcd->rcd_list, &the_lnet.ln_zombie_rcd);
rtr->lp_rcd = NULL;
}
int nbuffers = 0;
lnet_rtrbuf_t *rb;
- LASSERT (list_empty(&rbp->rbp_msgs));
+ LASSERT (cfs_list_empty(&rbp->rbp_msgs));
LASSERT (rbp->rbp_credits == rbp->rbp_nbuffers);
- while (!list_empty(&rbp->rbp_bufs)) {
+ while (!cfs_list_empty(&rbp->rbp_bufs)) {
LASSERT (rbp->rbp_credits > 0);
- rb = list_entry(rbp->rbp_bufs.next,
- lnet_rtrbuf_t, rb_list);
- list_del(&rb->rb_list);
+ rb = cfs_list_entry(rbp->rbp_bufs.next,
+ lnet_rtrbuf_t, rb_list);
+ cfs_list_del(&rb->rb_list);
lnet_destroy_rtrbuf(rb, npages);
nbuffers++;
}
rbp->rbp_nbuffers++;
rbp->rbp_credits++;
rbp->rbp_mincredits++;
- list_add(&rb->rb_list, &rbp->rbp_bufs);
+ cfs_list_add(&rb->rb_list, &rbp->rbp_bufs);
/* No allocation "under fire" */
/* Otherwise we'd need code to schedule blocked msgs etc */
lnet_peer_t *lp = NULL;
cfs_time_t now = cfs_time_current();
- LASSERT (!in_interrupt ());
+ LASSERT (!cfs_in_interrupt ());
CDEBUG (D_NET, "%s notifying %s: %s\n",
(ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
if (last != 0 &&
interval > MAX(live_router_check_interval,
dead_router_check_interval))
- CDEBUG(D_NETERROR, "Checker(%d/%d) not called for %d seconds\n",
- live_router_check_interval, dead_router_check_interval,
- interval);
+ CNETERR("Checker(%d/%d) not called for %d seconds\n",
+ live_router_check_interval, dead_router_check_interval,
+ interval);
LNET_LOCK();
LASSERT (!running); /* recursion check */
LNET_LOCK();
version = the_lnet.ln_routers_version;
- list_for_each_entry (rtr, &the_lnet.ln_routers, lp_rtr_list) {
+ cfs_list_for_each_entry (rtr, &the_lnet.ln_routers, lp_rtr_list) {
lnet_ping_router_locked(rtr);
LASSERT (version == the_lnet.ln_routers_version);
}