X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lnet%2Fulnds%2Fsocklnd%2Fconn.c;h=6f83dbc640ae5020b2c90012e095c24cbff56526;hb=e0d2bfe3b93d5179c9aee4a65e1695fa4c7779ed;hp=5972198ea2618e6b02c5ec7cc6491dade4c9b6c4;hpb=0f8dca08a4f68cba82c2c822998ecc309d3b7aaf;p=fs%2Flustre-release.git

diff --git a/lnet/ulnds/socklnd/conn.c b/lnet/ulnds/socklnd/conn.c
index 5972198..6f83dbc 100644
--- a/lnet/ulnds/socklnd/conn.c
+++ b/lnet/ulnds/socklnd/conn.c
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -28,6 +26,8 @@
 /*
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -198,7 +198,7 @@ usocklnd_check_peer_stale(lnet_ni_t *ni, lnet_process_id_t id)
                 return;
         }
 
-        if (cfs_mt_atomic_read(&peer->up_refcount) == 2) {
+        if (mt_atomic_read(&peer->up_refcount) == 2) {
                 int i;
                 for (i = 0; i < N_CONN_TYPES; i++)
                         LASSERT (peer->up_conns[i] == NULL);
@@ -252,7 +252,7 @@ usocklnd_create_passive_conn(lnet_ni_t *ni,
         CFS_INIT_LIST_HEAD (&conn->uc_tx_list);
         CFS_INIT_LIST_HEAD (&conn->uc_zcack_list);
         pthread_mutex_init(&conn->uc_lock, NULL);
-        cfs_mt_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
+        mt_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
 
         *connp = conn;
         return 0;
@@ -309,7 +309,7 @@ usocklnd_create_active_conn(usock_peer_t *peer, int type,
         CFS_INIT_LIST_HEAD (&conn->uc_tx_list);
         CFS_INIT_LIST_HEAD (&conn->uc_zcack_list);
         pthread_mutex_init(&conn->uc_lock, NULL);
-        cfs_mt_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
+        mt_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
 
         *connp = conn;
         return 0;
@@ -711,7 +711,7 @@ usocklnd_create_peer(lnet_ni_t *ni, lnet_process_id_t id,
         peer->up_incrn_is_set = 0;
         peer->up_errored = 0;
         peer->up_last_alive = 0;
-        cfs_mt_atomic_set (&peer->up_refcount, 1); /* 1 ref for caller */
+        mt_atomic_set(&peer->up_refcount, 1); /* 1 ref for caller */
         pthread_mutex_init(&peer->up_lock, NULL);
 
         pthread_mutex_lock(&net->un_lock);
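
Note: besides dropping the emacs/vim modelines and adding the Intel copyright, the substantive change in the hunks above is the rename of the userspace libcfs atomic wrappers from cfs_mt_atomic_read()/cfs_mt_atomic_set() to mt_atomic_read()/mt_atomic_set(); the refcounting logic at the call sites is untouched. As a rough sketch only (the mt_atomic_t type name and this minimal layout are assumptions for illustration, not the actual libcfs definitions), the interface these call sites rely on looks like:

/* Hypothetical sketch of the wrapper interface used by the call sites
 * above; not the real libcfs implementation. */
typedef struct {
        volatile int counter;   /* reference count manipulated via the wrappers */
} mt_atomic_t;

static inline void mt_atomic_set(mt_atomic_t *a, int v)
{
        a->counter = v;         /* store the new counter value */
}

static inline int mt_atomic_read(mt_atomic_t *a)
{
        return a->counter;      /* return the current counter value */
}

With that shape, mt_atomic_set(&conn->uc_refcount, 1) seeds the connection refcount at creation, and the mt_atomic_read(&peer->up_refcount) == 2 check in usocklnd_check_peer_stale() detects a peer held only by the hash table plus the caller.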