-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
/*
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
#define DEBUG_SUBSYSTEM S_RPC
-#ifdef __KERNEL__
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_net.h>
-#else
-#include <liblustre.h>
-#endif
#include "ptlrpc_internal.h"
conn->c_peer = peer;
conn->c_self = self;
- CFS_INIT_HLIST_NODE(&conn->c_hash);
- cfs_atomic_set(&conn->c_refcount, 1);
+ INIT_HLIST_NODE(&conn->c_hash);
+ atomic_set(&conn->c_refcount, 1);
if (uuid)
obd_str2uuid(&conn->c_remote_uuid, uuid->uuid);
- /*
- * Add the newly created conn to the hash, on key collision we
- * lost a racing addition and must destroy our newly allocated
- * connection. The object which exists in the has will be
- * returned and may be compared against out object.
- */
- conn2 = cfs_hash_findadd_unique(conn_hash, &peer, &conn->c_hash);
- if (conn != conn2) {
- OBD_FREE_PTR(conn);
- conn = conn2;
- }
- EXIT;
+ /*
+ * Add the newly created conn to the hash, on key collision we
+ * lost a racing addition and must destroy our newly allocated
+	 * connection. The object which exists in the hash will be
+	 * returned and may be compared against our object.
+ */
+ /* In the function below, .hs_keycmp resolves to
+ * conn_keycmp() */
+ /* coverity[overrun-buffer-val] */
+ conn2 = cfs_hash_findadd_unique(conn_hash, &peer, &conn->c_hash);
+ if (conn != conn2) {
+ OBD_FREE_PTR(conn);
+ conn = conn2;
+ }
+ EXIT;
out:
- CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
- conn, cfs_atomic_read(&conn->c_refcount),
- libcfs_nid2str(conn->c_peer.nid));
- return conn;
+ CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
+ conn, atomic_read(&conn->c_refcount),
+ libcfs_nid2str(conn->c_peer.nid));
+ return conn;
}
int ptlrpc_connection_put(struct ptlrpc_connection *conn)
{
- int rc = 0;
- ENTRY;
-
- if (!conn)
- RETURN(rc);
-
- LASSERT(!cfs_hlist_unhashed(&conn->c_hash));
-
- /*
- * We do not remove connection from hashtable and
- * do not free it even if last caller released ref,
- * as we want to have it cached for the case it is
- * needed again.
- *
- * Deallocating it and later creating new connection
- * again would be wastful. This way we also avoid
- * expensive locking to protect things from get/put
- * race when found cached connection is freed by
- * ptlrpc_connection_put().
- *
- * It will be freed later in module unload time,
- * when ptlrpc_connection_fini()->lh_exit->conn_exit()
- * path is called.
- */
- if (cfs_atomic_dec_return(&conn->c_refcount) == 1)
- rc = 1;
-
- CDEBUG(D_INFO, "PUT conn=%p refcount %d to %s\n",
- conn, cfs_atomic_read(&conn->c_refcount),
- libcfs_nid2str(conn->c_peer.nid));
-
- RETURN(rc);
+ int rc = 0;
+ ENTRY;
+
+ if (!conn)
+ RETURN(rc);
+
+ LASSERT(atomic_read(&conn->c_refcount) > 1);
+
+ /*
+ * We do not remove connection from hashtable and
+ * do not free it even if last caller released ref,
+ * as we want to have it cached for the case it is
+ * needed again.
+ *
+ * Deallocating it and later creating new connection
+	 * again would be wasteful. This way we also avoid
+ * expensive locking to protect things from get/put
+ * race when found cached connection is freed by
+ * ptlrpc_connection_put().
+ *
+ * It will be freed later in module unload time,
+ * when ptlrpc_connection_fini()->lh_exit->conn_exit()
+ * path is called.
+ */
+ if (atomic_dec_return(&conn->c_refcount) == 1)
+ rc = 1;
+
+ CDEBUG(D_INFO, "PUT conn=%p refcount %d to %s\n",
+ conn, atomic_read(&conn->c_refcount),
+ libcfs_nid2str(conn->c_peer.nid));
+
+ RETURN(rc);
}
struct ptlrpc_connection *
ptlrpc_connection_addref(struct ptlrpc_connection *conn)
{
- ENTRY;
+ ENTRY;
- cfs_atomic_inc(&conn->c_refcount);
- CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
- conn, cfs_atomic_read(&conn->c_refcount),
- libcfs_nid2str(conn->c_peer.nid));
+ atomic_inc(&conn->c_refcount);
+ CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
+ conn, atomic_read(&conn->c_refcount),
+ libcfs_nid2str(conn->c_peer.nid));
- RETURN(conn);
+ RETURN(conn);
}
int ptlrpc_connection_init(void)
static unsigned
conn_hashfn(cfs_hash_t *hs, const void *key, unsigned mask)
{
- return cfs_hash_djb2_hash(key, sizeof(lnet_process_id_t), mask);
+ return cfs_hash_djb2_hash(key, sizeof(lnet_process_id_t), mask);
}
static int
-conn_keycmp(const void *key, cfs_hlist_node_t *hnode)
+conn_keycmp(const void *key, struct hlist_node *hnode)
{
- struct ptlrpc_connection *conn;
- const lnet_process_id_t *conn_key;
+ struct ptlrpc_connection *conn;
+ const lnet_process_id_t *conn_key;
- LASSERT(key != NULL);
- conn_key = (lnet_process_id_t*)key;
- conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+ LASSERT(key != NULL);
+ conn_key = (lnet_process_id_t *)key;
+ conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
- return conn_key->nid == conn->c_peer.nid &&
+ return conn_key->nid == conn->c_peer.nid &&
conn_key->pid == conn->c_peer.pid;
}
static void *
-conn_key(cfs_hlist_node_t *hnode)
+conn_key(struct hlist_node *hnode)
{
- struct ptlrpc_connection *conn;
- conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
- return &conn->c_peer;
+ struct ptlrpc_connection *conn;
+ conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+ return &conn->c_peer;
}
static void *
-conn_object(cfs_hlist_node_t *hnode)
+conn_object(struct hlist_node *hnode)
{
- return cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+ return hlist_entry(hnode, struct ptlrpc_connection, c_hash);
}
static void
-conn_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+conn_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct ptlrpc_connection *conn;
+ struct ptlrpc_connection *conn;
- conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
- cfs_atomic_inc(&conn->c_refcount);
+ conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+ atomic_inc(&conn->c_refcount);
}
static void
-conn_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+conn_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct ptlrpc_connection *conn;
+ struct ptlrpc_connection *conn;
- conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
- cfs_atomic_dec(&conn->c_refcount);
+ conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+ atomic_dec(&conn->c_refcount);
}
static void
-conn_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+conn_exit(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct ptlrpc_connection *conn;
-
- conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
- /*
- * Nothing should be left. Connection user put it and
- * connection also was deleted from table by this time
- * so we should have 0 refs.
- */
- LASSERTF(cfs_atomic_read(&conn->c_refcount) == 0,
- "Busy connection with %d refs\n",
- cfs_atomic_read(&conn->c_refcount));
- OBD_FREE_PTR(conn);
+ struct ptlrpc_connection *conn;
+
+ conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+ /*
+ * Nothing should be left. Connection user put it and
+ * connection also was deleted from table by this time
+ * so we should have 0 refs.
+ */
+ LASSERTF(atomic_read(&conn->c_refcount) == 0,
+ "Busy connection with %d refs\n",
+ atomic_read(&conn->c_refcount));
+ OBD_FREE_PTR(conn);
}
static cfs_hash_ops_t conn_hash_ops = {
- .hs_hash = conn_hashfn,
- .hs_keycmp = conn_keycmp,
- .hs_key = conn_key,
- .hs_object = conn_object,
- .hs_get = conn_get,
- .hs_put_locked = conn_put_locked,
- .hs_exit = conn_exit,
+ .hs_hash = conn_hashfn,
+ .hs_keycmp = conn_keycmp,
+ .hs_key = conn_key,
+ .hs_object = conn_object,
+ .hs_get = conn_get,
+ .hs_put_locked = conn_put_locked,
+ .hs_exit = conn_exit,
};