]) # LIBCFS_HOTPLUG_STATE_MACHINE
#
+# Kernel version 4.10-rc3 commit f405df5de3170c00e5c54f8b7cf4766044a032ba
+# introduced refcount_t which is atomic_t plus overflow guards.
+#
+AC_DEFUN([LIBCFS_REFCOUNT_T], [
+LB_CHECK_LINUX_HEADER([linux/refcount.h], [
+ AC_DEFINE(HAVE_REFCOUNT_T, 1,
+ [refcount_t is supported])])
+]) # LIBCFS_REFCOUNT_T
+
+#
# LIBCFS_SCHED_HEADERS
#
# 4.11 has broken up sched.h into more headers.
LIBCFS_RHASHTABLE_WALK_ENTER
# 4.10
LIBCFS_HOTPLUG_STATE_MACHINE
+LIBCFS_REFCOUNT_T
# 4.11
LIBCFS_RHASHTABLE_LOOKUP_GET_INSERT_FAST
LIBCFS_SCHED_HEADERS
EXTRA_DIST = linux-misc.h linux-fs.h linux-mem.h linux-time.h linux-cpu.h \
linux-list.h linux-hash.h linux-uuid.h linux-crypto.h linux-wait.h \
- processor.h
+ linux-refcount.h processor.h
--- /dev/null
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+#ifndef __LIBCFS_LINUX_REFCOUNT_H__
+#define __LIBCFS_LINUX_REFCOUNT_H__
+
+#include <linux/atomic.h>
+
+#ifndef HAVE_REFCOUNT_T
+
+/*
+ * Compatibility shim for kernels that predate refcount_t (added in
+ * 4.11-rc via commit f405df5de317): map the refcount API onto plain
+ * atomic_t operations.
+ *
+ * NOTE(review): unlike the real refcount_t these fallbacks provide no
+ * saturation on overflow and no use-after-free warnings — they exist
+ * purely for API compatibility on older kernels.
+ */
+#define refcount_t atomic_t
+
+#define refcount_set atomic_set
+#define refcount_inc atomic_inc
+#define refcount_dec atomic_dec
+#define refcount_dec_and_test atomic_dec_and_test
+#define refcount_read atomic_read
+
+#endif
+
+#endif /* __LIBCFS_LINUX_REFCOUNT_H__ */
*/
struct portals_handle l_handle;
/**
- * Lock reference count.
- * This is how many users have pointers to actual structure, so that
- * we do not accidentally free lock structure that is in use.
- */
- atomic_t l_refc;
- /**
* Internal spinlock protects l_resource. We should hold this lock
* first before taking res_lock.
*/
* what export they are talking to.
*/
struct portals_handle exp_handle;
- atomic_t exp_refcount;
/**
* Set of counters below is to track where export references are
* kept. The exp_rpc_count is used for reconnect handling also,
* the cb_count and locks_count are for debug purposes only for now.
- * The sum of them should be less than exp_refcount by 3
+	 * The sum of them should be less than exp_handle.h_ref by 3
*/
atomic_t exp_rpc_count; /* RPC references */
atomic_t exp_cb_count; /* Commit callback references */
*/
#include <linux/rcupdate.h>
+#ifdef HAVE_REFCOUNT_T
+#include <linux/refcount.h>
+#else
+#include <libcfs/linux/linux-refcount.h>
+#endif
#include <linux/spinlock.h>
#include <libcfs/libcfs.h>
struct portals_handle_ops {
- void (*hop_addref)(void *object);
void (*hop_free)(void *object, int size);
+ /* hop_type is used for some debugging messages */
+ char *hop_type;
};
/* These handles are most easily used by having them appear at the very top of
struct list_head h_link;
__u64 h_cookie;
const struct portals_handle_ops *h_ops;
+ refcount_t h_ref;
/* newly added fields to handle the RCU issue. -jxiong */
struct rcu_head h_rcu;
LCONSOLE_WARN("%s: Client %s (at %s) refused connection, still busy with %d references\n",
target->obd_name, cluuid.uuid,
libcfs_nid2str(req->rq_peer.nid),
- atomic_read(&export->exp_refcount));
+ refcount_read(&export->exp_handle.h_ref));
GOTO(out, rc = -EBUSY);
} else if (lustre_msg_get_conn_cnt(req->rq_reqmsg) == 1 &&
rc != EALREADY) {
*/
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
{
-	atomic_inc(&lock->l_refc);
+	/* lock refcount now lives in the embedded portals handle */
+	refcount_inc(&lock->l_handle.h_ref);
	return lock;
}
EXPORT_SYMBOL(ldlm_lock_get);
ENTRY;
LASSERT(lock->l_resource != LP_POISON);
- LASSERT(atomic_read(&lock->l_refc) > 0);
- if (atomic_dec_and_test(&lock->l_refc)) {
+ LASSERT(refcount_read(&lock->l_handle.h_ref) > 0);
+ if (refcount_dec_and_test(&lock->l_handle.h_ref)) {
struct ldlm_resource *res;
LDLM_DEBUG(lock,
EXIT;
}
-/* this is called by portals_handle2object with the handle lock taken */
-static void lock_handle_addref(void *lock)
-{
- LDLM_LOCK_GET((struct ldlm_lock *)lock);
-}
-
static void lock_handle_free(void *lock, int size)
{
LASSERT(size == sizeof(struct ldlm_lock));
}
static struct portals_handle_ops lock_handle_ops = {
- .hop_addref = lock_handle_addref,
.hop_free = lock_handle_free,
+ .hop_type = "ldlm",
};
/**
lock->l_resource = resource;
lu_ref_add(&resource->lr_reference, "lock", lock);
- atomic_set(&lock->l_refc, 2);
+ refcount_set(&lock->l_handle.h_ref, 2);
INIT_LIST_HEAD(&lock->l_res_link);
INIT_LIST_HEAD(&lock->l_lru);
INIT_LIST_HEAD(&lock->l_pending_chain);
&vaf,
lock,
lock->l_handle.h_cookie,
- atomic_read(&lock->l_refc),
+ refcount_read(&lock->l_handle.h_ref),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
lock->l_flags, nid,
lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
+ exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
lock->l_pid, lock->l_callback_timeout,
lock->l_lvb_type);
va_end(args);
&vaf,
ldlm_lock_to_ns_name(lock), lock,
lock->l_handle.h_cookie,
- atomic_read(&lock->l_refc),
+ refcount_read(&lock->l_handle.h_ref),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
lock->l_req_extent.start, lock->l_req_extent.end,
lock->l_flags, nid,
lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
+ exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
lock->l_pid, lock->l_callback_timeout,
lock->l_lvb_type);
break;
&vaf,
ldlm_lock_to_ns_name(lock), lock,
lock->l_handle.h_cookie,
- atomic_read(&lock->l_refc),
+ refcount_read(&lock->l_handle.h_ref),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
lock->l_policy_data.l_flock.end,
lock->l_flags, nid,
lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
+ exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
lock->l_pid, lock->l_callback_timeout);
break;
&vaf,
ldlm_lock_to_ns_name(lock),
lock, lock->l_handle.h_cookie,
- atomic_read(&lock->l_refc),
+ refcount_read(&lock->l_handle.h_ref),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
ldlm_typename[resource->lr_type],
lock->l_flags, nid,
lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
+ exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
lock->l_pid, lock->l_callback_timeout,
lock->l_lvb_type);
break;
&vaf,
ldlm_lock_to_ns_name(lock),
lock, lock->l_handle.h_cookie,
- atomic_read(&lock->l_refc),
+ refcount_read(&lock->l_handle.h_ref),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
ldlm_typename[resource->lr_type],
lock->l_flags, nid,
lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
+ exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
lock->l_pid, lock->l_callback_timeout,
lock->l_lvb_type);
break;
#include "mdt_internal.h"
#include <lustre_nodemap.h>
-/* we do nothing because we do not have refcount now */
-static void mdt_mfd_get(void *mfdp)
-{
-}
-
static const struct portals_handle_ops mfd_open_handle_ops = {
- .hop_addref = mdt_mfd_get,
.hop_free = NULL,
+ .hop_type = "mdt",
};
/* Create a new mdt_file_data struct, initialize it,
OBD_ALLOC_PTR(mfd);
if (mfd != NULL) {
+ refcount_set(&mfd->mfd_open_handle.h_ref, 1);
INIT_LIST_HEAD_RCU(&mfd->mfd_open_handle.h_link);
mfd->mfd_owner = med;
INIT_LIST_HEAD(&mfd->mfd_list);
LASSERT(open_handle != NULL);
mfd = class_handle2object(open_handle->cookie, &mfd_open_handle_ops);
+ if (mfd)
+ refcount_dec(&mfd->mfd_open_handle.h_ref);
+
/* during dw/setattr replay the mfd can be found by old handle */
if ((!mfd || mfd->mfd_owner != med) && is_replay_or_resent) {
list_for_each_entry(mfd, &med->med_open_head, mfd_list) {
/* free mfd */
void mdt_mfd_free(struct mdt_file_data *mfd)
{
+ LASSERT(refcount_read(&mfd->mfd_open_handle.h_ref) == 1);
LASSERT(list_empty(&mfd->mfd_list));
OBD_FREE_RCU(mfd, sizeof *mfd, &mfd->mfd_open_handle);
}
struct obd_device *obd = exp->exp_obd;
ENTRY;
- LASSERT_ATOMIC_ZERO(&exp->exp_refcount);
+ LASSERT(refcount_read(&exp->exp_handle.h_ref) == 0);
LASSERT(obd != NULL);
CDEBUG(D_IOCTL, "destroying export %p/%s for %s\n", exp,
EXIT;
}
-static void export_handle_addref(void *export)
-{
- class_export_get(export);
-}
-
static struct portals_handle_ops export_handle_ops = {
- .hop_addref = export_handle_addref,
.hop_free = NULL,
+ .hop_type = "export",
};
struct obd_export *class_export_get(struct obd_export *exp)
{
-	atomic_inc(&exp->exp_refcount);
-	CDEBUG(D_INFO, "GETting export %p : new refcount %d\n", exp,
-	       atomic_read(&exp->exp_refcount));
+	/* export refcount moved from exp_refcount into the portals handle */
+	refcount_inc(&exp->exp_handle.h_ref);
+	CDEBUG(D_INFO, "GET export %p refcount=%d\n", exp,
+	       refcount_read(&exp->exp_handle.h_ref));
	return exp;
}
EXPORT_SYMBOL(class_export_get);
void class_export_put(struct obd_export *exp)
{
LASSERT(exp != NULL);
- LASSERT_ATOMIC_GT_LT(&exp->exp_refcount, 0, LI_POISON);
+ LASSERT(refcount_read(&exp->exp_handle.h_ref) > 0);
+ LASSERT(refcount_read(&exp->exp_handle.h_ref) < LI_POISON);
CDEBUG(D_INFO, "PUTting export %p : new refcount %d\n", exp,
- atomic_read(&exp->exp_refcount) - 1);
+ refcount_read(&exp->exp_handle.h_ref) - 1);
- if (atomic_dec_and_test(&exp->exp_refcount)) {
+ if (refcount_dec_and_test(&exp->exp_handle.h_ref)) {
struct obd_device *obd = exp->exp_obd;
CDEBUG(D_IOCTL, "final put %p/%s\n",
export->exp_lock_hash = NULL;
export->exp_flock_hash = NULL;
/* 2 = class_handle_hash + last */
- atomic_set(&export->exp_refcount, 2);
+ refcount_set(&export->exp_handle.h_ref, 2);
atomic_set(&export->exp_rpc_count, 0);
atomic_set(&export->exp_cb_count, 0);
atomic_set(&export->exp_locks_count, 0);
CDEBUG(debug_level, "%s: %s %p %s %s %d (%d %d %d) %d %d %d %d: "
"%p %s %llu stale:%d\n",
exp->exp_obd->obd_name, status, exp, exp->exp_client_uuid.uuid,
- obd_export_nid2str(exp), atomic_read(&exp->exp_refcount),
+ obd_export_nid2str(exp),
+ refcount_read(&exp->exp_handle.h_ref),
atomic_read(&exp->exp_rpc_count),
atomic_read(&exp->exp_cb_count),
atomic_read(&exp->exp_locks_count),
spin_lock(&h->h_lock);
if (likely(h->h_in != 0)) {
- h->h_ops->hop_addref(h);
+ refcount_inc(&h->h_ref);
+ CDEBUG(D_INFO, "GET %s %p refcount=%d\n",
+ h->h_ops->hop_type, h,
+ refcount_read(&h->h_ref));
retval = h;
}
spin_unlock(&h->h_lock);
RETURN(-EBUSY);
}
- LASSERT(atomic_read(&ec->ec_exp->exp_refcount) > 0);
+ LASSERT(refcount_read(&ec->ec_exp->exp_handle.h_ref) > 0);
rc = obd_disconnect(ec->ec_exp);
if (rc != 0)
CERROR("fail to disconnect device: %d\n", rc);
(request->rq_export ?
(char *)request->rq_export->exp_client_uuid.uuid : "0"),
(request->rq_export ?
- atomic_read(&request->rq_export->exp_refcount) : -99),
+ refcount_read(&request->rq_export->exp_handle.h_ref) : -99),
lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
libcfs_id2str(request->rq_peer),
lustre_msg_get_opc(request->rq_reqmsg),
(request->rq_export ?
(char *)request->rq_export->exp_client_uuid.uuid : "0"),
(request->rq_export ?
- atomic_read(&request->rq_export->exp_refcount) : -99),
+ refcount_read(&request->rq_export->exp_handle.h_ref) : -99),
lustre_msg_get_status(request->rq_reqmsg),
request->rq_xid,
libcfs_id2str(request->rq_peer),