#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/mutex.h>
+#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/time.h>
#include <linux/types.h>
time64_t at_binstart; /* bin start time */
unsigned int at_hist[AT_BINS]; /* timeout history bins */
unsigned int at_flags;
- unsigned int at_current; /* current timeout value */
- unsigned int at_worst_ever; /* worst-ever timeout value */
- time64_t at_worst_time; /* worst-ever timeout timestamp */
+ timeout_t at_current_timeout; /* current timeout value */
+ timeout_t at_worst_timeout_ever; /* worst-ever timeout delta
+ * value
+ */
+ time64_t at_worst_timestamp; /* worst-ever timeout
+ * timestamp
+ */
spinlock_t at_lock;
};
};
/** Returns text string representation of numeric import state \a state */
-static inline char * ptlrpc_import_state_name(enum lustre_imp_state state)
+static inline const char *ptlrpc_import_state_name(enum lustre_imp_state state)
{
- static char *import_state_names[] = {
+ static const char * const import_state_names[] = {
"<UNKNOWN>", "CLOSED", "NEW", "DISCONN",
"CONNECTING", "REPLAY", "REPLAY_LOCKS", "REPLAY_WAIT",
"RECOVER", "FULL", "EVICTED", "IDLE",
*/
struct obd_import {
/** Reference counter */
- atomic_t imp_refcount;
+ refcount_t imp_refcount;
struct lustre_handle imp_dlm_handle; /* client's ldlm export */
/** Currently active connection */
struct ptlrpc_connection *imp_connection;
* @{
*/
struct ptlrpc_sec *imp_sec;
- struct mutex imp_sec_mutex;
+ rwlock_t imp_sec_lock;
time64_t imp_sec_expire;
pid_t imp_sec_refpid;
/** @} */
/** Wait queue for those who need to wait for recovery completion */
wait_queue_head_t imp_recovery_waitq;
+ /** Number of requests allocated */
+ atomic_t imp_reqs;
/** Number of requests currently in-flight */
atomic_t imp_inflight;
/** Number of requests currently unregistering */
atomic_t imp_unregistering;
/** Number of replay requests inflight */
atomic_t imp_replay_inflight;
+ /** In-flight replays rate control */
+ wait_queue_head_t imp_replay_waitq;
+
/** Number of currently happening import invalidations */
atomic_t imp_inval_count;
/** Number of request timeouts */
spinlock_t imp_lock;
/* flags */
- unsigned long imp_no_timeout:1, /* timeouts are disabled */
- imp_invalid:1, /* evicted */
+ unsigned long imp_invalid:1, /* evicted */
/* administratively disabled */
imp_deactive:1,
/* try to recover the import */
/* import has tried to connect with server */
imp_connect_tried:1,
/* connected but not FULL yet */
- imp_connected:1;
+ imp_connected:1,
+ /* grant shrink disabled */
+ imp_grant_shrink_disabled:1,
+ /* to suppress LCONSOLE() at conn.restore */
+ imp_was_idle:1;
u32 imp_connect_op;
u32 imp_idle_timeout;
u32 imp_idle_debug;
time64_t imp_last_reply_time; /* for health check */
};
-/* import.c */
-static inline unsigned int at_est2timeout(unsigned int val)
+/* import.c : adaptive timeout handling.
+ *
+ * Lustre tracks how long RPCs take to complete. This information is reported
+ * back to clients who utilize the information to estimate the time needed
+ * for future requests and set appropriate RPC timeouts. Minimum and maximum
+ * service times can be configured via the at_min and at_max kernel module
+ * parameters, respectively.
+ *
+ * Since this information is transmitted between nodes the timeouts are in
+ * seconds not jiffies which can vary from node to node. To avoid confusion
+ * the timeout is handled in timeout_t (s32) instead of time64_t or
+ * long (jiffies).
+ */
+static inline timeout_t at_est2timeout(timeout_t timeout)
{
- /* add an arbitrary minimum: 125% +5 sec */
- return (val + (val >> 2) + 5);
+ /* add an arbitrary minimum: 125% +5 sec */
+ return timeout + (timeout >> 2) + 5;
}
-static inline unsigned int at_timeout2est(unsigned int val)
+static inline timeout_t at_timeout2est(timeout_t timeout)
{
- /* restore estimate value from timeout: e=4/5(t-5) */
- LASSERT(val);
- return (max((val << 2) / 5, 5U) - 4);
+ /* restore estimate value from timeout: e=4/5(t-5) */
+ LASSERT(timeout > 0);
+ return max((timeout << 2) / 5, 5) - 4;
}
-static inline void at_reset_nolock(struct adaptive_timeout *at, int val)
+static inline void at_reset_nolock(struct adaptive_timeout *at,
+ timeout_t timeout)
{
- at->at_current = val;
- at->at_worst_ever = val;
- at->at_worst_time = ktime_get_real_seconds();
+ at->at_current_timeout = timeout;
+ at->at_worst_timeout_ever = timeout;
+ at->at_worst_timestamp = ktime_get_real_seconds();
}
-static inline void at_reset(struct adaptive_timeout *at, int val)
+static inline void at_reset(struct adaptive_timeout *at, timeout_t timeout)
{
spin_lock(&at->at_lock);
- at_reset_nolock(at, val);
+ at_reset_nolock(at, timeout);
spin_unlock(&at->at_lock);
}
-static inline void at_init(struct adaptive_timeout *at, int val, int flags) {
+static inline void at_init(struct adaptive_timeout *at, timeout_t timeout,
+ int flags)
+{
memset(at, 0, sizeof(*at));
spin_lock_init(&at->at_lock);
at->at_flags = flags;
- at_reset(at, val);
+ at_reset(at, timeout);
}
-static inline void at_reinit(struct adaptive_timeout *at, int val, int flags)
+static inline void at_reinit(struct adaptive_timeout *at, timeout_t timeout,
+ int flags)
{
spin_lock(&at->at_lock);
at->at_binstart = 0;
memset(at->at_hist, 0, sizeof(at->at_hist));
at->at_flags = flags;
- at_reset_nolock(at, val);
+ at_reset_nolock(at, timeout);
spin_unlock(&at->at_lock);
}
extern unsigned int at_min;
-static inline int at_get(struct adaptive_timeout *at) {
- return (at->at_current > at_min) ? at->at_current : at_min;
-}
-int at_measured(struct adaptive_timeout *at, unsigned int val);
-int import_at_get_index(struct obd_import *imp, int portal);
extern unsigned int at_max;
#define AT_OFF (at_max == 0)
+static inline timeout_t at_get(struct adaptive_timeout *at)
+{
+ return (at->at_current_timeout > at_min) ?
+ at->at_current_timeout : at_min;
+}
+
+timeout_t at_measured(struct adaptive_timeout *at, timeout_t timeout);
+int import_at_get_index(struct obd_import *imp, int portal);
+
/* genops.c */
struct obd_export;
extern struct obd_import *class_exp2cliimp(struct obd_export *);