LU-1346 gnilnd: remove libcfs abstractions
 lnet/klnds/o2iblnd/o2iblnd.h (fs/lustre-release.git)
 index 4245526..661756c 100644
 
 typedef struct
 {
-        int              *kib_dev_failover;     /* HCA failover */
-        unsigned int     *kib_service;          /* IB service number */
-        int              *kib_min_reconnect_interval; /* first failed connection retry... */
-        int              *kib_max_reconnect_interval; /* ...exponentially increasing to this */
-        int              *kib_cksum;            /* checksum kib_msg_t? */
-        int              *kib_timeout;          /* comms timeout (seconds) */
-        int              *kib_keepalive;        /* keepalive timeout (seconds) */
-        int              *kib_ntx;              /* # tx descs */
-        int              *kib_credits;          /* # concurrent sends */
-        int              *kib_peertxcredits;    /* # concurrent sends to 1 peer */
-        int              *kib_peerrtrcredits;   /* # per-peer router buffer credits */
-        int              *kib_peercredits_hiw;  /* # when eagerly to return credits */
-        int              *kib_peertimeout;      /* seconds to consider peer dead */
-        char            **kib_default_ipif;     /* default IPoIB interface */
-        int              *kib_retry_count;
-        int              *kib_rnr_retry_count;
-        int              *kib_concurrent_sends; /* send work queue sizing */
-        int             *kib_ib_mtu;           /* IB MTU */
-        int              *kib_map_on_demand;    /* map-on-demand if RD has more fragments
-                                                 * than this value, 0 disable map-on-demand */
-        int              *kib_pmr_pool_size;    /* # physical MR in pool */
-        int              *kib_fmr_pool_size;    /* # FMRs in pool */
-        int              *kib_fmr_flush_trigger; /* When to trigger FMR flush */
-        int              *kib_fmr_cache;        /* enable FMR pool cache? */
+       int              *kib_dev_failover;     /* HCA failover */
+       unsigned int     *kib_service;          /* IB service number */
+       int              *kib_min_reconnect_interval; /* first failed connection retry... */
+       int              *kib_max_reconnect_interval; /* ...exponentially increasing to this */
+       int              *kib_cksum;            /* checksum kib_msg_t? */
+       int              *kib_timeout;          /* comms timeout (seconds) */
+       int              *kib_keepalive;        /* keepalive timeout (seconds) */
+       int              *kib_ntx;              /* # tx descs */
+       int              *kib_credits;          /* # concurrent sends */
+       int              *kib_peertxcredits;    /* # concurrent sends to 1 peer */
+       int              *kib_peerrtrcredits;   /* # per-peer router buffer credits */
+       int              *kib_peercredits_hiw;  /* # when eagerly to return credits */
+       int              *kib_peertimeout;      /* seconds to consider peer dead */
+       char            **kib_default_ipif;     /* default IPoIB interface */
+       int              *kib_retry_count;
+       int              *kib_rnr_retry_count;
+       int              *kib_concurrent_sends; /* send work queue sizing */
+       int              *kib_ib_mtu;           /* IB MTU */
+       int              *kib_map_on_demand;    /* map-on-demand if RD has more fragments
+                                                * than this value, 0 disable map-on-demand */
+       int              *kib_pmr_pool_size;    /* # physical MR in pool */
+       int              *kib_fmr_pool_size;    /* # FMRs in pool */
+       int              *kib_fmr_flush_trigger; /* When to trigger FMR flush */
+       int              *kib_fmr_cache;        /* enable FMR pool cache? */
 #if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
-        cfs_sysctl_table_header_t *kib_sysctl;  /* sysctl interface */
+       struct ctl_table_header *kib_sysctl;  /* sysctl interface */
 #endif
-        int              *kib_require_priv_port;/* accept only privileged ports */
-        int              *kib_use_priv_port;    /* use privileged port for active connect */
+       int              *kib_require_priv_port;/* accept only privileged ports */
+       int              *kib_use_priv_port;    /* use privileged port for active connect */
        /* # threads on each CPT */
        int              *kib_nscheds;
 } kib_tunables_t;
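
For context, each field in kib_tunables_t is a pointer to a module parameter rather than a value; the pointers are filled in where the parameters are declared, typically in the LND's module-parameters file. A minimal sketch of that wiring, with the parameter name and default chosen only for illustration:

#include <linux/module.h>

/* Sketch only: how one kib_tunables_t field is bound to a module
 * parameter.  The variable name and default value are illustrative,
 * not copied from the o2iblnd sources. */
static int timeout = 50;
module_param(timeout, int, 0644);
MODULE_PARM_DESC(timeout, "communications timeout (seconds)");

kib_tunables_t kiblnd_tunables = {
        .kib_timeout = &timeout,
};
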
@@ -378,7 +378,7 @@ struct kib_sched_info {
        /* serialise */
        spinlock_t              ibs_lock;
        /* schedulers sleep here */
-       cfs_waitq_t             ibs_waitq;
+       wait_queue_head_t               ibs_waitq;
        /* conns to check for rx completions */
        cfs_list_t              ibs_conns;
        /* number of scheduler threads */
@@ -396,7 +396,7 @@ typedef struct
        /* list head of failed devices */
        cfs_list_t              kib_failed_devs;
        /* schedulers sleep here */
-       cfs_waitq_t             kib_failover_waitq;
+       wait_queue_head_t               kib_failover_waitq;
        cfs_atomic_t            kib_nthreads;   /* # live threads */
        /* stabilize net/dev/peer/conn ops */
        rwlock_t                kib_global_lock;
@@ -411,7 +411,7 @@ typedef struct
        /* connections with zero refcount */
        cfs_list_t              kib_connd_zombies;
        /* connection daemon sleeps here */
-       cfs_waitq_t             kib_connd_waitq;
+       wait_queue_head_t               kib_connd_waitq;
        spinlock_t              kib_connd_lock; /* serialise */
        struct ib_qp_attr       kib_error_qpa;  /* QP->ERROR */
        /* percpt data for schedulers */
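
The cfs_waitq_t fields above become native wait_queue_head_t objects, and cfs_waitq_init() correspondingly becomes init_waitqueue_head(). A minimal sketch of the native sleep side, assuming a struct kib_sched_info pointer and an illustrative wake-up condition (the real scheduler loop is more elaborate):

#include <linux/wait.h>

/* Sketch only: native wait-queue calls replacing the cfs_waitq_* wrappers. */
static void kiblnd_sched_wait_sketch(struct kib_sched_info *sched)
{
        init_waitqueue_head(&sched->ibs_waitq);   /* was cfs_waitq_init() */

        /* sleep until a connection needs rx completion handling;
         * the condition shown here is illustrative */
        if (wait_event_interruptible(sched->ibs_waitq,
                                     !cfs_list_empty(&sched->ibs_conns)))
                return;         /* interrupted by a signal */
}
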
@@ -713,7 +713,7 @@ do {                                                                        \
                spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);  \
                cfs_list_add_tail(&(conn)->ibc_list,                    \
                                  &kiblnd_data.kib_connd_zombies);      \
-               cfs_waitq_signal(&kiblnd_data.kib_connd_waitq);         \
+               wake_up(&kiblnd_data.kib_connd_waitq);          \
                spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
        }                                                               \
 } while (0)
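
cfs_waitq_signal() maps one-to-one onto wake_up(), as in the macro above. A hedged sketch of the consumer that this wake_up() targets, sleeping on kib_connd_waitq until a zombie connection is queued (the real connection daemon does more than this):

/* Sketch only: the connection-daemon side woken by the wake_up() above.
 * kib_connd_waitq and kib_connd_zombies are the fields shown in the hunks
 * above; everything else is illustrative. */
static void kiblnd_connd_wait_sketch(void)
{
        if (wait_event_interruptible(kiblnd_data.kib_connd_waitq,
                        !cfs_list_empty(&kiblnd_data.kib_connd_zombies)))
                return;         /* interrupted by a signal */

        /* a zombie connection is now queued; reap it under kib_connd_lock */
}
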