X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lnet%2Fklnds%2Fo2iblnd%2Fo2iblnd.h;h=661756cde7c70f6ce837d929bf3c711b5197d682;hp=87c7cc0ec5bad1ce033766bf39df6be2562f12b1;hb=59071a8334bbc1a3a6d31565b7474063438d1f43;hpb=c88317218461159cae399f74b459a850ae3c84f8

diff --git a/lnet/klnds/o2iblnd/o2iblnd.h b/lnet/klnds/o2iblnd/o2iblnd.h
index 87c7cc0..661756c 100644
--- a/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/lnet/klnds/o2iblnd/o2iblnd.h
@@ -27,7 +27,7 @@
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2013, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -38,6 +38,7 @@
  * Author: Eric Barton
  */
 
+#include
 #include
 #include
 #include
@@ -58,6 +59,9 @@
 #include
 #include
 #include
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)
+#include
+#endif
 
 #include
 #include
@@ -86,35 +90,35 @@
 
 typedef struct
 {
-        int              *kib_dev_failover;     /* HCA failover */
-        unsigned int     *kib_service;          /* IB service number */
-        int              *kib_min_reconnect_interval; /* first failed connection retry... */
-        int              *kib_max_reconnect_interval; /* ...exponentially increasing to this */
-        int              *kib_cksum;            /* checksum kib_msg_t? */
-        int              *kib_timeout;          /* comms timeout (seconds) */
-        int              *kib_keepalive;        /* keepalive timeout (seconds) */
-        int              *kib_ntx;              /* # tx descs */
-        int              *kib_credits;          /* # concurrent sends */
-        int              *kib_peertxcredits;    /* # concurrent sends to 1 peer */
-        int              *kib_peerrtrcredits;   /* # per-peer router buffer credits */
-        int              *kib_peercredits_hiw;  /* # when eagerly to return credits */
-        int              *kib_peertimeout;      /* seconds to consider peer dead */
-        char            **kib_default_ipif;     /* default IPoIB interface */
-        int              *kib_retry_count;
-        int              *kib_rnr_retry_count;
-        int              *kib_concurrent_sends; /* send work queue sizing */
-        int              *kib_ib_mtu;           /* IB MTU */
-        int              *kib_map_on_demand;    /* map-on-demand if RD has more fragments
-                                                 * than this value, 0 disable map-on-demand */
-        int              *kib_pmr_pool_size;    /* # physical MR in pool */
-        int              *kib_fmr_pool_size;    /* # FMRs in pool */
-        int              *kib_fmr_flush_trigger; /* When to trigger FMR flush */
-        int              *kib_fmr_cache;        /* enable FMR pool cache? */
+	int		*kib_dev_failover;	/* HCA failover */
+	unsigned int	*kib_service;		/* IB service number */
+	int		*kib_min_reconnect_interval; /* first failed connection retry... */
+	int		*kib_max_reconnect_interval; /* ...exponentially increasing to this */
+	int		*kib_cksum;		/* checksum kib_msg_t? */
+	int		*kib_timeout;		/* comms timeout (seconds) */
+	int		*kib_keepalive;		/* keepalive timeout (seconds) */
+	int		*kib_ntx;		/* # tx descs */
+	int		*kib_credits;		/* # concurrent sends */
+	int		*kib_peertxcredits;	/* # concurrent sends to 1 peer */
+	int		*kib_peerrtrcredits;	/* # per-peer router buffer credits */
+	int		*kib_peercredits_hiw;	/* # when eagerly to return credits */
+	int		*kib_peertimeout;	/* seconds to consider peer dead */
+	char		**kib_default_ipif;	/* default IPoIB interface */
+	int		*kib_retry_count;
+	int		*kib_rnr_retry_count;
+	int		*kib_concurrent_sends;	/* send work queue sizing */
+	int		*kib_ib_mtu;		/* IB MTU */
+	int		*kib_map_on_demand;	/* map-on-demand if RD has more fragments
+						 * than this value, 0 disable map-on-demand */
+	int		*kib_pmr_pool_size;	/* # physical MR in pool */
+	int		*kib_fmr_pool_size;	/* # FMRs in pool */
+	int		*kib_fmr_flush_trigger; /* When to trigger FMR flush */
+	int		*kib_fmr_cache;		/* enable FMR pool cache? */
 #if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
-        cfs_sysctl_table_header_t *kib_sysctl;  /* sysctl interface */
+	struct ctl_table_header	*kib_sysctl;	/* sysctl interface */
 #endif
-        int              *kib_require_priv_port;/* accept only privileged ports */
-        int              *kib_use_priv_port;    /* use privileged port for active connect */
+	int		*kib_require_priv_port;	/* accept only privileged ports */
+	int		*kib_use_priv_port;	/* use privileged port for active connect */
 	/* # threads on each CPT */
 	int		*kib_nscheds;
 } kib_tunables_t;
@@ -374,7 +378,7 @@ struct kib_sched_info {
 	/* serialise */
 	spinlock_t		ibs_lock;
 	/* schedulers sleep here */
-	cfs_waitq_t		ibs_waitq;
+	wait_queue_head_t	ibs_waitq;
 	/* conns to check for rx completions */
 	cfs_list_t		ibs_conns;
 	/* number of scheduler threads */
@@ -392,7 +396,7 @@ typedef struct
 	/* list head of failed devices */
 	cfs_list_t		kib_failed_devs;
 	/* schedulers sleep here */
-	cfs_waitq_t		kib_failover_waitq;
+	wait_queue_head_t	kib_failover_waitq;
 	cfs_atomic_t		kib_nthreads;	/* # live threads */
 	/* stabilize net/dev/peer/conn ops */
 	rwlock_t		kib_global_lock;
@@ -407,7 +411,7 @@ typedef struct
 	/* connections with zero refcount */
 	cfs_list_t		kib_connd_zombies;
 	/* connection daemon sleeps here */
-	cfs_waitq_t		kib_connd_waitq;
+	wait_queue_head_t	kib_connd_waitq;
 	spinlock_t		kib_connd_lock;	/* serialise */
 	struct ib_qp_attr	kib_error_qpa;	/* QP->ERROR */
 	/* percpt data for schedulers */
@@ -709,7 +713,7 @@ do {							\
 		spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);	\
 		cfs_list_add_tail(&(conn)->ibc_list,			\
 				  &kiblnd_data.kib_connd_zombies);	\
-		cfs_waitq_signal(&kiblnd_data.kib_connd_waitq);		\
+		wake_up(&kiblnd_data.kib_connd_waitq);			\
 		spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
 	}								\
 } while (0)
@@ -760,9 +764,9 @@ kiblnd_get_conn_locked (kib_peer_t *peer)
 static inline int
 kiblnd_send_keepalive(kib_conn_t *conn)
 {
-        return (*kiblnd_tunables.kib_keepalive > 0) &&
-                cfs_time_after(jiffies, conn->ibc_last_send +
-                               *kiblnd_tunables.kib_keepalive*CFS_HZ);
+	return (*kiblnd_tunables.kib_keepalive > 0) &&
+		cfs_time_after(jiffies, conn->ibc_last_send +
+			       *kiblnd_tunables.kib_keepalive*HZ);
 }
 
 static inline int
@@ -1067,7 +1071,7 @@ void kiblnd_tunables_fini(void);
 
 int  kiblnd_connd (void *arg);
 int  kiblnd_scheduler(void *arg);
-int  kiblnd_thread_start (int (*fn)(void *arg), void *arg);
+int  kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
 int  kiblnd_failover_thread (void *arg);
 
 int  kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);
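
Editor's note (not part of the patch): the last hunk changes the kiblnd_thread_start() prototype to take a thread name, in line with the rest of the patch replacing cfs_waitq_* wrappers with native kernel primitives. A minimal sketch of what such a wrapper typically looks like, assuming kthread_run() is used underneath; the function name example_thread_start is hypothetical and any thread accounting done by the real implementation is omitted:

#include <linux/err.h>
#include <linux/kthread.h>

/* Hypothetical sketch only: start a kernel thread with a descriptive
 * name via kthread_run(); "%s" guards against '%' in the name string. */
static int example_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
	struct task_struct *task = kthread_run(fn, arg, "%s", name);

	return IS_ERR(task) ? PTR_ERR(task) : 0;
}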