LU-1346 gnilnd: remove libcfs abstractions 54/7454/7
author    James Simmons <uja.ornl@gmail.com>
          Tue, 17 Sep 2013 15:37:23 +0000 (11:37 -0400)
committer Oleg Drokin <oleg.drokin@intel.com>
          Thu, 26 Sep 2013 05:00:19 +0000 (05:00 +0000)
With the move of libcfs to using Linux as the default API,
many of the LNet drivers have been impacted. This patch
removes the libcfs wrappers and uses the Linux APIs
directly in the LND driver.
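
For context, the conversions in this patch follow a 1:1 mapping from the
libcfs slab wrappers to the native Linux slab API: cfs_mem_cache_create ->
kmem_cache_create, cfs_mem_cache_alloc with CFS_ALLOC_ATOMIC ->
kmem_cache_alloc with GFP_ATOMIC, cfs_mem_cache_free -> kmem_cache_free, and
cfs_mem_cache_destroy -> kmem_cache_destroy (which returns void, so the old
LASSERTF on its return code goes away). The sketch below illustrates that
pattern only; the example_desc type and example_cache names are hypothetical
and are not taken from the patch.

#include <linux/errno.h>
#include <linux/slab.h>

struct example_desc {			/* hypothetical descriptor type */
	int	id;
};

static struct kmem_cache *example_cache;

static int example_cache_init(void)
{
	/* native API takes a constructor pointer as the fifth argument */
	example_cache = kmem_cache_create("example_desc",
					  sizeof(struct example_desc),
					  0,	 /* align */
					  0,	 /* flags */
					  NULL); /* ctor */
	if (example_cache == NULL)
		return -ENOMEM;
	return 0;
}

static void example_cache_use(void)
{
	struct example_desc *desc;

	/* GFP_ATOMIC mirrors the old CFS_ALLOC_ATOMIC: no sleeping allowed */
	desc = kmem_cache_alloc(example_cache, GFP_ATOMIC);
	if (desc != NULL)
		kmem_cache_free(example_cache, desc);
}

static void example_cache_fini(void)
{
	/* kmem_cache_destroy() returns void; nothing to assert on */
	if (example_cache != NULL)
		kmem_cache_destroy(example_cache);
	example_cache = NULL;
}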

Signed-off-by: James Simmons <uja.ornl@gmail.com>
Change-Id: Id502abb578808063b04e094bff026c92a5d12a3b
Reviewed-on: http://review.whamcloud.com/7454
Reviewed-by: James Shimek <jshimek@cray.com>
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Chuck Fossen <chuckf@cray.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
lnet/klnds/gnilnd/gnilnd.c
lnet/klnds/gnilnd/gnilnd.h
lnet/klnds/gnilnd/gnilnd_cb.c
lnet/klnds/gnilnd/gnilnd_conn.c
lnet/klnds/gnilnd/gnilnd_proc.c
lnet/klnds/gnilnd/gnilnd_stack.c
lnet/klnds/gnilnd/gnilnd_sysctl.c

diff --git a/lnet/klnds/gnilnd/gnilnd.c b/lnet/klnds/gnilnd/gnilnd.c
index 6c00370..92a139c 100644
@@ -1650,7 +1650,7 @@ kgnilnd_report_node_state(lnet_nid_t nid, int down)
 {
        int         rc;
        kgn_peer_t  *peer, *new_peer;
-       CFS_LIST_HEAD(zombies);
+       LIST_HEAD(zombies);
 
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        peer = kgnilnd_find_peer_locked(nid);
@@ -2228,7 +2228,7 @@ int kgnilnd_base_startup(void)
 
        /* OK to call kgnilnd_api_shutdown() to cleanup now */
        kgnilnd_data.kgn_init = GNILND_INIT_DATA;
-       PORTAL_MODULE_USE;
+       try_module_get(THIS_MODULE);
 
        rwlock_init(&kgnilnd_data.kgn_peer_conn_lock);
 
@@ -2269,10 +2269,8 @@ int kgnilnd_base_startup(void)
        }
 
        kgnilnd_data.kgn_mbox_cache =
-               cfs_mem_cache_create("kgn_mbox_block",
-                                    KMALLOC_MAX_SIZE,
-                                    0,    /* offset */
-                                    SLAB_HWCACHE_ALIGN);   /* flags */
+               kmem_cache_create("kgn_mbox_block", KMALLOC_MAX_SIZE, 0,
+                                 SLAB_HWCACHE_ALIGN, NULL);
        if (kgnilnd_data.kgn_mbox_cache == NULL) {
                CERROR("Can't create slab for physical mbox blocks\n");
                rc = -ENOMEM;
@@ -2280,10 +2278,7 @@ int kgnilnd_base_startup(void)
        }
 
        kgnilnd_data.kgn_rx_cache =
-               cfs_mem_cache_create("kgn_rx_t",
-                                    sizeof(kgn_rx_t),
-                                    0,    /* offset */
-                                    0);   /* flags */
+               kmem_cache_create("kgn_rx_t", sizeof(kgn_rx_t), 0, 0, NULL);
        if (kgnilnd_data.kgn_rx_cache == NULL) {
                CERROR("Can't create slab for kgn_rx_t descriptors\n");
                rc = -ENOMEM;
@@ -2291,10 +2286,7 @@ int kgnilnd_base_startup(void)
        }
 
        kgnilnd_data.kgn_tx_cache =
-               cfs_mem_cache_create("kgn_tx_t",
-                                    sizeof(kgn_tx_t),
-                                    0,    /* offset */
-                                    0);   /* flags */
+               kmem_cache_create("kgn_tx_t", sizeof(kgn_tx_t), 0, 0, NULL);
        if (kgnilnd_data.kgn_tx_cache == NULL) {
                CERROR("Can't create slab for kgn_tx_t\n");
                rc = -ENOMEM;
@@ -2302,10 +2294,9 @@ int kgnilnd_base_startup(void)
        }
 
        kgnilnd_data.kgn_tx_phys_cache =
-               cfs_mem_cache_create("kgn_tx_phys",
-                                    LNET_MAX_IOV * sizeof(gni_mem_segment_t),
-                                    0,    /* offset */
-                                    0);   /* flags */
+               kmem_cache_create("kgn_tx_phys",
+                                  LNET_MAX_IOV * sizeof(gni_mem_segment_t),
+                                  0, 0, NULL);
        if (kgnilnd_data.kgn_tx_phys_cache == NULL) {
                CERROR("Can't create slab for kgn_tx_phys\n");
                rc = -ENOMEM;
@@ -2313,10 +2304,7 @@ int kgnilnd_base_startup(void)
        }
 
        kgnilnd_data.kgn_dgram_cache =
-               cfs_mem_cache_create("kgn_dgram_t",
-                                    sizeof(kgn_dgram_t),
-                                    0,    /* offset */
-                                    0);   /* flags */
+               kmem_cache_create("kgn_dgram_t", sizeof(kgn_dgram_t), 0, 0, NULL);
        if (kgnilnd_data.kgn_dgram_cache == NULL) {
                CERROR("Can't create slab for outgoing datagrams\n");
                rc = -ENOMEM;
@@ -2569,30 +2557,20 @@ kgnilnd_base_shutdown(void)
                kgnilnd_free_phys_fmablk(dev);
        }
 
-       if (kgnilnd_data.kgn_mbox_cache != NULL) {
-               i = cfs_mem_cache_destroy(kgnilnd_data.kgn_mbox_cache);
-               LASSERTF(i == 0, "rc %d destroying kgn_mbox_cache\n", i);
-       }
+       if (kgnilnd_data.kgn_mbox_cache != NULL)
+               kmem_cache_destroy(kgnilnd_data.kgn_mbox_cache);
 
-       if (kgnilnd_data.kgn_rx_cache != NULL) {
-               i = cfs_mem_cache_destroy(kgnilnd_data.kgn_rx_cache);
-               LASSERTF(i == 0, "rc %d destroying kgn_rx_cache\n", i);
-       }
+       if (kgnilnd_data.kgn_rx_cache != NULL)
+               kmem_cache_destroy(kgnilnd_data.kgn_rx_cache);
 
-       if (kgnilnd_data.kgn_tx_cache != NULL) {
-               i = cfs_mem_cache_destroy(kgnilnd_data.kgn_tx_cache);
-               LASSERTF(i == 0, "rc %d destroying kgn_tx_cache\n", i);
-       }
+       if (kgnilnd_data.kgn_tx_cache != NULL)
+               kmem_cache_destroy(kgnilnd_data.kgn_tx_cache);
 
-       if (kgnilnd_data.kgn_tx_phys_cache != NULL) {
-               i = cfs_mem_cache_destroy(kgnilnd_data.kgn_tx_phys_cache);
-               LASSERTF(i == 0, "rc %d destroying kgn_tx_phys_cache\n", i);
-       }
+       if (kgnilnd_data.kgn_tx_phys_cache != NULL)
+               kmem_cache_destroy(kgnilnd_data.kgn_tx_phys_cache);
 
-       if (kgnilnd_data.kgn_dgram_cache != NULL) {
-               i = cfs_mem_cache_destroy(kgnilnd_data.kgn_dgram_cache);
-               LASSERTF(i == 0, "rc %d destroying kgn_dgram_cache\n", i);
-       }
+       if (kgnilnd_data.kgn_dgram_cache != NULL)
+               kmem_cache_destroy(kgnilnd_data.kgn_dgram_cache);
 
        if (kgnilnd_data.kgn_cksum_map_pages != NULL) {
                for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
@@ -2607,7 +2585,7 @@ kgnilnd_base_shutdown(void)
               atomic_read(&libcfs_kmemory));
 
        kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
-       PORTAL_MODULE_UNUSE;
+       module_put(THIS_MODULE);
 
        EXIT;
 }
diff --git a/lnet/klnds/gnilnd/gnilnd.h b/lnet/klnds/gnilnd/gnilnd.h
index 012e50f..064492a 100644
@@ -754,7 +754,7 @@ typedef struct kgn_peer {
        unsigned long       gnp_last_alive;             /* last time I had valid comms */
        int                 gnp_last_dgram_errno;       /* last error dgrams saw */
        unsigned long       gnp_last_dgram_time;        /* last time I tried to connect */
-       unsigned long       gnp_reconnect_time;         /* CURRENT_SECONDS when reconnect OK */
+       unsigned long       gnp_reconnect_time;         /* get_seconds() when reconnect OK */
        unsigned long       gnp_reconnect_interval;     /* exponential backoff */
        atomic_t            gnp_dirty_eps;              /* # of old but yet to be destroyed EPs from conns */
        int                 gnp_down;                   /* rca says peer down */
@@ -821,11 +821,11 @@ typedef struct kgn_data {
        wait_queue_head_t       kgn_reaper_waitq;     /* reaper sleeps here */
        spinlock_t              kgn_reaper_lock;      /* serialise */
 
-       cfs_mem_cache_t        *kgn_rx_cache;         /* rx descriptor space */
-       cfs_mem_cache_t        *kgn_tx_cache;         /* tx descriptor memory */
-       cfs_mem_cache_t        *kgn_tx_phys_cache;    /* tx phys descriptor memory */
+       struct kmem_cache        *kgn_rx_cache;         /* rx descriptor space */
+       struct kmem_cache        *kgn_tx_cache;         /* tx descriptor memory */
+       struct kmem_cache        *kgn_tx_phys_cache;    /* tx phys descriptor memory */
        atomic_t                kgn_ntx;              /* # tx in use */
-       cfs_mem_cache_t        *kgn_dgram_cache;      /* outgoing datagrams */
+       struct kmem_cache        *kgn_dgram_cache;      /* outgoing datagrams */
 
        struct page          ***kgn_cksum_map_pages;  /* page arrays for mapping pages on checksum */
        __u64                   kgn_cksum_npages;     /* Number of pages allocated for checksumming */
@@ -1029,19 +1029,18 @@ do {
        (atomic_read(&kgnilnd_data.kgn_nquiesce) ==                             \
                atomic_read(&kgnilnd_data.kgn_nthreads))
 
-#define KGNILND_SPIN_QUIESCE                                                 \
-do {                                                                         \
-       /* E.T phone home */                                                 \
-       atomic_inc(&kgnilnd_data.kgn_nquiesce);                              \
-       CDEBUG(D_NET, "Waiting for thread pause to be over...\n");           \
-       while (kgnilnd_data.kgn_quiesce_trigger) {                           \
-               set_current_state(TASK_INTERRUPTIBLE);                       \
-               cfs_schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,       \
-                       cfs_time_seconds(1));                                \
-       }                                                                    \
-       /* Mom, my homework is done */                                       \
-       CDEBUG(D_NET, "Waking up from thread pause\n");                      \
-       atomic_dec(&kgnilnd_data.kgn_nquiesce);                              \
+#define KGNILND_SPIN_QUIESCE                                           \
+do {                                                                   \
+       /* E.T phone home */                                            \
+       atomic_inc(&kgnilnd_data.kgn_nquiesce);                         \
+       CDEBUG(D_NET, "Waiting for thread pause to be over...\n");      \
+       while (kgnilnd_data.kgn_quiesce_trigger) {                      \
+               set_current_state(TASK_INTERRUPTIBLE);                  \
+               schedule_timeout(HZ);                                   \
+       }                                                               \
+       /* Mom, my homework is done */                                  \
+       CDEBUG(D_NET, "Waking up from thread pause\n");                 \
+       atomic_dec(&kgnilnd_data.kgn_nquiesce);                         \
 } while(0)
 
 /* use macros for addref/decref to get the calling function name in the CDEBUG */
diff --git a/lnet/klnds/gnilnd/gnilnd_cb.c b/lnet/klnds/gnilnd/gnilnd_cb.c
index 381aa64..2112612 100644
@@ -228,7 +228,7 @@ kgnilnd_free_tx(kgn_tx_t *tx)
 
        /* we only allocate this if we need to */
        if (tx->tx_phys != NULL) {
-               cfs_mem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
+               kmem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
                CDEBUG(D_MALLOC, "slab-freed 'tx_phys': %lu at %p.\n",
                       LNET_MAX_IOV * sizeof(gni_mem_segment_t), tx->tx_phys);
        }
@@ -242,7 +242,7 @@ kgnilnd_free_tx(kgn_tx_t *tx)
 #if 0
        KGNILND_POISON(tx, 0x5a, sizeof(kgn_tx_t));
 #endif
-       cfs_mem_cache_free(kgnilnd_data.kgn_tx_cache, tx);
+       kmem_cache_free(kgnilnd_data.kgn_tx_cache, tx);
        CDEBUG(D_MALLOC, "slab-freed 'tx': %lu at %p.\n",
               sizeof(*tx), tx);
 }
@@ -255,7 +255,7 @@ kgnilnd_alloc_tx (void)
        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_ALLOC_TX))
                return tx;
 
-       tx = cfs_mem_cache_alloc(kgnilnd_data.kgn_tx_cache, CFS_ALLOC_ATOMIC);
+       tx = kmem_cache_alloc(kgnilnd_data.kgn_tx_cache, GFP_ATOMIC);
        if (tx == NULL) {
                CERROR("failed to allocate tx\n");
                return NULL;
@@ -522,7 +522,7 @@ kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, struct iovec *io
                         * than kiov_len, we will also have a whole at the end of that page
                         * which isn't allowed */
                        if ((kiov[i].kiov_offset != 0 && i > 0) ||
-                           (kiov[i].kiov_offset + kiov[i].kiov_len != CFS_PAGE_SIZE && i < niov - 1)) {
+                           (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1)) {
                                CNETERR("Can't make payload contiguous in I/O VM:"
                                       "page %d, offset %u, nob %u, kiov_offset %u kiov_len %u \n",
                                       i, offset, nob, kiov->kiov_offset, kiov->kiov_len);
@@ -640,8 +640,8 @@ kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov,
        LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
 
        /* only allocate this if we are going to use it */
-       tx->tx_phys = cfs_mem_cache_alloc(kgnilnd_data.kgn_tx_phys_cache,
-                                             CFS_ALLOC_ATOMIC);
+       tx->tx_phys = kmem_cache_alloc(kgnilnd_data.kgn_tx_phys_cache,
+                                             GFP_ATOMIC);
        if (tx->tx_phys == NULL) {
                CERROR("failed to allocate tx_phys\n");
                rc = -ENOMEM;
@@ -729,7 +729,7 @@ kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov,
 
 error:
        if (tx->tx_phys != NULL) {
-               cfs_mem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
+               kmem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
                CDEBUG(D_MALLOC, "slab-freed 'tx_phys': %lu at %p.\n",
                       sizeof(*tx->tx_phys), tx->tx_phys);
                tx->tx_phys = NULL;
@@ -1990,7 +1990,7 @@ kgnilnd_alloc_rx(void)
 {
        kgn_rx_t        *rx;
 
-       rx = cfs_mem_cache_alloc(kgnilnd_data.kgn_rx_cache, CFS_ALLOC_ATOMIC);
+       rx = kmem_cache_alloc(kgnilnd_data.kgn_rx_cache, GFP_ATOMIC);
        if (rx == NULL) {
                CERROR("failed to allocate rx\n");
                return NULL;
@@ -2045,7 +2045,7 @@ kgnilnd_consume_rx(kgn_rx_t *rx)
                kgnilnd_release_msg(conn);
        }
 
-       cfs_mem_cache_free(kgnilnd_data.kgn_rx_cache, rx);
+       kmem_cache_free(kgnilnd_data.kgn_rx_cache, rx);
        CDEBUG(D_MALLOC, "slab-freed 'rx': %lu at %p.\n",
               sizeof(*rx), rx);
 
@@ -2946,7 +2946,6 @@ kgnilnd_reaper(void *arg)
        struct timer_list  timer;
        DEFINE_WAIT(wait);
 
-       cfs_daemonize("kgnilnd_rpr");
        cfs_block_allsigs();
 
        /* all gnilnd threads need to run fairly urgently */
@@ -4850,15 +4849,11 @@ kgnilnd_scheduler(void *arg)
 {
        int               threadno = (long)arg;
        kgn_device_t            *dev;
-       char                    name[16];
        int                     busy_loops = 0;
        unsigned long     deadline = 0;
        DEFINE_WAIT(wait);
 
        dev = &kgnilnd_data.kgn_devices[(threadno + 1) % kgnilnd_data.kgn_ndevs];
-
-       snprintf(name, sizeof(name), "kgnilnd_sd_%02d", threadno);
-       cfs_daemonize(name);
        cfs_block_allsigs();
 
        /* all gnilnd threads need to run fairly urgently */
diff --git a/lnet/klnds/gnilnd/gnilnd_conn.c b/lnet/klnds/gnilnd/gnilnd_conn.c
index 39716b8..5fee8cc 100644
@@ -125,7 +125,7 @@ kgnilnd_alloc_fmablk(kgn_device_t *device, int use_phys)
         * as reallocating them is tough if there is memory fragmentation */
 
        if (use_phys) {
-               fma_blk->gnm_block = cfs_mem_cache_alloc(kgnilnd_data.kgn_mbox_cache, CFS_ALLOC_ATOMIC);
+               fma_blk->gnm_block = kmem_cache_alloc(kgnilnd_data.kgn_mbox_cache, GFP_ATOMIC);
                if (fma_blk->gnm_block == NULL) {
                        CNETERR("could not allocate physical SMSG mailbox memory\n");
                        rc = -ENOMEM;
@@ -215,7 +215,7 @@ free_blk:
        if (fma_blk->gnm_state == GNILND_FMABLK_VIRT) {
                LIBCFS_FREE(fma_blk->gnm_block, fma_blk->gnm_blk_size);
        } else {
-               cfs_mem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
+               kmem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
        }
 free_desc:
        LIBCFS_FREE(fma_blk, sizeof(kgn_fma_memblock_t));
@@ -310,7 +310,7 @@ kgnilnd_free_fmablk_locked(kgn_device_t *dev, kgn_fma_memblock_t *fma_blk)
                fma_blk, fma_blk->gnm_block, fma_blk->gnm_mbox_size);
 
        if (fma_blk->gnm_state == GNILND_FMABLK_PHYS) {
-               cfs_mem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
+               kmem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
        } else {
                LIBCFS_FREE(fma_blk->gnm_block, fma_blk->gnm_blk_size);
        }
@@ -925,8 +925,7 @@ kgnilnd_alloc_dgram(kgn_dgram_t **dgramp, kgn_device_t *dev, kgn_dgram_type_t ty
 {
        kgn_dgram_t         *dgram;
 
-       dgram = cfs_mem_cache_alloc(kgnilnd_data.kgn_dgram_cache,
-                                       CFS_ALLOC_ATOMIC);
+       dgram = kmem_cache_alloc(kgnilnd_data.kgn_dgram_cache, GFP_ATOMIC);
        if (dgram == NULL)
                return -ENOMEM;
 
@@ -1152,7 +1151,7 @@ kgnilnd_free_dgram(kgn_device_t *dev, kgn_dgram_t *dgram)
        dgram->gndg_magic = 0x6f5a6b5f;
        atomic_dec(&dev->gnd_ndgrams);
 
-       cfs_mem_cache_free(kgnilnd_data.kgn_dgram_cache, dgram);
+       kmem_cache_free(kgnilnd_data.kgn_dgram_cache, dgram);
        CDEBUG(D_MALLOC|D_NETTRACE, "slab-freed 'dgram': %lu at %p.\n",
               sizeof(*dgram), dgram);
 }
@@ -2167,13 +2166,10 @@ int
 kgnilnd_dgram_waitq(void *arg)
 {
        kgn_device_t     *dev = (kgn_device_t *) arg;
-       char              name[16];
        gni_return_t      grc;
        __u64             readyid;
        DEFINE_WAIT(mover_done);
 
-       snprintf(name, sizeof(name), "kgnilnd_dgn_%02d", dev->gnd_id);
-       cfs_daemonize(name);
        cfs_block_allsigs();
 
        /* all gnilnd threads need to run fairly urgently */
@@ -2345,7 +2341,6 @@ int
 kgnilnd_dgram_mover(void *arg)
 {
        kgn_device_t            *dev = (kgn_device_t *)arg;
-       char                     name[16];
        int                      rc, did_something;
        unsigned long            next_purge_check = jiffies - 1;
        unsigned long            timeout;
@@ -2353,8 +2348,6 @@ kgnilnd_dgram_mover(void *arg)
        unsigned long            deadline = 0;
        DEFINE_WAIT(wait);
 
-       snprintf(name, sizeof(name), "kgnilnd_dg_%02d", dev->gnd_id);
-       cfs_daemonize(name);
        cfs_block_allsigs();
        /* all gnilnd threads need to run fairly urgently */
        set_user_nice(current, *kgnilnd_tunables.kgn_nice);
diff --git a/lnet/klnds/gnilnd/gnilnd_proc.c b/lnet/klnds/gnilnd/gnilnd_proc.c
index 6170583..38ef64a 100644
@@ -53,8 +53,8 @@ _kgnilnd_proc_run_cksum_test(int caseno, int nloops, int nob)
 
        for (i = 0; i < LNET_MAX_IOV; i++) {
                src[i].kiov_offset = 0;
-               src[i].kiov_len = CFS_PAGE_SIZE;
-               src[i].kiov_page = cfs_alloc_page(CFS_ALLOC_STD|CFS_ALLOC_ZERO);
+               src[i].kiov_len = PAGE_SIZE;
+               src[i].kiov_page = alloc_page(__GFP_IO | __GFP_FS | __GFP_ZERO);
 
                if (src[i].kiov_page == NULL) {
                        CERROR("couldn't allocate page %d\n", i);
@@ -62,8 +62,8 @@ _kgnilnd_proc_run_cksum_test(int caseno, int nloops, int nob)
                }
 
                dest[i].kiov_offset = 0;
-               dest[i].kiov_len = CFS_PAGE_SIZE;
-               dest[i].kiov_page = cfs_alloc_page(CFS_ALLOC_STD|CFS_ALLOC_ZERO);
+               dest[i].kiov_len = PAGE_SIZE;
+               dest[i].kiov_page = alloc_page(__GFP_IO | __GFP_FS | __GFP_ZERO);
 
                if (dest[i].kiov_page == NULL) {
                        CERROR("couldn't allocate page %d\n", i);
@@ -114,8 +114,8 @@ _kgnilnd_proc_run_cksum_test(int caseno, int nloops, int nob)
        for (n = 0; n < nloops; n++) {
                CDEBUG(D_BUFFS, "case %d loop %d src %d dest %d nob %d niov %d\n",
                       caseno, n, src[0].kiov_offset, dest[0].kiov_offset, nob, niov);
-               cksum = kgnilnd_cksum_kiov(niov, src, 0, nob - n, 1);
-               cksum2 = kgnilnd_cksum_kiov(niov, dest, 0, nob - n, 1);
+               cksum = kgnilnd_cksum_kiov(niov, src, 0, nob - (n % nob), 1);
+               cksum2 = kgnilnd_cksum_kiov(niov, dest, 0, nob - (n % nob), 1);
 
                if (cksum != cksum2) {
                        CERROR("case %d loop %d different checksums %x expected %x\n",
@@ -137,10 +137,10 @@ unwind:
        CDEBUG(D_NET, "freeing %d pages\n", i);
        for (i -= 1; i >= 0; i--) {
                if (src[i].kiov_page != NULL) {
-                       cfs_free_page(src[i].kiov_page);
+                       __free_page(src[i].kiov_page);
                }
                if (dest[i].kiov_page != NULL) {
-                       cfs_free_page(dest[i].kiov_page);
+                       __free_page(dest[i].kiov_page);
                }
        }
 
diff --git a/lnet/klnds/gnilnd/gnilnd_stack.c b/lnet/klnds/gnilnd/gnilnd_stack.c
index 0dec950..6cf5f7b 100644
@@ -366,7 +366,6 @@ kgnilnd_ruhroh_thread(void *arg)
        int                i = 1;
        DEFINE_WAIT(wait);
 
-       cfs_daemonize("kgnilnd_rr");
        cfs_block_allsigs();
        set_user_nice(current, *kgnilnd_tunables.kgn_nice);
        kgnilnd_data.kgn_ruhroh_running = 1;
@@ -592,7 +591,6 @@ kgnilnd_rca(void *arg)
        rs_event_t event;
        lnet_nid_t nid;
 
-       cfs_daemonize("kgnilnd_rca");
        cfs_block_allsigs();
 
        /* all gnilnd threads need to run fairly urgently */
@@ -651,7 +649,7 @@ subscribe_retry:
                if (krca_get_message(&rca_krt, &event) == 0) {
                        int node_down = GNILND_RCA_NODE_UNKNOWN;
                        rs_state_t state;
-                       CFS_LIST_HEAD(zombies);
+                       LIST_HEAD(zombies);
 
                        /* Compute nodes don't care about other compute nodes
                         * so we don't need to create a peer.
diff --git a/lnet/klnds/gnilnd/gnilnd_sysctl.c b/lnet/klnds/gnilnd/gnilnd_sysctl.c
index 0ee1204..5d9d742 100644
@@ -38,7 +38,7 @@ static kgn_sysctl_data_t        kgnilnd_sysctl;
 
 #if defined(CONFIG_SYSCTL)
 
-static cfs_sysctl_table_header_t *kgnilnd_table_header = NULL;
+static struct ctl_table_header *kgnilnd_table_header = NULL;
 #ifndef HAVE_SYSCTL_UNNUMBERED
 
 enum {
@@ -64,7 +64,7 @@ static int LL_PROC_PROTO(proc_toggle_thread_pause)
        int  rc = 0;
        ENTRY;
 
-       rc = ll_proc_dointvec(table, write, filp, buffer, lenp, ppos);
+       rc = proc_dointvec(table, write, buffer, lenp, ppos);
        if (!write) {
                /* read */
                RETURN(rc);
@@ -92,7 +92,7 @@ static int LL_PROC_PROTO(proc_hw_quiesce)
        kgn_device_t    *dev;
        ENTRY;
 
-       rc = ll_proc_dointvec(table, write, filp, buffer, lenp, ppos);
+       rc = proc_dointvec(table, write, buffer, lenp, ppos);
        if (!write) {
                /* read */
                RETURN(rc);
@@ -124,7 +124,7 @@ int LL_PROC_PROTO(proc_trigger_stack_reset)
 
        if (!write) {
                /* read */
-               rc = ll_proc_dointvec(table, write, filp, buffer, lenp, ppos);
+               rc = proc_dointvec(table, write, buffer, lenp, ppos);
                RETURN(rc);
        }
 
@@ -153,7 +153,7 @@ static int LL_PROC_PROTO(proc_toggle_rdmaq_override)
        int  rc = 0;
        ENTRY;
 
-       rc = ll_proc_dointvec(table, write, filp, buffer, lenp, ppos);
+       rc = proc_dointvec(table, write, buffer, lenp, ppos);
        if (!write) {
                /* read */
                RETURN(rc);
@@ -187,7 +187,7 @@ static int LL_PROC_PROTO(proc_rca_inject)
        char            command[10];
        ENTRY;
 
-       rc = ll_proc_dostring(table, write, filp, buffer, lenp, ppos);
+       rc = proc_dostring(table, write, buffer, lenp, ppos);
 
        if (!write) {
                /* read */
@@ -233,7 +233,7 @@ static int LL_PROC_PROTO(proc_rca_inject)
        RETURN(rc);
 }
 
-static cfs_sysctl_table_t kgnilnd_table[] = {
+static struct ctl_table kgnilnd_table[] = {
        /*
         * NB No .strategy entries have been provided since sysctl(8) prefers
         * to go via /proc for portability.
@@ -289,7 +289,7 @@ static cfs_sysctl_table_t kgnilnd_table[] = {
        {       INIT_CTL_NAME(0)   }
 };
 
-static cfs_sysctl_table_t kgnilnd_top_table[2] = {
+static struct ctl_table kgnilnd_top_table[2] = {
        {
                INIT_CTL_NAME(CTL_GNILND)
                .procname = "kgnilnd",
@@ -304,13 +304,13 @@ static cfs_sysctl_table_t kgnilnd_top_table[2] = {
 void kgnilnd_insert_sysctl(void)
 {
        if (kgnilnd_table_header == NULL)
-               kgnilnd_table_header = cfs_register_sysctl_table(kgnilnd_top_table, 0);
+               kgnilnd_table_header = register_sysctl_table(kgnilnd_top_table);
 }
 
 void kgnilnd_remove_sysctl(void)
 {
        if (kgnilnd_table_header != NULL)
-               cfs_unregister_sysctl_table(kgnilnd_table_header);
+               unregister_sysctl_table(kgnilnd_table_header);
 
        kgnilnd_table_header = NULL;
 }