]) # LIBCFS_SYSCTL_CTLNAME
#
-# LIBCFS_ADD_WAIT_QUEUE_EXCLUSIVE
-#
-# 2.6.34 adds __add_wait_queue_exclusive
-#
-AC_DEFUN([LIBCFS_ADD_WAIT_QUEUE_EXCLUSIVE], [
-LB_CHECK_COMPILE([if '__add_wait_queue_exclusive' exists],
-__add_wait_queue_exclusive, [
- #include <linux/wait.h>
-],[
- wait_queue_head_t queue;
- wait_queue_t wait;
- __add_wait_queue_exclusive(&queue, &wait);
-],[
- AC_DEFINE(HAVE___ADD_WAIT_QUEUE_EXCLUSIVE, 1,
- [__add_wait_queue_exclusive exists])
-])
-]) # LIBCFS_ADD_WAIT_QUEUE_EXCLUSIVE
-
-#
# LC_SK_SLEEP
#
# 2.6.35 kernel has sk_sleep function
LC_SHRINKER_WANT_SHRINK_PTR
# 2.6.33
LIBCFS_SYSCTL_CTLNAME
-# 2.6.34
-LIBCFS_ADD_WAIT_QUEUE_EXCLUSIVE
# 2.6.35
LC_SK_SLEEP
# 2.6.39
types.h \
user-bitops.h \
user-crypto.h \
- user-prim.h \
user-time.h
# include <sys/time.h>
# include <sys/types.h>
# include <libcfs/user-time.h>
-# include <libcfs/user-prim.h>
# include <libcfs/user-bitops.h>
#endif /* __KERNEL__ */
#ifdef __KERNEL__
-#ifndef cfs_for_each_possible_cpu
-# error cfs_for_each_possible_cpu is not supported by kernel!
-#endif
-
/* libcfs watchdogs */
struct lc_watchdog;
sigset_t cfs_block_sigs(unsigned long sigs);
sigset_t cfs_block_sigsinv(unsigned long sigs);
void cfs_restore_sigs(sigset_t);
-int cfs_signal_pending(void);
void cfs_clear_sigpending(void);
int convert_server_error(__u64 ecode);
#include <libcfs/libcfs_workitem.h>
#ifdef __KERNEL__
# include <libcfs/libcfs_hash.h>
+# include <libcfs/libcfs_heap.h>
+# include <libcfs/libcfs_fail.h>
#endif /* __KERNEL__ */
-#include <libcfs/libcfs_heap.h>
-#include <libcfs/libcfs_fail.h>
/* container_of depends on "likely" which is defined in libcfs_private.h */
static inline void *__container_of(const void *ptr, unsigned long shift)
#define container_of0(ptr, type, member) \
((type *)__container_of((ptr), offsetof(type, member)))
-#define _LIBCFS_H
-
-#endif /* _LIBCFS_H */
+#endif /* _LIBCFS_LIBCFS_H_ */
-EXTRA_DIST = kp30.h libcfs.h linux-fs.h linux-mem.h \
- linux-prim.h linux-time.h linux-cpu.h linux-crypto.h
+EXTRA_DIST = kp30.h libcfs.h linux-fs.h linux-mem.h linux-time.h linux-cpu.h \
+ linux-crypto.h
#include <libcfs/linux/linux-cpu.h>
#include <libcfs/linux/linux-time.h>
#include <libcfs/linux/linux-mem.h>
-#include <libcfs/linux/linux-prim.h>
#include <libcfs/linux/linux-fs.h>
#include <libcfs/linux/kp30.h>
+++ /dev/null
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/linux/linux-prim.h
- *
- * Basic library routines.
- */
-
-#ifndef __LIBCFS_LINUX_CFS_PRIM_H__
-#define __LIBCFS_LINUX_CFS_PRIM_H__
-
-#ifndef __LIBCFS_LIBCFS_H__
-#error Do not #include this file directly. #include <libcfs/libcfs.h> instead
-#endif
-
-#ifndef __KERNEL__
-#error This include is only for kernel use.
-#endif
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/version.h>
-#include <linux/proc_fs.h>
-#include <linux/mm.h>
-#include <linux/timer.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kthread.h>
-#ifdef HAVE_LINUX_RANDOM_H
-#include <linux/random.h>
-#endif
-#ifdef HAVE_UIDGID_HEADER
-#include <linux/uidgid.h>
-#endif
-#include <linux/user_namespace.h>
-#include <linux/miscdevice.h>
-#include <asm/div64.h>
-
-#include <libcfs/linux/linux-time.h>
-
-
-/*
- * CPU
- */
-#ifdef for_each_possible_cpu
-#define cfs_for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
-#elif defined(for_each_cpu)
-#define cfs_for_each_possible_cpu(cpu) for_each_cpu(cpu)
-#endif
-
-#ifndef NR_CPUS
-#define NR_CPUS 1
-#endif
-
-/*
- * Wait Queue
- */
-
-
-#define CFS_DECL_WAITQ(wq) DECLARE_WAIT_QUEUE_HEAD(wq)
-
-#define LIBCFS_WQITQ_MACROS 1
-#define init_waitqueue_entry_current(w) init_waitqueue_entry(w, current)
-#define waitq_wait(w, s) schedule()
-#define waitq_timedwait(w, s, t) schedule_timeout(t)
-
-#ifndef HAVE___ADD_WAIT_QUEUE_EXCLUSIVE
-static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
- wait_queue_t *wait)
-{
- wait->flags |= WQ_FLAG_EXCLUSIVE;
- __add_wait_queue(q, wait);
-}
-#endif /* HAVE___ADD_WAIT_QUEUE_EXCLUSIVE */
-
-/**
- * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
- * waiting threads, which is not always desirable because all threads will
- * be waken up again and again, even user only needs a few of them to be
- * active most time. This is not good for performance because cache can
- * be polluted by different threads.
- *
- * LIFO list can resolve this problem because we always wakeup the most
- * recent active thread by default.
- *
- * NB: please don't call non-exclusive & exclusive wait on the same
- * waitq if add_wait_queue_exclusive_head is used.
- */
-#define add_wait_queue_exclusive_head(waitq, link) \
-{ \
- unsigned long flags; \
- \
- spin_lock_irqsave(&((waitq)->lock), flags); \
- __add_wait_queue_exclusive(waitq, link); \
- spin_unlock_irqrestore(&((waitq)->lock), flags); \
-}
-
-#define schedule_timeout_and_set_state(state, timeout) \
-{ \
- set_current_state(state); \
- schedule_timeout(timeout); \
-}
-
-/* deschedule for a bit... */
-#define cfs_pause(ticks) \
-{ \
- set_current_state(TASK_UNINTERRUPTIBLE); \
- schedule_timeout(ticks); \
-}
-
-#define DECL_JOURNAL_DATA void *journal_info
-#define PUSH_JOURNAL do { \
- journal_info = current->journal_info; \
- current->journal_info = NULL; \
- } while(0)
-#define POP_JOURNAL do { \
- current->journal_info = journal_info; \
- } while(0)
-
-/* Module interfaces */
-#define cfs_module(name, version, init, fini) \
- module_init(init); \
- module_exit(fini)
-
-#endif
+++ /dev/null
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2014, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/user-prim.h
- *
- * Author: Nikita Danilov <nikita@clusterfs.com>
- */
-
-#ifndef __LIBCFS_USER_PRIM_H__
-#define __LIBCFS_USER_PRIM_H__
-
-#ifndef __LIBCFS_LIBCFS_H__
-#error Do not #include this file directly. #include <libcfs/libcfs.h> instead
-#endif
-
-/* Implementations of portable APIs for liblustre */
-
-/*
- * liblustre is single-threaded, so most "synchronization" APIs are trivial.
- */
-
-#ifndef EXPORT_SYMBOL
-# define EXPORT_SYMBOL(s)
-#endif
-
-/*
- * Just present a single processor until will add thread support.
- */
-#ifndef smp_processor_id
-# define smp_processor_id() 0
-#endif
-#ifndef num_online_cpus
-# define num_online_cpus() 1
-#endif
-#ifndef num_possible_cpus
-# define num_possible_cpus() 1
-#endif
-#ifndef get_cpu
-# define get_cpu() 0
-#endif
-#ifndef put_cpu
-# define put_cpu() do {} while (0)
-#endif
-#ifndef NR_CPUS
-# define NR_CPUS 1
-#endif
-#ifndef for_each_possible_cpu
-# define for_each_possible_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
-#endif
-
-/*
- * Wait Queue.
- */
-
-typedef struct cfs_waitlink {
- struct list_head sleeping;
- void *process;
-} wait_queue_t;
-
-typedef struct cfs_waitq {
- struct list_head sleepers;
-} wait_queue_head_t;
-
-#define CFS_DECL_WAITQ(wq) wait_queue_head_t wq
-void init_waitqueue_head(struct cfs_waitq *waitq);
-void init_waitqueue_entry_current(struct cfs_waitlink *link);
-void add_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link);
-void add_wait_queue_exclusive(struct cfs_waitq *waitq, struct cfs_waitlink *link);
-void add_wait_queue_exclusive_head(struct cfs_waitq *waitq, struct cfs_waitlink *link);
-void remove_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link);
-int waitqueue_active(struct cfs_waitq *waitq);
-void wake_up(struct cfs_waitq *waitq);
-void wake_up_nr(struct cfs_waitq *waitq, int nr);
-void wake_up_all(struct cfs_waitq *waitq);
-void waitq_wait(struct cfs_waitlink *link, long state);
-int64_t waitq_timedwait(struct cfs_waitlink *link, long state, int64_t timeout);
-void schedule_timeout_and_set_state(long state, int64_t timeout);
-void cfs_pause(cfs_duration_t d);
-int need_resched(void);
-void cond_resched(void);
-
-/*
- * Task states
- */
-#define TASK_INTERRUPTIBLE (0)
-#define TASK_UNINTERRUPTIBLE (1)
-#define TASK_RUNNING (2)
-
-static inline void schedule(void) {}
-static inline void schedule_timeout(int64_t t) {}
-static inline void set_current_state(int state)
-{
-}
-
-/*
- * Lproc
- */
-typedef int (read_proc_t)(char *page, char **start, off_t off,
- int count, int *eof, void *data);
-
-struct file; /* forward ref */
-typedef int (write_proc_t)(struct file *file, const char *buffer,
- unsigned long count, void *data);
-
-/*
- * Signal
- */
-
-/*
- * Timer
- */
-
-struct timer_list {
- struct list_head tl_list;
- void (*function)(ulong_ptr_t unused);
- ulong_ptr_t data;
- long expires;
-};
-
-
-#define in_interrupt() (0)
-
-struct miscdevice{
-};
-
-static inline int misc_register(struct miscdevice *foo)
-{
- return 0;
-}
-
-static inline int misc_deregister(struct miscdevice *foo)
-{
- return 0;
-}
-
-#define cfs_recalc_sigpending(l) do {} while (0)
-
-#define DAEMON_FLAGS 0
-
-#define L1_CACHE_ALIGN(x) (x)
-
-#ifdef HAVE_LIBPTHREAD
-typedef int (*cfs_thread_t)(void *);
-void *kthread_run(cfs_thread_t func, void *arg, const char namefmt[], ...);
-#else
-/* Fine, crash, but stop giving me compile warnings */
-#define kthread_run(f, a, n, ...) LBUG()
-#endif
-
-uid_t current_uid(void);
-gid_t current_gid(void);
-uid_t current_fsuid(void);
-gid_t current_fsgid(void);
-
-#ifndef HAVE_STRLCPY /* not in glibc for RHEL 5.x, remove when obsolete */
-size_t strlcpy(char *tgt, const char *src, size_t tgt_len);
-#endif
-
-#ifndef HAVE_STRLCAT /* not in glibc for RHEL 5.x, remove when obsolete */
-size_t strlcat(char *tgt, const char *src, size_t tgt_len);
-#endif
-
-#define LIBCFS_REALLOC(ptr, size) realloc(ptr, size)
-
-#define cfs_online_cpus() sysconf(_SC_NPROCESSORS_ONLN)
-
-// static inline void local_irq_save(unsigned long flag) {return;}
-// static inline void local_irq_restore(unsigned long flag) {return;}
-
-enum {
- CFS_STACK_TRACE_DEPTH = 16
-};
-
-struct cfs_stack_trace {
- void *frame[CFS_STACK_TRACE_DEPTH];
-};
-
-/*
- * arithmetic
- */
-#ifndef do_div /* gcc only, platform-specific will override */
-#define do_div(a,b) \
- ({ \
- unsigned long remainder;\
- remainder = (a) % (b); \
- (a) = (a) / (b); \
- (remainder); \
- })
-#endif
-
-/*
- * Groups
- */
-struct group_info{ };
-
-#ifndef min
-# define min(x,y) ((x)<(y) ? (x) : (y))
-#endif
-
-#ifndef max
-# define max(x,y) ((x)>(y) ? (x) : (y))
-#endif
-
-#define get_random_bytes(val, size) (*val) = 0
-
-#endif /* __LIBCFS_USER_PRIM_H__ */
# define DEBUG_SUBSYSTEM S_LNET
+#include <linux/kthread.h>
#include <libcfs/libcfs.h>
#include "tracefile.h"
{
static time_t last_dump_time;
time_t current_time;
- DECL_JOURNAL_DATA;
+ void *journal_info;
- PUSH_JOURNAL;
+ journal_info = current->journal_info;
+ current->journal_info = NULL;
current_time = cfs_time_current_sec();
cfs_tracefile_dump_all_pages(debug_file_name);
libcfs_run_debug_log_upcall(debug_file_name);
}
- POP_JOURNAL;
+ current->journal_info = journal_info;
}
static int libcfs_debug_dumplog_thread(void *arg)
/* we're being careful to ensure that the kernel thread is
* able to set our state to running as it exits before we
* get to schedule() */
- init_waitqueue_entry_current(&wait);
+ init_waitqueue_entry(&wait, current);
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&debug_ctlwq, &wait);
printk(KERN_ERR "LustreError: cannot start log dump thread:"
" %ld\n", PTR_ERR(dumper));
else
- waitq_wait(&wait, TASK_INTERRUPTIBLE);
+ schedule();
/* be sure to teardown if cfs_create_thread() failed */
remove_wait_queue(&debug_ctlwq, &wait);
if (ret && likely(ms > 0)) {
CERROR("cfs_fail_timeout id %x sleeping for %dms\n",
id, ms);
- schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
- cfs_time_seconds(ms) / 1000);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(ms) / 1000);
set_current_state(TASK_RUNNING);
CERROR("cfs_fail_timeout id %x awake\n", id);
}
/* nothing */
#endif
}
+EXPORT_SYMBOL(cfs_enter_debugger);
sigset_t
cfs_block_allsigs(void)
spin_unlock_irqrestore(¤t->sighand->siglock, flags);
return old;
}
+EXPORT_SYMBOL(cfs_block_allsigs);
sigset_t cfs_block_sigs(unsigned long sigs)
{
spin_unlock_irqrestore(¤t->sighand->siglock, flags);
return old;
}
+EXPORT_SYMBOL(cfs_block_sigs);
/* Block all signals except for the @sigs */
sigset_t cfs_block_sigsinv(unsigned long sigs)
spin_unlock_irqrestore(¤t->sighand->siglock, flags);
return old;
}
+EXPORT_SYMBOL(cfs_block_sigsinv);
void
cfs_restore_sigs(sigset_t old)
recalc_sigpending();
spin_unlock_irqrestore(¤t->sighand->siglock, flags);
}
-
-int
-cfs_signal_pending(void)
-{
- return signal_pending(current);
-}
+EXPORT_SYMBOL(cfs_restore_sigs);
void
cfs_clear_sigpending(void)
clear_tsk_thread_flag(current, TIF_SIGPENDING);
spin_unlock_irqrestore(¤t->sighand->siglock, flags);
}
-
-EXPORT_SYMBOL(cfs_enter_debugger);
-EXPORT_SYMBOL(cfs_block_allsigs);
-EXPORT_SYMBOL(cfs_block_sigs);
-EXPORT_SYMBOL(cfs_block_sigsinv);
-EXPORT_SYMBOL(cfs_restore_sigs);
-EXPORT_SYMBOL(cfs_signal_pending);
EXPORT_SYMBOL(cfs_clear_sigpending);
while (ldu->ldu_memhog_pages < npages &&
count1 < PAGE_CACHE_SIZE/sizeof(struct page *)) {
- if (cfs_signal_pending())
+ if (signal_pending(current))
return -EINTR;
*level1p = alloc_page(flags);
while (ldu->ldu_memhog_pages < npages &&
count2 < PAGE_CACHE_SIZE/sizeof(struct page *)) {
- if (cfs_signal_pending())
+ if (signal_pending(current))
return -EINTR;
*level2p = alloc_page(flags);
libcfs_ioctl
};
-MODULE_AUTHOR("Peter J. Braam <braam@clusterfs.com>");
-MODULE_DESCRIPTION("Portals v3.1");
-MODULE_LICENSE("GPL");
-
static int init_libcfs_module(void)
{
int rc;
rc);
}
-cfs_module(libcfs, "1.0.0", init_libcfs_module, exit_libcfs_module);
+MODULE_AUTHOR("Peter J. Braam <braam@clusterfs.com>");
+MODULE_DESCRIPTION("Libcfs v3.1");
+MODULE_VERSION("1.0.0");
+MODULE_LICENSE("GPL");
+
+module_init(init_libcfs_module);
+module_exit(exit_libcfs_module);
* algorithm recommended by Marsaglia
*/
+#include <linux/random.h>
#include <libcfs/libcfs.h>
/*
#define LUSTRE_TRACEFILE_PRIVATE
#include "tracefile.h"
+#include <linux/kthread.h>
#include <libcfs/libcfs.h>
/* XXX move things up to the top, comment */
break;
}
}
- init_waitqueue_entry_current(&__wait);
+ init_waitqueue_entry(&__wait, current);
add_wait_queue(&tctl->tctl_waitq, &__wait);
set_current_state(TASK_INTERRUPTIBLE);
- waitq_timedwait(&__wait, TASK_INTERRUPTIBLE,
- cfs_time_seconds(1));
+ schedule_timeout(cfs_time_seconds(1));
remove_wait_queue(&tctl->tctl_waitq, &__wait);
}
complete(&tctl->tctl_stop);
#define DEBUG_SUBSYSTEM S_LNET
+#include <linux/kthread.h>
#include <libcfs/libcfs.h>
#include "tracefile.h"
#define DEBUG_SUBSYSTEM S_LNET
+#include <linux/kthread.h>
#include <libcfs/libcfs.h>
#define CFS_WS_NAME_LEN 16
sched->ws_name);
spin_unlock(&cfs_wi_data.wi_glock);
- cfs_pause(cfs_time_seconds(1) / 20);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1) / 20);
spin_lock(&cfs_wi_data.wi_glock);
}
}
while (sched->ws_nthreads != 0) {
spin_unlock(&cfs_wi_data.wi_glock);
- cfs_pause(cfs_time_seconds(1) / 20);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1) / 20);
spin_lock(&cfs_wi_data.wi_glock);
}
spin_unlock(&cfs_wi_data.wi_glock);
# error This include is only for kernel use.
#endif
+#include <linux/kthread.h>
#include <linux/uio.h>
#include <linux/types.h>
#include <net/sock.h>
atomic_read(&kgnilnd_data.kgn_npending_detach) ||
atomic_read(&kgnilnd_data.kgn_npending_unlink)) {
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, "Waiting on %d peers %d closes %d detaches\n",
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
"Waiting for conns to be cleaned up %d\n",atomic_read(&kgnilnd_data.kgn_nconns));
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
}
/* Peer state all cleaned up BEFORE setting shutdown, so threads don't
* have to worry about shutdown races. NB connections may be created
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
"Waiting for ruhroh thread to terminate\n");
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
}
/* Flag threads to terminate */
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"Waiting for %d threads to terminate\n",
atomic_read(&kgnilnd_data.kgn_nthreads));
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
}
LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
"Waiting for %d references to clear on net %d\n",
atomic_read(&net->gnn_refcount),
net->gnn_netnum);
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
}
/* release ref from kgnilnd_startup */
atomic_read(&kgnilnd_data.kgn_nthreads) -
atomic_read(&kgnilnd_data.kgn_nquiesce));
CFS_RACE(CFS_FAIL_GNI_QUIESCE_RACE);
- cfs_pause(cfs_time_seconds(1 * i));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1 * i));
LASSERTF(quiesce_deadline > jiffies,
"couldn't quiesce threads in %lu seconds, falling over now\n",
"%s: Waiting for %d threads to wake up\n",
reason,
atomic_read(&kgnilnd_data.kgn_nquiesce));
- cfs_pause(cfs_time_seconds(1 * i));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1 * i));
}
CDEBUG(D_INFO, "%s: All threads awake!\n", reason);
i++;
CDEBUG(D_INFO, "Waiting for hardware quiesce "
"flag to clear\n");
- cfs_pause(cfs_time_seconds(1 * i));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1 * i));
/* If we got a quiesce event with bump info, DO THE BUMP!. */
if (kgnilnd_data.kgn_bump_info_rdy) {
i++;
LCONSOLE((((i) & (-i)) == i) ? D_WARNING : D_NET,
"Waiting for stack reset request to clear\n");
- cfs_pause(cfs_time_seconds(1 * i));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1 * i));
}
RETURN(rc);
i = 2;
while (atomic_read(&kiblnd_data.kib_nthreads) != 0) {
- i++;
- CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
- "Waiting for %d threads to terminate\n",
+ i++;
+ /* power of 2? */
+ CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
+ "Waiting for %d threads to terminate\n",
atomic_read(&kiblnd_data.kib_nthreads));
- cfs_pause(cfs_time_seconds(1));
- }
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
+ }
/* fall through */
/* nuke all existing peers within this net */
kiblnd_del_peer(ni, LNET_NID_ANY);
- /* Wait for all peer state to clean up */
- i = 2;
+ /* Wait for all peer state to clean up */
+ i = 2;
while (atomic_read(&net->ibn_npeers) != 0) {
- i++;
- CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
- "%s: waiting for %d peers to disconnect\n",
- libcfs_nid2str(ni->ni_nid),
+ i++;
+ /* power of 2? */
+ CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
+ "%s: waiting for %d peers to disconnect\n",
+ libcfs_nid2str(ni->ni_nid),
atomic_read(&net->ibn_npeers));
- cfs_pause(cfs_time_seconds(1));
- }
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
+ }
kiblnd_net_fini_pools(net);
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
int peer_index = 0;
unsigned long deadline = jiffies;
- cfs_block_allsigs ();
+ cfs_block_allsigs();
- init_waitqueue_entry_current (&wait);
+ init_waitqueue_entry(&wait, current);
kiblnd_data.kib_connd = current;
spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
- waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
+ schedule_timeout(timeout);
set_current_state(TASK_RUNNING);
remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
cfs_block_allsigs();
- init_waitqueue_entry_current(&wait);
+ init_waitqueue_entry(&wait, current);
sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
spin_unlock_irqrestore(&sched->ibs_lock, flags);
- waitq_wait(&wait, TASK_INTERRUPTIBLE);
+ schedule();
busy_loops = 0;
remove_wait_queue(&sched->ibs_waitq, &wait);
kiblnd_failover_thread(void *arg)
{
rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_dev_t *dev;
- wait_queue_t wait;
- unsigned long flags;
- int rc;
+ kib_dev_t *dev;
+ wait_queue_t wait;
+ unsigned long flags;
+ int rc;
- LASSERT (*kiblnd_tunables.kib_dev_failover != 0);
+ LASSERT(*kiblnd_tunables.kib_dev_failover != 0);
- cfs_block_allsigs ();
+ cfs_block_allsigs();
- init_waitqueue_entry_current(&wait);
+ init_waitqueue_entry(&wait, current);
write_lock_irqsave(glock, flags);
while (!kiblnd_data.kib_shutdown) {
}
}
- i = 4;
+ i = 4;
read_lock(&ksocknal_data.ksnd_global_lock);
- while (ksocknal_data.ksnd_nthreads != 0) {
- i++;
- CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
- "waiting for %d threads to terminate\n",
- ksocknal_data.ksnd_nthreads);
+ while (ksocknal_data.ksnd_nthreads != 0) {
+ i++;
+ /* power of 2? */
+ CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
+ "waiting for %d threads to terminate\n",
+ ksocknal_data.ksnd_nthreads);
read_unlock(&ksocknal_data.ksnd_global_lock);
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
read_lock(&ksocknal_data.ksnd_global_lock);
- }
+ }
read_unlock(&ksocknal_data.ksnd_global_lock);
ksocknal_free_buffers();
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"waiting for %d peers to disconnect\n",
net->ksnn_npeers);
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
ksocknal_debug_peerhash(ni);
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Kernel TCP Socket LND v3.0.0");
+MODULE_VERSION("3.0.0");
MODULE_LICENSE("GPL");
-cfs_module(ksocknal, "3.0.0", ksocknal_module_init, ksocknal_module_fini);
+module_init(ksocknal_module_init);
+module_exit(ksocknal_module_fini);
#include <linux/if.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/kthread.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mm.h>
}
static int
-ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
{
- int rc;
- int bufnob;
+ int rc;
+ int bufnob;
- if (ksocknal_data.ksnd_stall_tx != 0) {
- cfs_pause(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
- }
+ if (ksocknal_data.ksnd_stall_tx != 0) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
+ }
- LASSERT (tx->tx_resid != 0);
+ LASSERT(tx->tx_resid != 0);
rc = ksocknal_connsock_addref(conn);
if (rc != 0) {
int rc;
ENTRY;
- if (ksocknal_data.ksnd_stall_rx != 0) {
- cfs_pause(cfs_time_seconds (ksocknal_data.ksnd_stall_rx));
- }
+ if (ksocknal_data.ksnd_stall_rx != 0) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
+ }
rc = ksocknal_connsock_addref(conn);
if (rc != 0) {
int nloops = 0;
int cons_retry = 0;
- cfs_block_allsigs ();
+ cfs_block_allsigs();
- init_waitqueue_entry_current(&wait);
+ init_waitqueue_entry(&wait, current);
spin_lock_bh(connd_lock);
spin_unlock_bh(connd_lock);
nloops = 0;
- waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
+ schedule_timeout(timeout);
set_current_state(TASK_RUNNING);
remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
cfs_block_allsigs ();
INIT_LIST_HEAD(&enomem_conns);
- init_waitqueue_entry_current(&wait);
+ init_waitqueue_entry(&wait, current);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
if (!ksocknal_data.ksnd_shuttingdown &&
list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
list_empty(&ksocknal_data.ksnd_zombie_conns))
- waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
+ schedule_timeout(timeout);
set_current_state(TASK_RUNNING);
remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
while (!lnet_acceptor_state.pta_shutdown) {
rc = lnet_sock_accept(&newsock, lnet_acceptor_state.pta_sock);
- if (rc != 0) {
- if (rc != -EAGAIN) {
- CWARN("Accept error %d: pausing...\n", rc);
- cfs_pause(cfs_time_seconds(1));
- }
- continue;
- }
+ if (rc != 0) {
+ if (rc != -EAGAIN) {
+ CWARN("Accept error %d: pausing...\n", rc);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
+ }
+ continue;
+ }
/* maybe we're waken up with lnet_sock_abort_accept() */
if (lnet_acceptor_state.pta_shutdown) {
/* NB md could be busy; this just starts the unlink */
while (pinfo->pi_features != LNET_PING_FEAT_INVAL) {
CDEBUG(D_NET, "Still waiting for ping MD to unlink\n");
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
}
cfs_restore_sigs(blocked);
"Waiting for zombie LNI %s\n",
libcfs_nid2str(ni->ni_nid));
}
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
lnet_net_lock(LNET_LOCK_EX);
continue;
}
{
int tms = *timeout_ms;
int wait;
- wait_queue_t wl;
+ wait_queue_t wl;
cfs_time_t now;
if (tms == 0)
return -1; /* don't want to wait and no new event */
- init_waitqueue_entry_current(&wl);
+ init_waitqueue_entry(&wl, current);
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&the_lnet.ln_eq_waitq, &wl);
lnet_eq_wait_unlock();
if (tms < 0) {
- waitq_wait(&wl, TASK_INTERRUPTIBLE);
-
+ schedule();
} else {
struct timeval tv;
now = cfs_time_current();
- waitq_timedwait(&wl, TASK_INTERRUPTIBLE,
- cfs_time_seconds(tms) / 1000);
+ schedule_timeout(cfs_time_seconds(tms) / 1000);
cfs_duration_usec(cfs_time_sub(cfs_time_current(), now), &tv);
tms -= (int)(tv.tv_sec * 1000 + tv.tv_usec / 1000);
if (tms < 0) /* no more wait but may have new event */
}
MODULE_AUTHOR("Peter J. Braam <braam@clusterfs.com>");
-MODULE_DESCRIPTION("Portals v3.1");
+MODULE_DESCRIPTION("LNet v3.1");
+MODULE_VERSION("1.0.0");
MODULE_LICENSE("GPL");
-cfs_module(lnet, "1.0.0", lnet_module_init, lnet_module_exit);
+module_init(lnet_module_init);
+module_exit(lnet_module_exit);
"Waiting for %d zombies on peer table\n",
ptable->pt_zombies);
}
- cfs_pause(cfs_time_seconds(1) >> 1);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1) >> 1);
lnet_net_lock(cpt_locked);
}
}
if (all_known)
return;
- cfs_pause(cfs_time_seconds(1));
- }
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
+ }
}
void
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
"Waiting for rc buffers to unlink\n");
- cfs_pause(cfs_time_seconds(1) / 4);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1) / 4);
lnet_net_lock(LNET_LOCK_EX);
}
lnet_prune_rc_data(0); /* don't wait for UNLINK */
- /* Call cfs_pause() here always adds 1 to load average
+ /* Call schedule_timeout() here always adds 1 to load average
* because kernel counts # active tasks as nr_running
* + nr_uninterruptible. */
/* if there are any routes then wakeup every second. If
CWARN("Session is shutting down, "
"waiting for termination of transactions\n");
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
mutex_lock(&console_session.ses_mutex);
}
MODULE_DESCRIPTION("LNet Selftest");
+MODULE_VERSION("0.9.0");
MODULE_LICENSE("GPL");
-cfs_module(lnet, "0.9.0", lnet_selftest_init, lnet_selftest_fini);
-
+module_init(lnet_selftest_init);
+module_exit(lnet_selftest_fini);
memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
spin_lock_init(&srpc_data.rpc_glock);
- /* 1 second pause to avoid timestamp reuse */
- cfs_pause(cfs_time_seconds(1));
- srpc_data.rpc_matchbits = ((__u64) cfs_time_current_sec()) << 48;
+ /* 1 second pause to avoid timestamp reuse */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
+ srpc_data.rpc_matchbits = ((__u64) cfs_time_current_sec()) << 48;
srpc_data.rpc_state = SRPC_STATE_NONE;
#undef STATE2STR
}
-#define selftest_wait_events() cfs_pause(cfs_time_seconds(1) / 10)
-
#define lst_wait_until(cond, lock, fmt, ...) \
do { \
int __I = 2; \
fmt, ## __VA_ARGS__); \
spin_unlock(&(lock)); \
\
- selftest_wait_events(); \
+ set_current_state(TASK_UNINTERRUPTIBLE); \
+ schedule_timeout(cfs_time_seconds(1) / 10); \
\
spin_lock(&(lock)); \
} \
LASSERT(sv->sv_shuttingdown);
- while (srpc_finish_service(sv) == 0) {
- i++;
- CDEBUG (((i & -i) == i) ? D_WARNING : D_NET,
- "Waiting for %s service to shutdown...\n",
- sv->sv_name);
- selftest_wait_events();
- }
+ while (srpc_finish_service(sv) == 0) {
+ i++;
+ CDEBUG(((i & -i) == i) ? D_WARNING : D_NET,
+ "Waiting for %s service to shutdown...\n",
+ sv->sv_name);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1) / 10);
+ }
}
extern sfw_test_client_ops_t ping_test_client;
]) # LC_QUOTA64
#
+# LC_HAVE_ADD_WAIT_QUEUE_EXCLUSIVE
+#
+# 2.6.34 adds __add_wait_queue_exclusive
+#
+AC_DEFUN([LC_HAVE_ADD_WAIT_QUEUE_EXCLUSIVE], [
+LB_CHECK_COMPILE([if '__add_wait_queue_exclusive' exists],
+__add_wait_queue_exclusive, [
+ #include <linux/wait.h>
+],[
+ wait_queue_head_t queue;
+ wait_queue_t wait;
+ __add_wait_queue_exclusive(&queue, &wait);
+],[
+ AC_DEFINE(HAVE___ADD_WAIT_QUEUE_EXCLUSIVE, 1,
+ [__add_wait_queue_exclusive exists])
+])
+]) # LC_HAVE_ADD_WAIT_QUEUE_EXCLUSIVE
+
+#
# LC_FS_STRUCT_RWLOCK
#
# 2.6.36 fs_struct.lock use spinlock instead of rwlock.
# 2.6.34
LC_HAVE_DQUOT_FS_DISK_QUOTA
LC_HAVE_DQUOT_SUSPEND
+ LC_HAVE_ADD_WAIT_QUEUE_EXCLUSIVE
# 2.6.35, 3.0.0
LC_FILE_FSYNC
set_current_state(TASK_UNINTERRUPTIBLE);
mutex_unlock(&seq->lcs_mutex);
- waitq_wait(link, TASK_UNINTERRUPTIBLE);
+ schedule();
mutex_lock(&seq->lcs_mutex);
remove_wait_queue(&seq->lcs_waitq, link);
LASSERT(seqnr != NULL);
mutex_lock(&seq->lcs_mutex);
- init_waitqueue_entry_current(&link);
+ init_waitqueue_entry(&link, current);
while (1) {
rc = seq_fid_alloc_prep(seq, &link);
LASSERT(seq != NULL);
LASSERT(fid != NULL);
- init_waitqueue_entry_current(&link);
+ init_waitqueue_entry(&link, current);
mutex_lock(&seq->lcs_mutex);
if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_EXHAUST))
wait_queue_t link;
LASSERT(seq != NULL);
- init_waitqueue_entry_current(&link);
+ init_waitqueue_entry(&link, current);
mutex_lock(&seq->lcs_mutex);
while (seq->lcs_update) {
set_current_state(TASK_UNINTERRUPTIBLE);
mutex_unlock(&seq->lcs_mutex);
- waitq_wait(&link, TASK_UNINTERRUPTIBLE);
+ schedule();
mutex_lock(&seq->lcs_mutex);
remove_wait_queue(&seq->lcs_waitq, &link);
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre FID Module");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
-cfs_module(fid, "0.1.0", fid_mod_init, fid_mod_exit);
+module_init(fid_mod_init);
+module_exit(fid_mod_exit);
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre FLD");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
-cfs_module(mdd, LUSTRE_VERSION_STRING, fld_mod_init, fld_mod_exit);
+module_init(fld_mod_init);
+module_exit(fld_mod_exit);
# include <obd_support.h>
#else /* !__KERNEL__ */
# include <malloc.h>
+# include <stdlib.h>
#endif /* __KERNEL__ */
/** \defgroup cfg cfg
* of data. Try to use the padding first though.
*/
if (s[lcfg->lcfg_buflens[index] - 1] != '\0') {
- size_t last = min((size_t)lcfg->lcfg_buflens[index],
- cfs_size_round(lcfg->lcfg_buflens[index]) - 1);
- char lost = s[last];
+ size_t last = cfs_size_round(lcfg->lcfg_buflens[index]) - 1;
+ char lost;
+
+ /* Use the smaller value */
+ if (last > lcfg->lcfg_buflens[index])
+ last = lcfg->lcfg_buflens[index];
+
+ lost = s[last];
s[last] = '\0';
if (lost != '\0') {
CWARN("Truncated buf %d to '%s' (lost '%c'...)\n",
sigmask(SIGQUIT) | sigmask(SIGALRM))
/*
+ * Wait Queue
+ */
+#ifndef HAVE___ADD_WAIT_QUEUE_EXCLUSIVE
+static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
+ wait_queue_t *wait)
+{
+ wait->flags |= WQ_FLAG_EXCLUSIVE;
+ __add_wait_queue(q, wait);
+}
+#endif /* HAVE___ADD_WAIT_QUEUE_EXCLUSIVE */
+
+/**
+ * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
+ * waiting threads, which is not always desirable because all threads will
+ * be woken up again and again, even if the user only needs a few of them
+ * to be active most of the time. This is not good for performance because
+ * the cache can be polluted by different threads.
+ *
+ * A LIFO list can resolve this problem because we always wake up the most
+ * recently active thread by default.
+ *
+ * NB: please don't call non-exclusive & exclusive wait on the same
+ * waitq if add_wait_queue_exclusive_head is used.
+ */
+#define add_wait_queue_exclusive_head(waitq, link) \
+{ \
+ unsigned long flags; \
+ \
+ spin_lock_irqsave(&((waitq)->lock), flags); \
+ __add_wait_queue_exclusive(waitq, link); \
+ spin_unlock_irqrestore(&((waitq)->lock), flags); \
+}
+
+/*
* wait for @condition to become true, but no longer than timeout, specified
* by @info.
*/
if (condition) \
break; \
\
- init_waitqueue_entry_current(&__wait); \
+ init_waitqueue_entry(&__wait, current); \
l_add_wait(&wq, &__wait); \
\
/* Block all signals (just the non-fatal ones if no timeout). */ \
__blocked = cfs_block_sigsinv(0); \
\
for (;;) { \
- unsigned __wstate; \
- \
- __wstate = info->lwi_on_signal != NULL && \
- (__timeout == 0 || __allow_intr) ? \
- TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; \
- \
set_current_state(TASK_INTERRUPTIBLE); \
\
if (condition) \
break; \
\
if (__timeout == 0) { \
- waitq_wait(&__wait, __wstate); \
+ schedule(); \
} else { \
cfs_duration_t interval = info->lwi_interval? \
min_t(cfs_duration_t, \
info->lwi_interval,__timeout):\
__timeout; \
- cfs_duration_t remaining = waitq_timedwait(&__wait, \
- __wstate, \
- interval); \
+ cfs_duration_t remaining = schedule_timeout(interval); \
__timeout = cfs_time_sub(__timeout, \
cfs_time_sub(interval, remaining));\
if (__timeout == 0) { \
\
if (condition) \
break; \
- if (cfs_signal_pending()) { \
+ if (signal_pending(current)) { \
if (info->lwi_on_signal != NULL && \
(__timeout == 0 || __allow_intr)) { \
if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \
\
cfs_restore_sigs(__blocked); \
\
- set_current_state(TASK_RUNNING); \
+ set_current_state(TASK_RUNNING); \
remove_wait_queue(&wq, &__wait); \
} while (0)
-
#define l_wait_event(wq, condition, info) \
({ \
int __ret; \
#define DEBUG_SUBSYSTEM S_LDLM
+#include <linux/kthread.h>
#include <libcfs/libcfs.h>
#include <obd.h>
#include <obd_class.h>
#define DEBUG_SUBSYSTEM S_LDLM
+#include <linux/kthread.h>
#include <libcfs/libcfs.h>
#include <lustre_dlm.h>
#include <obd_class.h>
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
int to = cfs_time_seconds(1);
while (to > 0) {
- schedule_timeout_and_set_state(
- TASK_INTERRUPTIBLE, to);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(to);
if (lock->l_granted_mode == lock->l_req_mode ||
ldlm_is_destroyed(lock))
break;
#define DEBUG_SUBSYSTEM S_LDLM
+#include <linux/kthread.h>
#include <lustre_dlm.h>
#include <cl_object.h>
#include <obd_class.h>
unlock_res(res);
LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
if (lock->l_flags & LDLM_FL_FAIL_LOC) {
- schedule_timeout_and_set_state(
- TASK_UNINTERRUPTIBLE,
- cfs_time_seconds(4));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(4));
set_current_state(TASK_RUNNING);
}
if (lock->l_completion_ast)
#define DEBUG_SUBSYSTEM S_LFSCK
+#include <linux/kthread.h>
#include <libcfs/list.h>
#include <lu_object.h>
#include <dt_object.h>
MODULE_AUTHOR("Intel Corporation <http://www.intel.com/>");
MODULE_DESCRIPTION("LFSCK");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
-cfs_module(lfsck, LUSTRE_VERSION_STRING, lfsck_init, lfsck_exit);
+module_init(lfsck_init);
+module_exit(lfsck_exit);
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/version.h>
+#include <linux/user_namespace.h>
+#ifdef HAVE_UIDGID_HEADER
+# include <linux/uidgid.h>
+#endif
#include <asm/uaccess.h>
#include <linux/buffer_head.h> // for wait_on_buffer
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/sched.h>
+#include <linux/user_namespace.h>
+#ifdef HAVE_UIDGID_HEADER
+# include <linux/uidgid.h>
+#endif
#include <lustre/ll_fiemap.h>
#include <lustre_ioctl.h>
bkt = lu_site_bkt_from_fid(site, &header->loh_fid);
- init_waitqueue_entry_current(&waiter);
+ init_waitqueue_entry(&waiter, current);
add_wait_queue(&bkt->lsb_marche_funebre, &waiter);
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (atomic_read(&header->loh_ref) == 1)
break;
- waitq_wait(&waiter, TASK_UNINTERRUPTIBLE);
+ schedule();
}
set_current_state(TASK_RUNNING);
#include <linux/fs.h>
#include <linux/version.h>
+#include <linux/kthread.h>
#include <asm/uaccess.h>
#include <linux/file.h>
#include <linux/kmod.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/mm.h>
+#include <linux/user_namespace.h>
+#ifdef HAVE_UIDGID_HEADER
+# include <linux/uidgid.h>
+#endif
#include <lustre_ioctl.h>
#include <lustre_ha.h>
sbi->ll_umounting = 1;
/* wait running statahead threads to quit */
- while (atomic_read(&sbi->ll_sa_running) > 0)
- schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
- msecs_to_jiffies(MSEC_PER_SEC >> 3));
+ while (atomic_read(&sbi->ll_sa_running) > 0) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC >> 3));
+ }
}
EXIT;
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/kthread.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#define DEBUG_SUBSYSTEM S_LLITE
#include <linux/version.h>
+#include <linux/user_namespace.h>
+#ifdef HAVE_UIDGID_HEADER
+# include <linux/uidgid.h>
+#endif
#include <lustre_param.h>
#include <lprocfs_status.h>
#include <obd_support.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
+#include <linux/user_namespace.h>
+#ifdef HAVE_UIDGID_HEADER
+# include <linux/uidgid.h>
+#endif
#define DEBUG_SUBSYSTEM S_LLITE
#include <linux/module.h>
#include <linux/types.h>
#include <linux/version.h>
+#include <linux/user_namespace.h>
+#ifdef HAVE_UIDGID_HEADER
+# include <linux/uidgid.h>
+#endif
#include <lustre_ha.h>
#include <lustre_dlm.h>
#include <linux/fs.h>
#include <linux/sched.h>
+#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#define DEBUG_SUBSYSTEM S_LLITE
-
+#include <linux/user_namespace.h>
+#ifdef HAVE_UIDGID_HEADER
+# include <linux/uidgid.h>
+#endif
#include <libcfs/libcfs.h>
#include <obd.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/user_namespace.h>
+#ifdef HAVE_UIDGID_HEADER
+# include <linux/uidgid.h>
+#endif
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Logical Object Volume OBD driver");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
-cfs_module(lov, LUSTRE_VERSION_STRING, lov_init, lov_exit);
+module_init(lov_init);
+module_exit(lov_exit);
* ->lo_sub[] slot in lovsub_object_fini() */
if (r0->lo_sub[idx] == los) {
waiter = &lov_env_info(env)->lti_waiter;
- init_waitqueue_entry_current(waiter);
+ init_waitqueue_entry(waiter, current);
add_wait_queue(&bkt->lsb_marche_funebre, waiter);
set_current_state(TASK_UNINTERRUPTIBLE);
while (1) {
spin_lock(&r0->lo_sub_lock);
if (r0->lo_sub[idx] == los) {
spin_unlock(&r0->lo_sub_lock);
- waitq_wait(waiter, TASK_UNINTERRUPTIBLE);
+ schedule();
} else {
spin_unlock(&r0->lo_sub_lock);
set_current_state(TASK_RUNNING);
*/
#define DEBUG_SUBSYSTEM S_MDC
+#include <linux/user_namespace.h>
+#ifdef HAVE_UIDGID_HEADER
+# include <linux/uidgid.h>
+#endif
#include <lustre_net.h>
#include <lustre/lustre_idl.h>
#include <obd_class.h>
#include <linux/miscdevice.h>
#include <linux/init.h>
#include <linux/utsname.h>
+#include <linux/kthread.h>
+#include <linux/user_namespace.h>
+#ifdef HAVE_UIDGID_HEADER
+# include <linux/uidgid.h>
+#endif
#include <lustre_acl.h>
#include <lustre_ioctl.h>
#define DEBUG_SUBSYSTEM S_MDS
#include <linux/module.h>
+#include <linux/kthread.h>
#include <obd_class.h>
#include <lustre_ioctl.h>
#include <lustre_mds.h>
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Meta-data Device Prototype ("LUSTRE_MDD_NAME")");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
-cfs_module(mdd, "0.1.0", mdd_mod_init, mdd_mod_exit);
+module_init(mdd_mod_init);
+module_exit(mdd_mod_exit);
#define DEBUG_SUBSYSTEM S_MDS
+#include <linux/kthread.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_export.h>
if (OBD_FAIL_CHECK(OBD_FAIL_TGT_DELAY_CONDITIONAL) &&
cfs_fail_val ==
- tsi2mdt_info(tsi)->mti_mdt->mdt_seq_site.ss_node_id)
- schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
- msecs_to_jiffies(3 * MSEC_PER_SEC));
+ tsi2mdt_info(tsi)->mti_mdt->mdt_seq_site.ss_node_id) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(3 * MSEC_PER_SEC));
+ }
rc = tgt_connect(tsi);
if (rc != 0)
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Metadata Target ("LUSTRE_MDT_NAME")");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
-cfs_module(mdt, LUSTRE_VERSION_STRING, mdt_mod_init, mdt_mod_exit);
+module_init(mdt_mod_init);
+module_exit(mdt_mod_exit);
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/kmod.h>
+#include <linux/user_namespace.h>
+#ifdef HAVE_UIDGID_HEADER
+# include <linux/uidgid.h>
+#endif
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#define DEBUG_SUBSYSTEM S_MDS
+#include <linux/user_namespace.h>
+#ifdef HAVE_UIDGID_HEADER
+# include <linux/uidgid.h>
+#endif
#include "mdt_internal.h"
#include <lnet/nidstr.h>
#include <lustre_nodemap.h>
#define D_MGC D_CONFIG /*|D_WARNING*/
#include <linux/module.h>
+#include <linux/kthread.h>
#include <obd_class.h>
#include <lustre_dlm.h>
#include <lprocfs_status.h>
#define DEBUG_SUBSYSTEM S_MGS
#define D_MGS D_CONFIG
+#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <obd.h>
*/
#define DEBUG_SUBSYSTEM S_CLASS
-#include <asm/atomic.h>
+
+#include <linux/user_namespace.h>
+#ifdef HAVE_UIDGID_HEADER
+# include <linux/uidgid.h>
+#endif
+#include <linux/atomic.h>
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_ioctl.h>
#include "llog_internal.h"
-
struct obd_device *obd_devs[MAX_OBD_DEVICES];
struct list_head obd_types;
DEFINE_RWLOCK(obd_dev_lock);
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Class Driver Build Version: " BUILD_VERSION);
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
-cfs_module(obdclass, LUSTRE_VERSION_STRING, init_obdclass, cleanup_obdclass);
+module_init(init_obdclass);
+module_exit(cleanup_obdclass);
*/
#define DEBUG_SUBSYSTEM S_CLASS
+
+#include <linux/kthread.h>
#include <obd_class.h>
#include <lprocfs_status.h>
#include <lustre_kernelcomm.h>
spin_lock(&obd->obd_dev_lock);
while (!list_empty(&obd->obd_unlinked_exports)) {
spin_unlock(&obd->obd_dev_lock);
- schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
- cfs_time_seconds(waited));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(waited));
if (waited > 5 && IS_PO2(waited)) {
LCONSOLE_WARN("%s is waiting for obd_unlinked_exports "
"more than %d seconds. "
#define DEBUG_SUBSYSTEM S_SEC
+#include <linux/user_namespace.h>
+#ifdef HAVE_UIDGID_HEADER
+# include <linux/uidgid.h>
+#endif
#include <lustre_idmap.h>
#include <upcall_cache.h>
#include <md_object.h>
#include <obd_support.h>
-#define lustre_get_group_info(group_info) do { \
- atomic_inc(&(group_info)->usage); \
+#define lustre_get_group_info(group_info) do { \
+ atomic_inc(&(group_info)->usage); \
} while (0)
-#define lustre_put_group_info(group_info) do { \
- if (atomic_dec_and_test(&(group_info)->usage)) \
- groups_free(group_info); \
+#define lustre_put_group_info(group_info) do { \
+ if (atomic_dec_and_test(&(group_info)->usage)) \
+ groups_free(group_info); \
} while (0)
/*
#define DEBUG_SUBSYSTEM S_LOG
-
+#include <linux/kthread.h>
#include <obd_class.h>
#include <lustre_log.h>
#include "llog_internal.h"
*/
if (likely(waiter != NULL)) {
- init_waitqueue_entry_current(waiter);
+ init_waitqueue_entry(waiter, current);
add_wait_queue(&bkt->lsb_marche_funebre, waiter);
set_current_state(TASK_UNINTERRUPTIBLE);
lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
* lu_object_find_try() already added waiter into the
* wait queue.
*/
- waitq_wait(&wait, TASK_UNINTERRUPTIBLE);
+ schedule();
bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
}
#define DEBUG_SUBSYSTEM S_CLASS
+#include <linux/user_namespace.h>
+#ifdef HAVE_UIDGID_HEADER
+# include <linux/uidgid.h>
+#endif
#include <obd_class.h>
#include <lustre/lustre_idl.h>
MAX_SCHEDULE_TIMEOUT;
long left;
- init_waitqueue_entry_current(&wait);
+ init_waitqueue_entry(&wait, current);
add_wait_queue(&entry->ue_waitq, &wait);
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock(&cache->uc_lock);
- left = waitq_timedwait(&wait, TASK_INTERRUPTIBLE,
- expiry);
+ left = schedule_timeout(expiry);
spin_lock(&cache->uc_lock);
remove_wait_queue(&entry->ue_waitq, &wait);
/* XXX Bug 3413; wait for a bit to ensure the BL callback has
* happened before calling ldlm_namespace_free() */
- schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE, cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
ldlm_namespace_free(obd->obd_namespace, NULL, obd->obd_force);
obd->obd_namespace = NULL;
*/
#define DEBUG_SUBSYSTEM S_ECHO
+
+#include <linux/user_namespace.h>
+#ifdef HAVE_UIDGID_HEADER
+# include <linux/uidgid.h>
+#endif
#include <libcfs/libcfs.h>
#include <obd.h>
spin_unlock(&ec->ec_lock);
CERROR("echo_client still has objects at cleanup time, "
"wait for 1 second\n");
- schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
- cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
lu_site_purge(env, &ed->ed_site->cs_lu, -1);
spin_lock(&ec->ec_lock);
}
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Testing Echo OBD driver");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
-cfs_module(obdecho, LUSTRE_VERSION_STRING, obdecho_init, obdecho_exit);
+module_init(obdecho_init);
+module_exit(obdecho_exit);
/** @} echo_client */
#define DEBUG_SUBSYSTEM S_FILTER
+#include <linux/kthread.h>
#include "ofd_internal.h"
struct ofd_inconsistency_item {
* at any time.
*/
-static CFS_DECL_WAITQ(osc_lru_waitq);
+static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq);
/* LRU pages are freed in batch mode. OSC should at least free this
* number of pages to avoid running out of LRU budget, and.. */
static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
-cfs_module(osc, LUSTRE_VERSION_STRING, osc_init, osc_exit);
+module_init(osc_init);
+module_exit(osc_exit);
#define DEBUG_SUBSYSTEM S_OSD
#include <linux/module.h>
+#include <linux/user_namespace.h>
+#ifdef HAVE_UIDGID_HEADER
+# include <linux/uidgid.h>
+#endif
/* LUSTRE_VERSION_CODE */
#include <lustre_ver.h>
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Object Storage Device ("LUSTRE_OSD_LDISKFS_NAME")");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
-cfs_module(osd, "0.1.0", osd_mod_init, osd_mod_exit);
+module_init(osd_mod_init);
+module_exit(osd_mod_exit);
#define DEBUG_SUBSYSTEM S_LFSCK
+#include <linux/kthread.h>
#include <lustre/lustre_idl.h>
#include <lustre_disk.h>
#include <dt_object.h>
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Object Storage Device ("LUSTRE_OSD_ZFS_NAME")");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
-cfs_module(osd, LUSTRE_VERSION_STRING, osd_init, osd_exit);
+module_init(osd_init);
+module_exit(osd_exit);
MODULE_AUTHOR("Intel, Inc. <http://www.intel.com/>");
MODULE_DESCRIPTION("Lustre OST Proxy Device ("LUSTRE_OSP_NAME")");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
-cfs_module(osp, LUSTRE_VERSION_STRING, osp_mod_init, osp_mod_exit);
+module_init(osp_mod_init);
+module_exit(osp_mod_exit);
#define DEBUG_SUBSYSTEM S_MDS
+#include <linux/kthread.h>
#include "osp_internal.h"
/*
#define DEBUG_SUBSYSTEM S_MDS
+#include <linux/kthread.h>
#include <lustre_log.h>
#include <lustre_update.h>
#include "osp_internal.h"
CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n",
set, timeout);
- if (timeout == 0 && !cfs_signal_pending())
+ if (timeout == 0 && !signal_pending(current))
/*
* No requests are in-flight (ether timed out
* or delayed), so we can allow interrupts.
* pending when we started, we need to handle it now or we risk
* it being ignored forever */
if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
- cfs_signal_pending()) {
+ signal_pending(current)) {
sigset_t blocked_sigs =
cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
* like SIGINT or SIGKILL. We still ignore less
* important signals since ptlrpc set is not easily
* reentrant from userspace again */
- if (cfs_signal_pending())
+ if (signal_pending(current))
ptlrpc_interrupted_set(set);
cfs_restore_sigs(blocked_sigs);
}
cache_get(&rsip->h); /* take an extra ref */
init_waitqueue_head(&rsip->waitq);
- init_waitqueue_entry_current(&wait);
+ init_waitqueue_entry(&wait, current);
add_wait_queue(&rsip->waitq, &wait);
cache_check:
#define DEBUG_SUBSYSTEM S_RPC
+#include <linux/kthread.h>
#include <obd_support.h>
#include <lustre_ha.h>
#include <lustre_net.h>
#define DEBUG_SUBSYSTEM S_RPC
+#include <linux/kthread.h>
#include <obd_support.h>
#include <obd_class.h>
#include "ptlrpc_internal.h"
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Request Processor and Lock Management");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
-cfs_module(ptlrpc, "1.0.0", ptlrpc_init, ptlrpc_exit);
+module_init(ptlrpc_init);
+module_exit(ptlrpc_exit);
#define DEBUG_SUBSYSTEM S_RPC
+#include <linux/kthread.h>
#include <libcfs/libcfs.h>
#include <lustre_net.h>
#include <lustre_lib.h>
#define DEBUG_SUBSYSTEM S_SEC
-#include <libcfs/libcfs.h>
+#include <linux/user_namespace.h>
+#ifdef HAVE_UIDGID_HEADER
+# include <linux/uidgid.h>
+#endif
#include <linux/crypto.h>
#include <linux/key.h>
+#include <libcfs/libcfs.h>
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
newctx = req->rq_cli_ctx;
LASSERT(newctx);
- if (unlikely(newctx == oldctx &&
+ if (unlikely(newctx == oldctx &&
test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
/*
* still get the old dead ctx, usually means system too busy
"ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
newctx, newctx->cc_flags);
- schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
- msecs_to_jiffies(MSEC_PER_SEC));
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC));
} else {
/*
* it's possible newctx == oldctx if we're switching
page_pools.epp_waitqlen;
set_current_state(TASK_UNINTERRUPTIBLE);
- init_waitqueue_entry_current(&waitlink);
+ init_waitqueue_entry(&waitlink, current);
add_wait_queue(&page_pools.epp_waitq, &waitlink);
spin_unlock(&page_pools.epp_lock);
- waitq_wait(&waitlink, TASK_UNINTERRUPTIBLE);
+ schedule();
remove_wait_queue(&page_pools.epp_waitq, &waitlink);
LASSERT(page_pools.epp_waitqlen > 0);
spin_lock(&page_pools.epp_lock);
#define DEBUG_SUBSYSTEM S_SEC
+#include <linux/kthread.h>
#include <libcfs/libcfs.h>
#include <obd_support.h>
*/
#define DEBUG_SUBSYSTEM S_RPC
+#include <linux/kthread.h>
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_net.h>
"freed:%lu, repeat:%u\n", hash,
d.lid_inuse, d.lid_freed, repeat);
repeat++;
- schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
- cfs_time_seconds(1));
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
goto retry;
}
EXIT;
MODULE_AUTHOR("Intel Corporation <http://www.intel.com/>");
MODULE_DESCRIPTION("Lustre Quota");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
-cfs_module(lquota, "2.4.0", init_lquota, exit_lquota);
+module_init(init_lquota);
+module_exit(exit_lquota);
#define DEBUG_SUBSYSTEM S_LQUOTA
+#include <linux/kthread.h>
#include <lustre_dlm.h>
#include <obd_class.h>
CDEBUG(D_QUOTA, "qqi reference count %u, repeat: %d\n",
atomic_read(&qqi->qqi_ref), repeat);
repeat++;
- schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
- cfs_time_seconds(1));
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
}
/* by now, all qqi users should have gone away */
#define DEBUG_SUBSYSTEM S_LQUOTA
+#include <linux/kthread.h>
#include "qsd_internal.h"
/*
#define DEBUG_SUBSYSTEM S_LQUOTA
+#include <linux/kthread.h>
#include "qsd_internal.h"
extern struct kmem_cache *upd_kmem;
#define DEBUG_SUBSYSTEM S_CLASS
+#include <linux/user_namespace.h>
+#ifdef HAVE_UIDGID_HEADER
+# include <linux/uidgid.h>
+#endif
+
#include <obd.h>
#include <obd_class.h>
#include <obd_cksum.h>
#include <time.h>
#include <sys/time.h>
+#define EXPORT_SYMBOL(s)
+
#include <libcfs/libcfs.h>
#include <../ldlm/interval_tree.c>
#include <string.h>
#include <signal.h>
#include <errno.h>
+#include <libcfs/util/string.h>
+
#include "gssd.h"
#include "err_util.h"
#include "gss_util.h"
#include <keyutils.h>
#include <gssapi/gssapi.h>
+#include <libcfs/util/string.h>
#include "lsupport.h"
#include "lgss_utils.h"
#include "write_bytes.h"
#include <libgen.h>
#include <syslog.h>
+#include <libcfs/util/string.h>
#include <libcfs/libcfs.h>
#include <lnet/nidstr.h>
#include <lustre/lustre_user.h>
# include <sys/quota.h>
#endif
+#include <libcfs/util/string.h>
#include <libcfs/libcfs.h>
#include <libcfs/util/ioctl.h>
#include <libcfs/util/parser.h>
#include <sys/xattr.h>
#include <sys/syscall.h>
#include <sys/types.h>
+#include <libcfs/util/string.h>
#include <lustre/lustre_idl.h>
#include <lustre/lustreapi.h>
struct stat dst_st;
char *buf = NULL;
__u64 write_total = 0;
- __u64 length;
+ __u64 length = hai->hai_extent.length;
time_t last_report_time;
int rc = 0;
double start_ct_now = ct_now();
}
/* Don't read beyond a given extent */
- length = min(hai->hai_extent.length,
- src_st.st_size - hai->hai_extent.offset);
+ if (length > src_st.st_size - hai->hai_extent.offset)
+ length = src_st.st_size - hai->hai_extent.offset;
start_time = last_bw_print = last_report_time = time(NULL);
#include <endian.h>
#endif
+#include <libcfs/util/string.h>
#include <libcfs/libcfs.h>
#include <lustre/libiam.h>
#include <unistd.h>
#endif
+#include <libcfs/util/string.h>
#include <libcfs/libcfs.h>
#include <lustre/lustreapi.h>
#include <glob.h>
#include <libcfs/libcfs.h>
+#include <libcfs/util/string.h>
#include <libcfs/util/parser.h>
#include <lnet/nidstr.h>
#include <lustre_cfg.h>
#include <lustre/lustre_idl.h>
#include <lustre/lustre_build_version.h>
-#include <unistd.h>
#include <sys/un.h>
#include <time.h>
#include <sys/time.h>
#include <utime.h>
#include <sys/xattr.h>
+#include <libcfs/util/string.h>
#include <libcfs/util/parser.h>
#include <lustre/lustreapi.h>
#include <lustre/lustre_idl.h>
struct changelog_rec *rec;
struct changelog_ext_rename *rnm;
size_t namelen;
- size_t copylen;
+ size_t copylen = sizeof(info->name);
if (llapi_changelog_recv(priv, &rec) != 0)
return -1;
snprintf(info->pfid, sizeof(info->pfid), DFID, PFID(&rec->cr_pfid));
namelen = strnlen(changelog_rec_name(rec), rec->cr_namelen);
- copylen = min(sizeof(info->name), namelen + 1);
+ if (copylen > namelen + 1)
+ copylen = namelen + 1;
strlcpy(info->name, changelog_rec_name(rec), copylen);
/* Don't use rnm if CLF_RENAME isn't set */
rnm = changelog_rec_rename(rec);
if (rec->cr_flags & CLF_RENAME && !fid_is_zero(&rnm->cr_sfid)) {
+ copylen = sizeof(info->sname);
+
snprintf(info->sfid, sizeof(info->sfid), DFID,
PFID(&rnm->cr_sfid));
snprintf(info->spfid, sizeof(info->spfid), DFID,
PFID(&rnm->cr_spfid));
namelen = changelog_rec_snamelen(rec);
- copylen = min(sizeof(info->sname), namelen + 1);
+ if (copylen > namelen + 1)
+ copylen = namelen + 1;
strlcpy(info->sname, changelog_rec_sname(rec), copylen);
if (verbose > 1)
mop.mo_retry - i);
}
- if (mop.mo_retry) {
- sleep(1 << max((i/2), 5));
- }
- else {
- rc = errno;
- }
- }
- }
- }
+ if (mop.mo_retry) {
+ int limit = i/2 > 5 ? i/2 : 5;
+
+ sleep(1 << limit);
+ } else {
+ rc = errno;
+ }
+ }
+ }
+ }
if (rc) {
char *cli;
/* This deals with duplicate ldd_mount_types resolving to same OSD layer
* plugin (e.g. ext3/ldiskfs/ldiskfs2 all being ldiskfs) */
- strlcpy(fsname, mt_type(mount_type), sizeof(fsname));
+ strncpy(fsname, mt_type(mount_type), sizeof(fsname));
name = fsname + sizeof("osd-") - 1;
/* change osd- to osd_ */
fid->f_oid = space->jt_id;
fid->f_ver = 0;
- space->jt_id = min(space->jt_id + *count, space->jt_width);
+ space->jt_id = space->jt_id + *count;
+ if (space->jt_id > space->jt_width)
+ space->jt_id = space->jt_width;
*count = space->jt_id - fid->f_oid;
return 0;
if (rc != 0)
goto out;
- memcpy(ret_data, data.ioc_pbuf1, min(data.ioc_plen1, ret_size));
+ if (ret_size > data.ioc_plen1)
+ ret_size = data.ioc_plen1;
+
+ memcpy(ret_data, data.ioc_pbuf1, ret_size);
}
out:
lustre_cfg_free(lcfg);
#include <stdlib.h>
#include <unistd.h>
-#include <libcfs/libcfs.h>
-#include <lustre/lustre_idl.h>
+#include <linux/types.h>
#define __REQ_LAYOUT_USER__ (1)
+#define EXPORT_SYMBOL(s)
+
+#include <libcfs/libcfs.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_req_layout.h>
#define lustre_swab_generic_32s NULL
#define lustre_swab_lu_seq_range NULL