#define MODULE_DESCRIPTION(name) MODULE_AUTHOR(name)
#define MODULE_LICENSE(name) MODULE_AUTHOR(name)
-#define THIS_MODULE NULL
+#define THIS_MODULE (void *)0x11111
#define __init
#define __exit
static inline struct shrinker *set_shrinker(int seeks, shrinker_t shrinkert)
{
- return NULL;
+ return (struct shrinker *)0xdeadbea1; // Cannot return NULL here
}
static inline void remove_shrinker(struct shrinker *shrinker)
***************************************************************************/
struct radix_tree_root {
- struct list_head *rnode;
+ struct list_head list;
+ void *rnode;
};
+struct radix_tree_node {
+ struct list_head _node;
+ unsigned long index;
+ void *item;
+};
+
#define RADIX_TREE_INIT(mask) { \
- .rnode = NULL, \
+ NOT_IMPLEMENTED \
}
#define RADIX_TREE(name, mask) \
struct radix_tree_root name = RADIX_TREE_INIT(mask)
-#define INIT_RADIX_TREE(root, mask) \
-do { \
- (root)->rnode = NULL; \
+
+#define INIT_RADIX_TREE(root, mask) \
+do { \
+ CFS_INIT_LIST_HEAD(&((struct radix_tree_root *)root)->list); \
+ ((struct radix_tree_root *)root)->rnode = NULL; \
} while (0)
static inline int radix_tree_insert(struct radix_tree_root *root,
- unsigned long idx, struct page *page)
+ unsigned long idx, void *item)
{
- if (root->rnode == NULL)
- root->rnode = &page->_node;
- else
- list_add_tail(&page->_node, root->rnode);
+ struct radix_tree_node *node;
+ node = malloc(sizeof(*node));
+ if (!node)
+ return -ENOMEM;
+
+ CFS_INIT_LIST_HEAD(&node->_node);
+ node->index = idx;
+ node->item = item;
+ list_add_tail(&node->_node, &root->list);
+ root->rnode = (void *)1001;
return 0;
}
-static inline void *radix_tree_lookup(struct radix_tree_root *root,
+static inline struct radix_tree_node *radix_tree_lookup0(struct radix_tree_root *root,
unsigned long idx)
{
- struct page *p;
+ struct radix_tree_node *node;
- if (root->rnode == NULL)
+ if (list_empty(&root->list))
return NULL;
- p = list_entry(root->rnode, struct page, _node);
- if (p->index == idx)
- return p;
-
- list_for_each_entry(p, root->rnode, _node)
- if (p->index == idx)
- return p;
+ list_for_each_entry(node, &root->list, _node)
+ if (node->index == idx)
+ return node;
return NULL;
}
+static inline void *radix_tree_lookup(struct radix_tree_root *root,
+ unsigned long idx)
+{
+ struct radix_tree_node *node = radix_tree_lookup0(root, idx);
+
+ if (node)
+ return node->item;
+ return node;
+}
+
static inline void *radix_tree_delete(struct radix_tree_root *root,
unsigned long idx)
{
- struct page *p = radix_tree_lookup(root, idx);
+ struct radix_tree_node *p = radix_tree_lookup0(root, idx);
+ void *item;
if (p == NULL)
return NULL;
- if (list_empty(root->rnode))
- root->rnode = NULL;
- else if (root->rnode == &p->_node)
- root->rnode = p->_node.next;
+
list_del_init(&p->_node);
- return p;
+ item = p->item;
+ free(p);
+ if (list_empty(&root->list))
+ root->rnode = NULL;
+
+ return item;
}
static inline unsigned int
unsigned int done;
cfs_waitq_t wait;
};
-
+typedef int (cfs_wait_handler) (int timeout);
+void init_completion_module(cfs_wait_handler *handler);
void init_completion(struct completion *c);
void complete(struct completion *c);
void wait_for_completion(struct completion *c);
struct semaphore m_sem;
};
+#define DEFINE_MUTEX(m) struct mutex m
+
static inline void mutex_init(struct mutex *mutex)
{
init_mutex(&mutex->m_sem);
/**
* Try-lock this mutex.
*
- * \retval 1 try-lock succeeded.
*
- * \retval 0 try-lock failed.
+ * \retval 0 try-lock succeeded (lock acquired).
+ * \retval errno indicates lock contention.
+ */
+static inline int mutex_down_trylock(struct mutex *mutex)
+{
+ return 0;
+}
+
+/**
+ * Try-lock this mutex.
+ *
+ * Note, return values are negation of what is expected from down_trylock() or
+ * pthread_mutex_trylock().
+ *
+ * \retval 1 try-lock succeeded (lock acquired).
+ * \retval 0 indicates lock contention.
*/
static inline int mutex_trylock(struct mutex *mutex)
{
- return 1;
+ return !mutex_down_trylock(mutex);
}
static inline void mutex_destroy(struct mutex *lock)
EXPORT_SYMBOL(libcfs_arch_init);
EXPORT_SYMBOL(libcfs_arch_cleanup);
+EXPORT_SYMBOL(cfs_enter_debugger);
EXPORT_SYMBOL(cfs_daemonize);
EXPORT_SYMBOL(cfs_daemonize_ctxt);
EXPORT_SYMBOL(cfs_block_allsigs);
} else {
tage = tage_alloc(CFS_ALLOC_ATOMIC);
if (tage == NULL) {
- printk(KERN_WARNING
- "failure to allocate a tage (%ld)\n",
- tcd->tcd_cur_pages);
+ if (printk_ratelimit())
+ printk(KERN_WARNING
+ "cannot allocate a tage (%ld)\n",
+ tcd->tcd_cur_pages);
return NULL;
}
}
if (printk_ratelimit())
printk(KERN_WARNING "debug daemon buffer overflowed; "
- "discarding 10%% of pages (%d of %ld)\n",
+ "discarding 10%% of pages (%d of %ld)\n",
pgcount + 1, tcd->tcd_cur_pages);
CFS_INIT_LIST_HEAD(&pc.pc_pages);
int libcfs_debug_vmsg2(cfs_debug_limit_state_t *cdls, int subsys, int mask,
const char *file, const char *fn, const int line,
const char *format1, va_list args,
- const char *format2, ...)
+ const char *format2, ...)
{
struct trace_cpu_data *tcd = NULL;
struct ptldebug_header header;
{
libcfs_debug_msg(NULL, 0, D_EMERG, file, func, line,
"ASSERTION(%s) failed\n", expr);
+ cfs_enter_debugger();
lbug_with_loc(file, func, line);
}
EXPORT_SYMBOL(libcfs_assertion_failed);
const char *usr_buffer, int usr_buffer_nob)
{
int nob;
-
+
if (usr_buffer_nob > knl_buffer_nob)
return -EOVERFLOW;
-
- if (copy_from_user((void *)knl_buffer,
+
+ if (copy_from_user((void *)knl_buffer,
(void *)usr_buffer, usr_buffer_nob))
return -EFAULT;
* copied out string - usually "\n", for /proc entries and "" (i.e. a
* terminating zero byte) for sysctl entries */
int nob = strlen(knl_buffer);
-
+
if (nob > usr_buffer_nob)
nob = usr_buffer_nob;
-
+
if (copy_to_user(usr_buffer, knl_buffer, nob))
return -EFAULT;
-
+
if (append != NULL && nob < usr_buffer_nob) {
if (copy_to_user(usr_buffer + nob, append, 1))
return -EFAULT;
-
+
nob++;
}
{
if (nob > 2 * CFS_PAGE_SIZE) /* string must be "sensible" */
return -EINVAL;
-
+
*str = cfs_alloc(nob, CFS_ALLOC_STD | CFS_ALLOC_ZERO);
if (*str == NULL)
return -ENOMEM;
int trace_daemon_command(char *str)
{
int rc = 0;
-
+
tracefile_write_lock();
if (strcmp(str, "stop") == 0) {
int pages;
int limit = trace_max_debug_mb();
struct trace_cpu_data *tcd;
-
+
if (mb < num_possible_cpus())
return -EINVAL;
int j;
struct trace_cpu_data *tcd;
int total_pages = 0;
-
+
tracefile_read_lock();
tcd_for_each(tcd, i, j)
* - wait_for_completion(c)
*/
+static cfs_wait_handler *wait_handler;
+
+void init_completion_module(cfs_wait_handler *handler)
+{
+ wait_handler = handler;
+}
+
void init_completion(struct completion *c)
{
LASSERT(c != NULL);
void wait_for_completion(struct completion *c)
{
LASSERT(c != NULL);
+ do {
+ if (wait_handler)
+ wait_handler(1000);
+ else
+ break;
+ } while (c->done == 0);
}
int wait_for_completion_interruptible(struct completion *c)
{
LASSERT(c != NULL);
+ do {
+ if (wait_handler)
+ wait_handler(1000);
+ else
+ break;
+ } while (c->done == 0);
return 0;
}
WEXITSTATUS(rc) != 0)
abort();
- if (strnlen(str, len) == len)
+ if (memchr(str, 0, len) == NULL)
str[len - 1] = 0;
if (str[strlen(str) - 1] == '\n')