-/* this copies a snapshot of the debug buffer into an array of pages
- * before doing the potentially blocking copy into userspace. it could
- * be warning userspace if things wrap heavily while its off copying. */
-__s32 portals_debug_copy_to_user(char *buf, unsigned long len)
-{
- int rc;
- unsigned long total, debug_off, i, off, copied;
- unsigned long flags;
- struct page *page;
- LIST_HEAD(my_pages);
- struct list_head *pos, *n;
-
- /* Caller must provide room for the entire debug buffer; partial
- * reads are not supported. */
- if (len < debug_size)
- return -ENOSPC;
-
- /* Pre-allocate one page per PAGE_SIZE chunk of the debug buffer so
- * that the snapshot below can run under the spinlock without the
- * allocator ever needing to sleep. */
- for (i = 0 ; i < debug_size; i += PAGE_SIZE) {
- page = alloc_page(GFP_NOFS);
- if (page == NULL) {
- rc = -ENOMEM;
- goto cleanup;
- }
- list_add(&page->list, &my_pages);
- }
-
- /* Stage 1: copy debug_buf into the private page list while holding
- * the lock, so concurrent writers cannot scribble mid-snapshot. */
- spin_lock_irqsave(&portals_debug_lock, flags);
- debug_off = atomic_read(&debug_off_a);
-
- /* Sigh. If the buffer is empty, then skip to the end. */
- if (debug_off == 0 && !debug_wrapped) {
- spin_unlock_irqrestore(&portals_debug_lock, flags);
- rc = 0;
- goto cleanup;
- }
-
- /* If the buffer has wrapped, the oldest byte sits just past the
- * current write offset and the whole buffer is valid; otherwise
- * valid data runs from 0 up to debug_off. */
- if (debug_wrapped) {
- off = debug_off + 1;
- total = debug_size;
- } else {
- off = 0;
- total = debug_off;
- }
- copied = 0;
- list_for_each(pos, &my_pages) {
- unsigned long to_copy;
- page = list_entry(pos, struct page, list);
-
- to_copy = min(total - off, PAGE_SIZE);
- if (to_copy == 0) {
- off = 0;
- to_copy = min(debug_size - off, PAGE_SIZE);
- }
-finish_partial:
- memcpy(kmap(page), debug_buf + off, to_copy);
- kunmap(page);
- copied += to_copy;
- if (copied >= total)
- break;
-
- off += to_copy;
- /* A page straddling the wrap point is filled in two passes:
- * the tail of debug_buf above, then its head via
- * finish_partial.
- * NOTE(review): the second pass memcpys to the *start* of the
- * kmap()ed page (the destination never advances by the first
- * pass's length), so it overwrites the bytes just copied
- * instead of appending after them -- looks like a bug;
- * confirm against the expected dump layout. */
- if (off >= debug_size) {
- off = 0;
- if (to_copy != PAGE_SIZE) {
- to_copy = PAGE_SIZE - to_copy;
- goto finish_partial;
- }
- }
- }
-
- spin_unlock_irqrestore(&portals_debug_lock, flags);
-
- /* Stage 2: push the private snapshot out to userspace. This may
- * block, but the lock is dropped, so writers can keep logging. */
- off = 0;
- list_for_each(pos, &my_pages) {
- unsigned long to_copy;
- page = list_entry(pos, struct page, list);
-
- to_copy = min(copied - off, PAGE_SIZE);
- rc = copy_to_user(buf + off, kmap(page), to_copy);
- kunmap(page);
- if (rc) {
- rc = -EFAULT;
- goto cleanup;
- }
- off += to_copy;
- if (off >= copied)
- break;
- }
- /* NOTE(review): copied is unsigned long but the return type is
- * __s32, so a buffer larger than 2GB would truncate the byte count
- * -- presumably never that large in practice; verify. */
- rc = copied;
-
-cleanup:
- /* Shared exit path: frees every page allocated so far, which also
- * covers the partial-allocation and -EFAULT error paths. */
- list_for_each_safe(pos, n, &my_pages) {
- page = list_entry(pos, struct page, list);
- list_del(&page->list);
- __free_page(page);
- }
- return rc;
-}
-
-/* FIXME: I'm not very smart; someone smarter should make this better. */
-void
-portals_debug_msg(int subsys, int mask, char *file, const char *fn,
- const int line, unsigned long stack, char *format, ...)
-{
- va_list ap;
- unsigned long flags;
- int max_nob;
- int prefix_nob;
- int msg_nob;
- struct timeval tv;
- unsigned long base_offset;
- unsigned long debug_off;
-
- if (debug_buf == NULL) {
- printk("LustreError: portals_debug_msg: debug_buf is NULL!\n");
- return;
- }
-
- spin_lock_irqsave(&portals_debug_lock, flags);
- debug_off = atomic_read(&debug_off_a);
- /* When the debug daemon is active, throttle: once the gap between
- * our write offset and the daemon's next-write position drops below
- * DAEMON_SND_SIZE, drop messages (after emitting a single marker)
- * until the daemon catches up. */
- if (!atomic_read(&debug_daemon_state.paused)) {
- unsigned long available;
- long delta;
- long v = atomic_read(&debug_daemon_next_write);
-
- delta = debug_off - v;
- available = (delta>=0) ? debug_size-delta : -delta;
- // Check if we still have enough debug buffer for CDEBUG
- if (available < DAEMON_SND_SIZE) {
- /* Drop CDEBUG packets until enough debug_buffer is
- * available */
- if (debug_daemon_state.overlapped)
- goto out;
- /* If this is the first time, leave a marker in the
- * output */
- debug_daemon_state.overlapped = 1;
- format = "DEBUG MARKER: Debug buffer overlapped\n";
- printk(KERN_ERR "LustreError: debug daemon buffer "
- "overlapped\n");
- } else /* More space just became available */
- debug_daemon_state.overlapped = 0;
- }
-
- /* Room remaining in the buffer proper plus the DEBUG_OVERFLOW slack
- * region; the wrap-around copy at the bottom relies on that slack. */
- max_nob = debug_size - debug_off + DEBUG_OVERFLOW;
- if (max_nob <= 0) {
- spin_unlock_irqrestore(&portals_debug_lock, flags);
- printk("LustreError: logic error in portals_debug_msg: "
- "< 0 bytes to write\n");
- return;
- }
-
- /* NB since we pass a non-zero sized buffer (at least) on the first
- * print, we can be assured that by the end of all the snprinting,
- * we _do_ have a terminated buffer, even if our message got truncated.
- */
-
- do_gettimeofday(&tv);
-
- prefix_nob = snprintf(debug_buf + debug_off, max_nob,
- "%06x:%06x:%d:%lu.%06lu:%lu:%d:",
- subsys, mask, smp_processor_id(),
- tv.tv_sec, tv.tv_usec, stack, current->pid);
- max_nob -= prefix_nob;
- /* Ensure the message ends in a newline.
- * NOTE(review): this writes one byte past the existing string
- * without storing a new '\0' terminator after it, and it would
- * oops if a caller passed a string literal living in rodata --
- * verify all callers pass writable format buffers with slack. */
- if(*(format + strlen(format) - 1) != '\n')
- *(format + strlen(format)) = '\n';
-
-#if defined(__arch_um__) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20))
- /* UML before 2.4.20: the host pid lives in thread.extern_pid. */
- msg_nob = snprintf(debug_buf + debug_off + prefix_nob, max_nob,
- "%d:(%s:%d:%s()) ",
- current->thread.extern_pid, file, line, fn);
-#elif defined(__arch_um__) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- /* UML 2.4.20..2.5: the host pid moved under thread.mode.tt. */
- msg_nob = snprintf(debug_buf + debug_off + prefix_nob, max_nob,
- "%d:(%s:%d:%s()) ",
- current->thread.mode.tt.extern_pid, file, line, fn);
-#else
- msg_nob = snprintf(debug_buf + debug_off + prefix_nob, max_nob,
- "%d:(%s:%d:%s()) ",
- current->pid, file, line, fn);
-#endif
-
- /* NOTE(review): max_nob is not reduced by msg_nob before the
- * vsnprintf below, so the message body is allowed msg_nob bytes
- * more than actually remain -- presumably the DEBUG_OVERFLOW slack
- * absorbs this; confirm the slack is sized accordingly. */
- va_start(ap, format);
- msg_nob += vsnprintf(debug_buf + debug_off + prefix_nob + msg_nob,
- max_nob, format, ap);
- max_nob -= msg_nob;
- va_end(ap);
-
- /* Print to console, while msg is contiguous in debug_buf */
- /* NB safely terminated see above */
- if ((mask & D_EMERG) != 0)
- printk(KERN_EMERG "LustreError: %s",
- debug_buf + debug_off + prefix_nob);
- else if ((mask & D_ERROR) != 0)
- printk(KERN_ERR "LustreError: %s",
- debug_buf + debug_off + prefix_nob);
- else if ((mask & D_WARNING) != 0)
- printk(KERN_WARNING "Lustre: %s",
- debug_buf + debug_off + prefix_nob);
- else if (portal_printk)
- printk("<%d>Lustre: %s", portal_printk,
- debug_buf+debug_off+prefix_nob);
- /* Remember where this message started (low 16 bits only), used
- * below to decide whether to kick the daemon. */
- base_offset = debug_off & 0xFFFF;
-
- debug_off += prefix_nob + msg_nob;
- /* If the message ran past the end of the buffer proper (into the
- * DEBUG_OVERFLOW slack), move the spilled bytes -- including the
- * '\0' terminator, hence the +1 -- back to the start and wrap. */
- if (debug_off > debug_size) {
- memcpy(debug_buf, debug_buf + debug_size,
- debug_off - debug_size + 1);
- debug_off -= debug_size;
- debug_wrapped = 1;
- }
-
- atomic_set(&debug_off_a, debug_off);
- /* Wake the debug daemon once this write carried the (masked)
- * offset past the DAEMON_SND_SIZE threshold. */
- if (!atomic_read(&debug_daemon_state.paused) &&
- ((base_offset+prefix_nob+msg_nob) >= DAEMON_SND_SIZE)) {
- debug_daemon_state.daemon_event = 1;
- wake_up(&debug_daemon_state.daemon);
- }
-out:
- spin_unlock_irqrestore(&portals_debug_lock, flags);
-}
-