X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fkernel_patches%2Fpatches%2Fqsnet-rhel4-2.6.patch;h=f198a43d9ef5a09a66854a4be29ce38c7ec0172f;hb=4f958e8d4ebe2b040d4ee01e9f205e15aecf8498;hp=a17f058154ba0be55ccd71d133a7698ebbade656;hpb=113303973ec9f8484eb2355a1a6ef3c4c7fd6a56;p=fs%2Flustre-release.git diff --git a/lustre/kernel_patches/patches/qsnet-rhel4-2.6.patch b/lustre/kernel_patches/patches/qsnet-rhel4-2.6.patch index a17f058..f198a43 100644 --- a/lustre/kernel_patches/patches/qsnet-rhel4-2.6.patch +++ b/lustre/kernel_patches/patches/qsnet-rhel4-2.6.patch @@ -1,7 +1,79 @@ -diff -urN clean/arch/i386/defconfig linux-2.6.9/arch/i386/defconfig ---- clean/arch/i386/defconfig 2004-10-18 17:54:38.000000000 -0400 -+++ linux-2.6.9/arch/i386/defconfig 2005-10-10 17:47:17.000000000 -0400 -@@ -119,6 +119,8 @@ +Index: linux-269-5502/fs/open.c +=================================================================== +--- linux-269-5502.orig/fs/open.c ++++ linux-269-5502/fs/open.c +@@ -1029,6 +1029,8 @@ out_error: + goto out; + } + ++EXPORT_SYMBOL(sys_open); ++ + #ifndef __alpha__ + + /* +Index: linux-269-5502/fs/read_write.c +=================================================================== +--- linux-269-5502.orig/fs/read_write.c ++++ linux-269-5502/fs/read_write.c +@@ -145,6 +145,7 @@ asmlinkage off_t sys_lseek(unsigned int + bad: + return retval; + } ++EXPORT_SYMBOL(sys_lseek); + + #ifdef __ARCH_WANT_SYS_LLSEEK + asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high, +Index: linux-269-5502/fs/select.c +=================================================================== +--- linux-269-5502.orig/fs/select.c ++++ linux-269-5502/fs/select.c +@@ -539,3 +539,4 @@ out_fds: + poll_freewait(&table); + return err; + } ++EXPORT_SYMBOL_GPL(sys_poll); +Index: linux-269-5502/fs/exec.c +=================================================================== +--- linux-269-5502.orig/fs/exec.c ++++ linux-269-5502/fs/exec.c +@@ -56,6 +56,8 @@ + #include + #endif + ++#include ++ + int core_uses_pid; + char core_pattern[65] = "core"; + int suid_dumpable = 0; +@@ -1214,6 +1216,9 @@ int do_execve(char * filename, + if (retval < 0) + goto out; + ++ /* notify any ptrack callbacks of the process exec */ ++ ptrack_call_callbacks(PTRACK_PHASE_EXEC, NULL); ++ + retval = search_binary_handler(bprm,regs); + if (retval >= 0) { + free_arg_pages(bprm); +Index: linux-269-5502/arch/i386/Kconfig +=================================================================== +--- linux-269-5502.orig/arch/i386/Kconfig ++++ linux-269-5502/arch/i386/Kconfig +@@ -960,6 +960,9 @@ config REGPARM + generate incorrect output with certain kernel constructs when + -mregparm=3 is used. + ++source "mm/Kconfig" ++source "kernel/Kconfig" ++ + endmenu + + +Index: linux-269-5502/arch/i386/defconfig +=================================================================== +--- linux-269-5502.orig/arch/i386/defconfig ++++ linux-269-5502/arch/i386/defconfig +@@ -119,6 +119,8 @@ CONFIG_MTRR=y CONFIG_IRQBALANCE=y CONFIG_HAVE_DEC_LOCK=y # CONFIG_REGPARM is not set @@ -10,23 +82,25 @@ diff -urN clean/arch/i386/defconfig linux-2.6.9/arch/i386/defconfig # # Power management options (ACPI, APM) -diff -urN clean/arch/i386/Kconfig linux-2.6.9/arch/i386/Kconfig ---- clean/arch/i386/Kconfig 2005-05-13 13:39:03.000000000 -0400 -+++ linux-2.6.9/arch/i386/Kconfig 2005-10-10 17:47:17.000000000 -0400 -@@ -946,6 +946,9 @@ - support. As of this writing the exact hardware interface is - strongly in flux, so no good recommendation can be made. 
+Index: linux-269-5502/arch/ia64/Kconfig +=================================================================== +--- linux-269-5502.orig/arch/ia64/Kconfig ++++ linux-269-5502/arch/ia64/Kconfig +@@ -316,6 +316,9 @@ config IA64_PALINFO + To use this option, you have to ensure that the "/proc file system + support" (CONFIG_PROC_FS) is enabled, too. +source "mm/Kconfig" +source "kernel/Kconfig" -+ - endmenu - ++ + source "drivers/firmware/Kconfig" -diff -urN clean/arch/ia64/defconfig linux-2.6.9/arch/ia64/defconfig ---- clean/arch/ia64/defconfig 2004-10-18 17:53:12.000000000 -0400 -+++ linux-2.6.9/arch/ia64/defconfig 2005-10-10 17:47:17.000000000 -0400 -@@ -83,6 +83,8 @@ + source "fs/Kconfig.binfmt" +Index: linux-269-5502/arch/ia64/defconfig +=================================================================== +--- linux-269-5502.orig/arch/ia64/defconfig ++++ linux-269-5502/arch/ia64/defconfig +@@ -83,6 +83,8 @@ CONFIG_IA32_SUPPORT=y CONFIG_COMPAT=y CONFIG_PERFMON=y CONFIG_IA64_PALINFO=y @@ -35,23 +109,25 @@ diff -urN clean/arch/ia64/defconfig linux-2.6.9/arch/ia64/defconfig # # Firmware Drivers -diff -urN clean/arch/ia64/Kconfig linux-2.6.9/arch/ia64/Kconfig ---- clean/arch/ia64/Kconfig 2005-05-13 13:39:00.000000000 -0400 -+++ linux-2.6.9/arch/ia64/Kconfig 2005-10-10 17:47:17.000000000 -0400 -@@ -299,6 +299,9 @@ - To use this option, you have to ensure that the "/proc file system - support" (CONFIG_PROC_FS) is enabled, too. +Index: linux-269-5502/arch/x86_64/Kconfig +=================================================================== +--- linux-269-5502.orig/arch/x86_64/Kconfig ++++ linux-269-5502/arch/x86_64/Kconfig +@@ -401,6 +401,9 @@ config X86_MCE_AMD + Additional support for AMD specific MCE features such as + the DRAM Error Threshold. +source "mm/Kconfig" +source "kernel/Kconfig" + - source "drivers/firmware/Kconfig" + endmenu - source "fs/Kconfig.binfmt" -diff -urN clean/arch/x86_64/defconfig linux-2.6.9/arch/x86_64/defconfig ---- clean/arch/x86_64/defconfig 2004-10-18 17:54:39.000000000 -0400 -+++ linux-2.6.9/arch/x86_64/defconfig 2005-10-10 17:47:17.000000000 -0400 -@@ -87,6 +87,8 @@ + +Index: linux-269-5502/arch/x86_64/defconfig +=================================================================== +--- linux-269-5502.orig/arch/x86_64/defconfig ++++ linux-269-5502/arch/x86_64/defconfig +@@ -87,6 +87,8 @@ CONFIG_NR_CPUS=8 CONFIG_GART_IOMMU=y CONFIG_SWIOTLB=y CONFIG_X86_MCE=y @@ -60,106 +136,357 @@ diff -urN clean/arch/x86_64/defconfig linux-2.6.9/arch/x86_64/defconfig # # Power management options -diff -urN clean/arch/x86_64/Kconfig linux-2.6.9/arch/x86_64/Kconfig ---- clean/arch/x86_64/Kconfig 2005-05-13 13:39:03.000000000 -0400 -+++ linux-2.6.9/arch/x86_64/Kconfig 2005-10-10 17:47:17.000000000 -0400 -@@ -327,6 +327,9 @@ - machine check error logs. 
See - ftp://ftp.x86-64.org/pub/linux/tools/mcelog - -+source "mm/Kconfig" -+source "kernel/Kconfig" -+ - endmenu - - -diff -urN clean/Documentation/vm/ioproc.txt linux-2.6.9/Documentation/vm/ioproc.txt ---- clean/Documentation/vm/ioproc.txt 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/Documentation/vm/ioproc.txt 2005-10-10 17:47:17.000000000 -0400 -@@ -0,0 +1,468 @@ -+Linux IOPROC patch overview -+=========================== +Index: linux-269-5502/kernel/ptrack.c +=================================================================== +--- /dev/null ++++ linux-269-5502/kernel/ptrack.c +@@ -0,0 +1,145 @@ ++/* ++ * Copyright (C) 2000 Regents of the University of California ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ * Derived from exit_actn.c by ++ * Copyright (C) 2003 Quadrics Ltd. ++ */ + -+The network interface for an HPC network differs significantly from -+network interfaces for traditional IP networks. HPC networks tend to -+be used directly from user processes and perform large RDMA transfers -+between theses processes address space. They also have a requirement -+for low latency communication, and typically achieve this by OS bypass -+techniques. This then requires a different model to traditional -+interconnects, in that a process may need to expose a large amount of -+it's address space to the network RDMA. + -+Locking down of memory has been a common mechanism for performing -+this, together with a pin-down cache implemented in user -+libraries. The disadvantage of this method is that large portions of -+the physical memory can be locked down for a single process, even if -+it's working set changes over the different phases of it's -+execution. This leads to inefficient memory utilisation - akin to the -+disadvantage of swapping compared to paging. ++#include ++#include ++#include ++#include ++#include ++#include + -+This model also has problems where memory is being dynamically -+allocated and freed, since the pin down cache is unaware that memory -+may have been released by a call to munmap() and so it will still be -+locking down the now unused pages. ++#include + -+Some modern HPC network interfaces implement their own MMU and are -+able to handle a translation fault during a network access. The -+Quadrics (http://www.quadrics.com) devices (Elan3 and Elan4) have done -+this for some time and we expect others to follow the same route in -+the relatively near future. These NICs are able to operate in an -+environment where paging occurs and do not require memory to be locked -+down. The advantage of this is that the user process can expose large -+portions of it's address space without having to worry about physical -+memory constraints. 
++int ++ptrack_register (ptrack_callback_t callback, void *arg) ++{ ++ struct ptrack_desc *desc = kmalloc (sizeof (struct ptrack_desc), GFP_KERNEL); ++ ++ if (desc == NULL) ++ return -ENOMEM; + -+However should the operating system decide to swap a page to disk, -+then the NIC must be made aware that it should no longer read/write -+from this memory, but should generate a translation fault instead. ++ desc->callback = callback; ++ desc->arg = arg; ++ ++ list_add_tail (&desc->link, ¤t->ptrack_list); ++ ++ return 0; ++} + -+The ioproc patch has been developed to provide a mechanism whereby the -+device driver for a NIC can be aware of when a user process's address -+translations change, either by paging or by explicitly mapping or -+unmapping memory. ++void ++ptrack_deregister (ptrack_callback_t callback, void *arg) ++{ ++ struct list_head *el, *nel; ++ ++ list_for_each_safe (el, nel, ¤t->ptrack_list) { ++ struct ptrack_desc *desc = list_entry (el, struct ptrack_desc, link); ++ ++ if (desc->callback == callback && desc->arg == arg) { ++ list_del (&desc->link); ++ kfree (desc); ++ } ++ } ++} + -+The patch involves inserting callbacks where translations are being -+invalidated to notify the NIC that the memory behind those -+translations is no longer visible to the application (and so should -+not be visible to the NIC). This callback is then responsible for -+ensuring that the NIC will not access the physical memory that was -+being mapped. ++int ++ptrack_registered (ptrack_callback_t callback, void *arg) ++{ ++ struct list_head *el; ++ ++ list_for_each (el, ¤t->ptrack_list) { ++ struct ptrack_desc *desc = list_entry (el, struct ptrack_desc, link); ++ ++ if (desc->callback == callback && desc->arg == arg) ++ return 1; ++ } ++ return 0; ++} ++ ++int ++ptrack_call_callbacks (int phase, struct task_struct *child) ++{ ++ struct list_head *el, *nel; ++ struct ptrack_desc *new; ++ int res; + -+An ioproc invalidate callback in the kswapd code could be utilised to -+prevent memory from being paged out if the NIC is unable to support -+network page faulting. ++ if (phase == PTRACK_PHASE_CLONE) ++ INIT_LIST_HEAD (&child->ptrack_list); + -+For NICs which support network page faulting, there is no requirement -+for a user level pin down cache, since they are able to page-in their -+translations on the first communication using a buffer. However this -+is likely to be inefficient, resulting in slow first use of the -+buffer. If the communication buffers were continually allocated and -+freed using mmap based malloc() calls then this would lead to all -+communications being slower than desirable. ++ list_for_each_safe (el, nel, ¤t->ptrack_list) { ++ struct ptrack_desc *desc = list_entry (el, struct ptrack_desc, link); ++ ++ res = desc->callback (desc->arg, phase, child); ++ ++ switch (phase) ++ { ++ case PTRACK_PHASE_EXIT: ++ list_del (&desc->link); ++ kfree (desc); ++ break; ++ ++ case PTRACK_PHASE_CLONE: ++ switch (res) ++ { ++ case PTRACK_FINISHED: ++ break; + -+To optimise these warm-up cases the ioproc patch adds calls to -+ioproc_update wherever the kernel is creating translations for a user -+process. These then allows the device driver to preload translations -+so that they are already present for the first network communication -+from a buffer. ++ case PTRACK_INNHERIT: ++ if ((new = kmalloc (sizeof (struct ptrack_desc), GFP_ATOMIC)) == NULL) ++ { ++ /* allocation failed - notify that this process is not going ++ * to be started by signalling clone failure. 
++					 */
++					desc->callback (desc->arg, PTRACK_PHASE_CLONE_FAIL, child);
++
++					goto failed;
++				}
+
++				new->callback = desc->callback;
++				new->arg = desc->arg;
++
++				list_add_tail (&new->link, &child->ptrack_list);
++				break;
+
++			 case PTRACK_DENIED:
++				goto failed;
++			}
++			break;
++		}
++	}
+
++	return 0;
+
++ failed:
++	while (! list_empty (&child->ptrack_list))
++	{
++		struct ptrack_desc *desc = list_entry (child->ptrack_list.next, struct ptrack_desc, link);
++
++		desc->callback (desc->arg, PTRACK_PHASE_CLONE_FAIL, child);
++
++		list_del (&desc->link);
++		kfree (desc);
++	}
++	return 1;
++}
++EXPORT_SYMBOL(ptrack_register);
++EXPORT_SYMBOL(ptrack_deregister);
++EXPORT_SYMBOL(ptrack_registered);
+Index: linux-269-5502/kernel/signal.c
+===================================================================
+--- linux-269-5502.orig/kernel/signal.c
++++ linux-269-5502/kernel/signal.c
+@@ -2329,6 +2329,7 @@ sys_kill(int pid, int sig)
+ 
+ 	return kill_something_info(sig, &info, pid);
+ }
++EXPORT_SYMBOL_GPL(sys_kill);
+ 
+ /**
+  *  sys_tgkill - send signal to one specific thread
+Index: linux-269-5502/kernel/Kconfig
+===================================================================
+--- /dev/null
++++ linux-269-5502/kernel/Kconfig
+@@ -0,0 +1,14 @@
++#
++# Kernel subsystem specific config
++#
++
++# Support for Process Tracking callbacks
++#
++config PTRACK
++	bool "Enable PTRACK process tracking hooks"
++	default y
++	help
++	  This option enables hooks to be called when processes are
++	  created and destroyed in order for a resource management
++	  system to know which processes are members of a "job" and
++	  to be able to clean up when the job is terminated.
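[Editorial illustration - not part of the patch: a minimal sketch of how a
resource-management module might consume the PTRACK hooks added above. The
callback signature and the PTRACK_* phase/result constants are taken from
kernel/ptrack.c as shown in this patch; the linux/ptrack.h header assumed
below is added elsewhere in the patch but is not shown in this excerpt, and
the names job_ptrack_callback() and job_track_current() are hypothetical.]

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrack.h>

/* Invoked by ptrack_call_callbacks() as desc->callback(desc->arg, phase, child). */
static int job_ptrack_callback(void *arg, int phase, struct task_struct *child)
{
	switch (phase) {
	case PTRACK_PHASE_CLONE:
		/* "child" is the newly cloned task.  Returning PTRACK_INNHERIT
		 * asks for this callback to be copied onto the child's
		 * ptrack_list so the whole job tree stays tracked; returning
		 * PTRACK_DENIED would cause do_fork() to kill the child. */
		return PTRACK_INNHERIT;

	case PTRACK_PHASE_CLONE_FAIL:
		/* The clone announced earlier will not complete. */
		return PTRACK_FINISHED;

	case PTRACK_PHASE_EXEC:
	case PTRACK_PHASE_EXIT:
	default:
		/* On PTRACK_PHASE_EXIT the descriptor is unlinked and freed
		 * by ptrack_call_callbacks() itself. */
		return PTRACK_FINISHED;
	}
}

/* Must run in the context of the process to be tracked, since
 * ptrack_register() links the descriptor onto current->ptrack_list. */
int job_track_current(void *job_cookie)
{
	if (ptrack_registered(job_ptrack_callback, job_cookie))
		return 0;	/* this job is already being tracked */

	return ptrack_register(job_ptrack_callback, job_cookie);
}

[End of editorial illustration.]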
+Index: linux-269-5502/kernel/Makefile
+===================================================================
+--- linux-269-5502.orig/kernel/Makefile
++++ linux-269-5502/kernel/Makefile
+@@ -26,6 +26,7 @@ obj-$(CONFIG_AUDIT) += audit.o
+ obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
+ obj-$(CONFIG_AUDITFILESYSTEM) += auditfs.o
+ obj-$(CONFIG_KPROBES) += kprobes.o
++obj-$(CONFIG_PTRACK) += ptrack.o
+ 
+ ifneq ($(CONFIG_IA64),y)
+ # According to Alan Modra , the -fno-omit-frame-pointer is
+Index: linux-269-5502/kernel/exit.c
+===================================================================
+--- linux-269-5502.orig/kernel/exit.c
++++ linux-269-5502/kernel/exit.c
+@@ -32,6 +32,8 @@
+ #include 
+ #include 
+ 
++#include <linux/ptrack.h>
++
+ extern void sem_exit (void);
+ extern struct task_struct *child_reaper;
+ 
+@@ -825,6 +827,9 @@ asmlinkage NORET_TYPE void do_exit(long
+ 		current->tux_exit();
+ 	}
+ 
++	/* Notify any ptrack callbacks of the process exit */
++	ptrack_call_callbacks(PTRACK_PHASE_EXIT, NULL);
++
+ 	if (unlikely(tsk->audit_context))
+ 		audit_free(tsk);
+ 	__exit_mm(tsk);
+Index: linux-269-5502/kernel/fork.c
+===================================================================
+--- linux-269-5502.orig/kernel/fork.c
++++ linux-269-5502/kernel/fork.c
+@@ -14,6 +14,7 @@
+ #include 
+ #include 
+ #include 
++#include <linux/ptrack.h>
+ #include 
+ #include 
+ #include 
+@@ -443,6 +444,9 @@ static struct mm_struct * mm_init(struct
+ 	mm->page_table_lock = SPIN_LOCK_UNLOCKED;
+ 	mm->ioctx_list_lock = RW_LOCK_UNLOCKED;
+ 	mm->ioctx_list = NULL;
++#ifdef CONFIG_IOPROC
++	mm->ioproc_ops = NULL;
++#endif
+ 	mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm);
+ 	mm->free_area_cache = TASK_UNMAPPED_BASE;
+ 
+@@ -1312,6 +1316,11 @@ long do_fork(unsigned long clone_flags,
+ 			set_tsk_thread_flag(p, TIF_SIGPENDING);
+ 		}
+ 
++		if (ptrack_call_callbacks(PTRACK_PHASE_CLONE, p)) {
++			sigaddset(&p->pending.signal, SIGKILL);
++			set_tsk_thread_flag(p, TIF_SIGPENDING);
++		}
++
+ 		if (!(clone_flags & CLONE_STOPPED))
+ 			wake_up_new_task(p, clone_flags);
+ 		else
+Index: linux-269-5502/Makefile
+===================================================================
+--- linux-269-5502.orig/Makefile
++++ linux-269-5502/Makefile
+@@ -1,7 +1,7 @@
+ VERSION = 2
+ PATCHLEVEL = 6
+ SUBLEVEL = 9
+-EXTRAVERSION = -prep
++EXTRAVERSION = -prep.qp3.5.34.4qsnet
+ RHEL_VERSION = 4
+ RHEL_UPDATE = 5
+ NAME=AC 1
+Index: linux-269-5502/Documentation/vm/ioproc.txt
+===================================================================
+--- /dev/null
++++ linux-269-5502/Documentation/vm/ioproc.txt
+@@ -0,0 +1,467 @@
++Linux IOPROC patch overview
++===========================
++
++The network interface for an HPC network differs significantly from
++network interfaces for traditional IP networks. HPC networks tend to
++be used directly from user processes and perform large RDMA transfers
++between these processes' address spaces. They also have a requirement
++for low latency communication, and typically achieve this by OS bypass
++techniques. This then requires a different model to traditional
++interconnects, in that a process may need to expose a large amount of
++its address space to the network RDMA.
++
++Locking down of memory has been a common mechanism for performing
++this, together with a pin-down cache implemented in user
++libraries. The disadvantage of this method is that large portions of
++the physical memory can be locked down for a single process, even if
++its working set changes over the different phases of its
++execution. 
This leads to inefficient memory utilisation - akin to the
++disadvantage of swapping compared to paging.
++
++This model also has problems where memory is being dynamically
++allocated and freed, since the pin down cache is unaware that memory
++may have been released by a call to munmap() and so it will still be
++locking down the now unused pages.
++
++Some modern HPC network interfaces implement their own MMU and are
++able to handle a translation fault during a network access. The
++Quadrics (http://www.quadrics.com) devices (Elan3 and Elan4) have done
++this for some time and we expect others to follow the same route in
++the relatively near future. These NICs are able to operate in an
++environment where paging occurs and do not require memory to be locked
++down. The advantage of this is that the user process can expose large
++portions of its address space without having to worry about physical
++memory constraints.
++
++However, should the operating system decide to swap a page to disk,
++then the NIC must be made aware that it should no longer read/write
++from this memory, but should generate a translation fault instead.
++
++The ioproc patch has been developed to provide a mechanism whereby the
++device driver for a NIC can be aware of when a user process's address
++translations change, either by paging or by explicitly mapping or
++unmapping memory.
++
++The patch involves inserting callbacks where translations are being
++invalidated to notify the NIC that the memory behind those
++translations is no longer visible to the application (and so should
++not be visible to the NIC). This callback is then responsible for
++ensuring that the NIC will not access the physical memory that was
++being mapped.
++
++An ioproc invalidate callback in the kswapd code could be utilised to
++prevent memory from being paged out if the NIC is unable to support
++network page faulting.
++
++For NICs which support network page faulting, there is no requirement
++for a user level pin down cache, since they are able to page-in their
++translations on the first communication using a buffer. However this
++is likely to be inefficient, resulting in slow first use of the
++buffer. If the communication buffers were continually allocated and
++freed using mmap based malloc() calls then this would lead to all
++communications being slower than desirable.
++
++To optimise these warm-up cases the ioproc patch adds calls to
++ioproc_update wherever the kernel is creating translations for a user
++process. These then allow the device driver to preload translations
++so that they are already present for the first network communication
++from a buffer.
++
++Linux 2.6 IOPROC implementation details
++=======================================
++
++The Linux IOPROC patch adds hooks to the Linux VM code whenever page
++table entries are being created and/or invalidated. IOPROC device
++drivers can register their interest in being informed of such changes
++by registering an ioproc_ops structure which is defined as follows;
++
++extern int ioproc_register_ops(struct mm_struct *mm, struct ioproc_ops *ip);
++extern int ioproc_unregister_ops(struct mm_struct *mm, struct ioproc_ops *ip);
++
++typedef struct ioproc_ops {
++	struct ioproc_ops *next;
++	void *arg;
+
+	void (*release)(void *arg, struct mm_struct *mm);
+	void (*sync_range)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end);
@@ -177,7 +504,7 @@ 
+ioproc_register_ops +=================== +This function should be called by the IOPROC device driver to register -+its interest in PTE changes for the process associated with the passed ++it's interest in PTE changes for the process associated with the passed +in mm_struct. + +The ioproc registration is not inherited across fork() and should be @@ -200,7 +527,7 @@ diff -urN clean/Documentation/vm/ioproc.txt linux-2.6.9/Documentation/vm/ioproc. +ioproc_ops struct +================= +A linked list ioproc_ops structures is hung off the user process -+mm_struct (linux/sched.h). At each hook point in the patched kernel ++mm_struct (linux/sched.h). At each hook point in the patched kernel, +the ioproc patch will call the associated ioproc_ops callback function +pointer in turn for each registered structure. + @@ -209,12 +536,12 @@ diff -urN clean/Documentation/vm/ioproc.txt linux-2.6.9/Documentation/vm/ioproc. +(e.g. find_pte_map()). These callbacks should not modify the Linux +kernel VM state or PTE entries. + -+The ioproc_ops callback function pointers are defined as follows; ++The ioproc_ops callback function pointers are: + +ioproc_release +============== -+The release hook is called when a program exits and all its vma areas -+are torn down and unmapped. i.e. during exit_mmap(). Before each ++The release hook is called when a program exits and all it's vma areas ++are torn down and unmapped, i.e. during exit_mmap(). Before each +release hook is called the ioproc_ops structure is unlinked from the +mm_struct. + @@ -228,7 +555,7 @@ diff -urN clean/Documentation/vm/ioproc.txt linux-2.6.9/Documentation/vm/ioproc. +or write by the IOPROC device to the associated pages should cause the +page to be marked as referenced or modified. + -+Called holding the mm->page_table_lock ++Called holding the mm->page_table_lock. + +ioproc_invalidate_[range|page] +============================== @@ -237,7 +564,7 @@ diff -urN clean/Documentation/vm/ioproc.txt linux-2.6.9/Documentation/vm/ioproc. +kernel. After this call the IOPROC must not access the physical memory +again unless a new translation is loaded. + -+Called holding the mm->page_table_lock ++Called holding the mm->page_table_lock. + +ioproc_update_[range|page] +========================== @@ -247,7 +574,7 @@ diff -urN clean/Documentation/vm/ioproc.txt linux-2.6.9/Documentation/vm/ioproc. +opportunity to load translations speculatively, which can improve +performance by avoiding device translation faults. + -+Called holding the mm->page_table_lock ++Called holding the mm->page_table_lock. + +ioproc_change_protection +======================== @@ -257,16 +584,16 @@ diff -urN clean/Documentation/vm/ioproc.txt linux-2.6.9/Documentation/vm/ioproc. +The IOPROC must not be able to write to a read-only page, so if the +permissions are downgraded then it must honour them. If they are +upgraded it can treat this in the same way as the -+ioproc_update_[range|page]() calls ++ioproc_update_[range|page]() calls. + -+Called holding the mm->page_table_lock ++Called holding the mm->page_table_lock. + + +Linux 2.6 IOPROC patch details +============================== + +Here are the specific details of each ioproc hook added to the Linux -+2.6 VM system and the reasons for doing so; ++2.6 VM system and the reasons for doing so: + +++++ FILE + mm/fremap.c @@ -543,96440 +870,381 @@ diff -urN clean/Documentation/vm/ioproc.txt linux-2.6.9/Documentation/vm/ioproc. 
+ADDED HOOK + ioproc_invalidate_range + -+ -+-- Last update DavidAddison - 17 Aug 2004 -diff -urN clean/drivers/net/qsnet/eip/eip_linux.c linux-2.6.9/drivers/net/qsnet/eip/eip_linux.c ---- clean/drivers/net/qsnet/eip/eip_linux.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/eip/eip_linux.c 2005-09-07 10:34:58.000000000 -0400 -@@ -0,0 +1,1575 @@ ++-- Last update Daniel J Blueman - 24 Mar 2006 +Index: linux-269-5502/mm/ioproc.c +=================================================================== +--- /dev/null ++++ linux-269-5502/mm/ioproc.c +@@ -0,0 +1,52 @@ +/* -+ * Copyright (c) 2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file ++ * Copyright (C) 2006 Quadrics Ltd ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. + * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + -+#ident "@(#)$Id: eip_linux.c,v 1.96.2.3 2005/09/07 14:34:58 mike Exp $" -+ -+#include -+#include ++/* ++ * Registration for IO processor page table updates. ++ */ + -+#include -+#include -+#include -+#include -+#include -+#include +#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#undef ASSERT -+#include -+#include -+ -+ -+ -+#include -+#include -+ -+#include "eip_linux.h" -+#include "eip_stats.h" -+ -+#ifdef UNUSED -+static void eip_skb_display(struct sk_buff *); -+#endif -+static void eip_iph_display(struct iphdr *); -+#ifdef UNUSED -+static void eip_eiph_display(EIP_HEADER *); -+static void eip_packet_display(unsigned char *); -+#endif -+static void eip_tmd_display(EIP_TMD *); -+static void eip_tmd_head_display(EIP_TMD_HEAD *); -+static void eip_rmd_display(EIP_RMD *); -+static void eip_rmd_head_display(EIP_RMD_HEAD *); -+ -+static void eip_rmd_reclaim(EIP_RMD *); -+ -+static inline EP_NMH *eip_dma_reserve(int, int); -+static inline void __eip_tmd_load(EIP_TMD *, EP_RAILMASK *); -+static inline void __eip_tmd_unload(EIP_TMD *); -+static inline unsigned long eip_buff_alloc(int, int); -+static inline void eip_buff_free(unsigned long, int); -+static struct iphdr *eip_ipfrag_get(char *); -+static inline void eip_rmd_free(EIP_RMD *); -+static inline void eip_skb_load(EIP_RMD *); -+static inline void eip_skb_unload(EIP_RMD *); -+static inline void eip_rmd_requeue(EIP_RMD *); -+static EIP_RMD *eip_rmd_alloc(int, int); -+static int eip_rmd_alloc_replace(EIP_RMD *, int, int); -+static int eip_rmd_alloc_queue(int, int, int, int); -+static int eip_rmds_alloc(void); -+static void eip_rxhandler(EP_RXD *); -+static void eip_rx_tasklet(unsigned long); -+static inline void eip_tmd_init(EIP_TMD *, unsigned long, EIP_TMD_HEAD *, unsigned long, int); -+static inline EIP_TMD *eip_tmd_get(int); -+static inline void eip_tmd_put(EIP_TMD *); -+static inline void eip_tmd_load(EIP_TMD *); -+static inline void eip_tmd_unload(EIP_TMD *); -+static inline EIP_TMD *eip_tmd_alloc_queue(EIP_TMD *, EIP_TMD_HEAD *, int); -+static 
inline EIP_TMD *eip_tmd_alloc_queue_copybreak(EIP_TMD_HEAD *, int); -+static inline EIP_TMD *eip_tmd_alloc_queue_aggreg(EIP_TMD_HEAD *, int); -+static int eip_tmds_alloc(void); -+int eip_hard_start_xmit(struct sk_buff *, struct net_device *); -+static inline int eip_do_xmit(EIP_TMD *, EP_NMD *i, EP_PAYLOAD *); -+static void eip_txhandler(EP_TXD *, void *, EP_STATUS); -+static void eip_tx_tasklet(unsigned long); -+void eip_stop_queue(void); -+void eip_start_queue(void); -+static int eip_open(struct net_device *); -+static int eip_close(struct net_device *); -+static struct net_device_stats *eip_get_stats(struct net_device *); -+static int eip_change_mtu(struct net_device *, int); -+ -+static int eip_rx_dropping = 0; -+static int eip_rx_tasklet_locked = 1; -+ -+/* Global */ -+struct timer_list eip_rx_tasklet_timer; -+ -+EIP_RX *eip_rx = NULL; -+EIP_TX *eip_tx = NULL; -+int eip_checksum_state=CHECKSUM_NONE; ++#include + -+int tmd_max = EIP_TMD_MAX_NR; -+int rmd_max = EIP_RMD_MAX_NR; -+int rx_envelope_nr = EIP_RX_ENVELOPE_NR; -+int rx_granularity = EIP_RX_GRANULARITY; -+int tx_copybreak_max = EIP_TX_COPYBREAK_MAX; -+EP_RAILMASK tx_railmask = EP_RAILMASK_ALL; -+int eipdebug = 0; ++#include ++#include + -+#ifdef UNUSED -+static void eip_skb_display(struct sk_buff *skb) -+{ -+ if (skb) { -+ __EIP_DBG_PRINTF("SKB [%p] : len %d truesize %d proto %x pkt type %x cloned %d users %d summed %d\n", -+ skb, skb->len, skb->truesize, skb->protocol, skb->pkt_type, skb->cloned, atomic_read(&skb->users), skb->ip_summed); -+ __EIP_DBG_PRINTF("SKB [%p] : skb_shinfo dataref %d nr_frags %d frag_list[%p] (device %p)\n", skb, -+ atomic_read(&skb_shinfo(skb)->dataref), skb_shinfo(skb)->nr_frags, skb_shinfo(skb)->frag_list, skb->dev); -+ __EIP_DBG_PRINTF("SKB [%p] : head[%p] data[%p] tail [%p] end [%p] data_len [%d]\n", skb, skb->head, skb->data, -+ skb->tail, skb->end, skb->data_len); -+ __EIP_DBG_PRINTF("SKB [%p] : Transport Layer h.(th, uh, icmph, raw)[%p]\n", skb, skb->h.th); -+ __EIP_DBG_PRINTF("SKB [%p] : Network Layer nh.(iph, arph, raw)[%p]\n", skb, skb->nh.iph); -+ __EIP_DBG_PRINTF("SKB [%p] : Link Layer mac.(ethernet, raw)[%p]\n", skb, skb->mac.ethernet); -+ return; -+ } -+ EIP_ERR_PRINTF("SKB IS NULL - NO SKB TO DISPLAY\n"); -+} -+#endif -+static void eip_iph_display(struct iphdr *iph) -+{ -+ if (iph) { -+ __EIP_DBG_PRINTF("IPH [%p] : version %d header len %d TOS 0x%x Total len %d\n", -+ iph, iph->version, iph->ihl, htons(iph->tos), htons(iph->tot_len)); -+ __EIP_DBG_PRINTF("IPH [%p] : id %d frag flags 0x%x offset %d\n", -+ iph, htons(iph->id), (iph->frag_off & htons(IP_CE | IP_DF | IP_MF)) >> 4, -+ (htons(iph->frag_off) << 3) & IP_OFFSET); -+ __EIP_DBG_PRINTF("IPH [%p] : TTL %d proto %d header checksum 0x%x\n", iph, iph->ttl, iph->protocol, iph->check); -+ __EIP_DBG_PRINTF("IPH [%p] : IP src %u.%u.%u.%u dest %u.%u.%u.%u\n", iph, -+ ((unsigned char *)&(iph->saddr))[0],((unsigned char *)&(iph->saddr))[1], ((unsigned char *)&(iph->saddr))[2],((unsigned char *)&(iph->saddr))[3], -+ ((unsigned char *)&(iph->daddr))[0],((unsigned char *)&(iph->daddr))[1], ((unsigned char *)&(iph->daddr))[2],((unsigned char *)&(iph->daddr))[3]); -+ return; -+ } -+ EIP_ERR_PRINTF("IPH IS NULL - NO IPH TO DISPLAY\n"); -+} -+#ifdef UNUSED -+static void eip_eiph_display(EIP_HEADER * eiph) -+{ -+ if (eiph) { -+ __EIP_DBG_PRINTF("EIPH [%p] : dhost %04x.%04x.%04x sap %x\n", eiph, eiph->h_dhost.ip_bcast, eiph->h_dhost.ip_inst, -+ eiph->h_dhost.ip_addr, eiph->h_sap); -+ __EIP_DBG_PRINTF("EIPH [%p] : shost %04x.%04x.%04x \n", eiph, 
eiph->h_shost.ip_bcast, eiph->h_shost.ip_inst, -+ eiph->h_shost.ip_addr); -+ return; -+ } -+ EIP_ERR_PRINTF("EIPH IS NULL - NO EIPH TO DISPLAY\n"); -+} -+static void eip_packet_display(unsigned char *data) -+{ -+ eip_eiph_display((EIP_HEADER *) data); -+ eip_iph_display((struct iphdr *) (data + EIP_HEADER_PAD + ETH_HLEN)); -+} -+#endif -+static void eip_tmd_display(EIP_TMD * tmd) -+{ -+ if (tmd) { -+ __EIP_DBG_PRINTF("\t\tTMD [%p] : next[%p] skb[%p] DVMA[%d]\n", tmd, tmd->chain.next, tmd->skb, tmd->dvma_idx); -+ if (tmd->dma_base) -+ __EIP_DBG_PRINTF("TMD [%p] : head[%p] *data 0x%lx\n", tmd, tmd->head, *((unsigned long *) tmd->dma_base)); -+ else -+ __EIP_DBG_PRINTF("TMD [%p] : head[%p] NO DATA !!!\n", tmd, tmd->head); -+ __EIP_DBG_PRINTF("TMD [%p] : DMA(%lx,%d,%d) ebase[%x]\n",tmd, tmd->dma_base, tmd->dma_len, tmd->nmd.nmd_len, -+ tmd->nmd.nmd_addr); -+ return; -+ } -+ EIP_ERR_PRINTF("TMD IS NULL - NO TMD TO DISPLAY\n"); -+ -+} -+static void eip_ipf_display(EIP_IPFRAG * ipf) ++int ioproc_register_ops(struct mm_struct *mm, struct ioproc_ops *ip) +{ -+ if (ipf) { -+ __EIP_DBG_PRINTF("IPF[%p] : datagram len %d dma correction %d uts %lx frag_nr %d\n", ipf, ipf->datagram_len, -+ ipf->dma_correction, ipf->timestamp.tv_usec, ipf->frag_nr); -+ eip_tmd_display((EIP_TMD *) ipf); -+ return; -+ } -+ EIP_ERR_PRINTF("IPF IS NULL - NO IPF TO DISPLAY\n"); -+} ++ ip->next = mm->ioproc_ops; ++ mm->ioproc_ops = ip; + -+static void eip_tmd_head_display(EIP_TMD_HEAD * head) -+{ -+ if (head) { -+ __EIP_DBG_PRINTF("TMD HEAD [%p] : handle[%p] tmds[%p] %3.3d/%3.3d/%3.3d\n", head, head->handle, head->tmd, -+ EIP_STAT_QUEUED_GET(&head->stats), EIP_STAT_ALLOC_GET(&head->stats), -+ eip_tx->tmd_max_nr); -+ return; -+ } -+ EIP_ERR_PRINTF("TMD HEAD IS NULL - NO TMD HEAD TO DISPLAY\n"); -+} -+static void eip_rmd_display(EIP_RMD * rmd) -+{ -+ if (rmd) { -+ __EIP_DBG_PRINTF("RMD [%p] : next[%p] rxd[%p] DVMA[%d]\n", rmd, rmd->chain.next, rmd->rxd, rmd->dvma_idx); -+ __EIP_DBG_PRINTF("RMD [%p] : head[%p]\n", rmd, rmd->head); -+ __EIP_DBG_PRINTF("RMD [%p] : ebase[%x]\n", rmd, rmd->nmd.nmd_addr); -+ return; -+ } -+ EIP_ERR_PRINTF("RMD IS NULL - NO RMD TO DISPLAY\n"); -+} -+static void eip_rmd_head_display(EIP_RMD_HEAD * head) -+{ -+ if (head) { -+ __EIP_DBG_PRINTF("RMD HEAD [%p] : rcvr[%p] handle[%p] busy list[%p]\n", head, head->rcvr, head->handle, head->busy_list); -+ __EIP_DBG_PRINTF("RMD HEAD [%p] : %3.3d/%3.3d/%3.3d\n", head, -+ EIP_STAT_QUEUED_GET(&head->stats), EIP_STAT_ALLOC_GET(&head->stats), eip_rx->rmd_max_nr); -+ return; -+ } -+ EIP_ERR_PRINTF("RMD HEAD IS NULL - NO RMD HEAD TO DISPLAY\n"); ++ return 0; +} + -+/* END - DISPLAY FUNCTIONS */ -+static inline EP_NMH *eip_dma_reserve(int pages_nr, int perm) -+{ -+ EP_NMH *handle = ep_dvma_reserve(eip_tx->ep_system, pages_nr, perm); -+ -+ if (handle) -+ EIP_DBG_PRINTF(EIP_DBG_EP_DVMA, "HANDLE [%p] %d pages of elan address space reserved\n", -+ handle, pages_nr); -+ else -+ EIP_ERR_PRINTF("cannot reserve %d page(s) of elan address space\n", pages_nr); -+ -+ return handle; -+} ++EXPORT_SYMBOL_GPL(ioproc_register_ops); + -+static inline void __eip_tmd_load(EIP_TMD * tmd, EP_RAILMASK *rmask) ++int ioproc_unregister_ops(struct mm_struct *mm, struct ioproc_ops *ip) +{ -+ EIP_ASSERT(tmd->nmd.nmd_len > 0); -+ -+ ep_dvma_load(eip_tx->ep_system, NULL, (caddr_t) tmd->dma_base, tmd->nmd.nmd_len, tmd->head->handle, -+ tmd->dvma_idx, rmask, &tmd->nmd); -+} ++ struct ioproc_ops **tmp; + -+static inline void __eip_tmd_unload(EIP_TMD * tmd) -+{ -+ EIP_ASSERT(tmd->nmd.nmd_addr && 
tmd->head->handle); -+ -+ ep_dvma_unload(eip_tx->ep_system, tmd->head->handle, &tmd->nmd); -+ tmd->nmd.nmd_addr = 0; -+} -+static inline unsigned long eip_buff_alloc(int buff_len, int gfp) -+{ -+ unsigned long buff_base = (buff_len < PAGE_SIZE) ? -+ (unsigned long) kmalloc(buff_len, gfp) : -+ __get_dma_pages(gfp, get_order(buff_len)); -+ -+ if (likely(buff_base)) -+ return buff_base; ++ for (tmp = &mm->ioproc_ops; *tmp && *tmp != ip; tmp = &(*tmp)->next) ; ++ if (*tmp) { ++ *tmp = ip->next; ++ return 0; ++ } + -+ EIP_ERR_PRINTF("cannot allocate %db of memory\n", buff_len); -+ return 0; -+} -+static inline void eip_buff_free(unsigned long buff_base, int buff_len) -+{ -+ (buff_len < PAGE_SIZE) ? kfree((void *) buff_base) : -+ free_pages(buff_base, get_order(buff_len)); ++ return -EINVAL; +} -+static struct iphdr *eip_ipfrag_get(char *data) -+{ -+ struct ethhdr *eh = (struct ethhdr *) (data); -+ struct iphdr *iph; -+ -+ if (eh->h_proto == htons(ETH_P_IP)) { -+ iph = (struct iphdr *) ((char *) eh + ETH_HLEN); -+ -+ /* EIP_DBG(eip_iph_display(iph)); */ -+ -+ if ((iph->frag_off & htons(IP_MF | IP_OFFSET))) -+ return iph; -+ } -+ return NULL; -+} -+ -+static inline void eip_rmd_free(EIP_RMD * rmd) -+{ -+ EIP_ASSERT2(rmd->nmd.nmd_addr == 0, eip_rmd_display, rmd); -+ -+ if ( rmd->skb != NULL) -+ kfree_skb (rmd->skb); -+ -+ kfree(rmd); -+ -+ EIP_DBG_PRINTF(EIP_DBG_MEMFREE, "RMD [%p] : FREED\n", rmd); -+} -+static inline void eip_skb_load(EIP_RMD * rmd) -+{ -+ EP_RAILMASK rmask = rmd->rxd ? ep_rxd_railmask (rmd->rxd) : 0; -+ -+ EIP_ASSERT(skb_tailroom(rmd->skb) > 0); -+ -+ ep_dvma_load(eip_tx->ep_system, NULL, (caddr_t) rmd->skb->data, skb_tailroom(rmd->skb), rmd->head->handle, -+ rmd->dvma_idx, &rmask, &rmd->nmd); -+ -+ EIP_DBG_PRINTF(EIP_DBG_RMD_EP_DVMA, "RMD [%p] : LOADED\n", rmd); -+} -+static inline void eip_skb_unload(EIP_RMD * rmd) -+{ -+ EIP_ASSERT(rmd->nmd.nmd_addr && rmd->head->handle); -+ -+ ep_dvma_unload(eip_tx->ep_system, rmd->head->handle, &rmd->nmd); -+ rmd->nmd.nmd_addr = 0; -+ -+ EIP_DBG_PRINTF(EIP_DBG_RMD_EP_DVMA, "RMD [%p] : UNLOADED\n", rmd); -+} -+static inline void eip_rmd_requeue(EIP_RMD * rmd) -+{ -+ EIP_ASSERT(rmd->rxd); -+ -+ rmd->chain.next = NULL; -+ -+ ep_requeue_receive(rmd->rxd, eip_rxhandler, rmd, &rmd->nmd, EP_NO_ALLOC|EP_NO_SLEEP ); -+ -+ atomic_inc(&rmd->head->stats); -+ -+ EIP_DBG_PRINTF(EIP_DBG_RMD_QUEUE, "RMD [%p] : REQUEUED\n", rmd); -+} -+static EIP_RMD * eip_rmd_alloc(int svc, int gfp) -+{ -+ int buff_len = EIP_SVC_SMALLEST_LEN << svc; -+ EIP_RMD *rmd; -+ struct sk_buff *skb; -+ -+ if (!(skb = alloc_skb((buff_len - EIP_EXTRA), gfp))) -+ return NULL; -+ -+ skb_reserve(skb, 2); -+ -+ if (!(rmd = (EIP_RMD *) kmalloc(buff_len, gfp))) { -+ kfree_skb(skb); -+ return NULL; -+ } -+ -+ rmd->skb = skb; -+ -+ rmd->chain.next = NULL; -+ rmd->rxd = NULL; -+ rmd->head = &eip_rx->head[svc]; -+ -+ return rmd; -+} -+ -+static int eip_rmd_alloc_replace(EIP_RMD *rmd, int svc, int gfp) -+{ -+ struct sk_buff *skb,*old; -+ int buff_len = EIP_SVC_SMALLEST_LEN << svc; -+ -+ if (!(skb = alloc_skb(buff_len, gfp))) -+ return 1; -+ -+ skb_reserve(skb, 2); -+ -+ eip_skb_unload(rmd); -+ -+ old = rmd->skb; -+ rmd->skb = skb; -+ -+ eip_skb_load(rmd); -+ -+ eip_rmd_requeue(rmd); -+ -+ kfree_skb(old); -+ -+ return 0; -+} -+ -+static int eip_rmd_alloc_queue(int svc, int dvma_idx, int gfp, int attr) -+{ -+ EIP_RMD * rmd = eip_rmd_alloc(svc, gfp); -+ -+ if (!rmd) -+ return 1; -+ -+ EIP_STAT_ALLOC_ADD(&rmd->head->stats, 1); -+ -+ rmd->dvma_idx = dvma_idx; -+ eip_skb_load(rmd); -+ -+ 
EIP_DBG2(EIP_DBG_RMD, eip_rmd_display, rmd, "RMD [%p] : ALLOCATED for SVC 0x%x\n", rmd, svc); -+ -+ if (ep_queue_receive(rmd->head->rcvr, eip_rxhandler, (void *) rmd, &rmd->nmd, attr) == ESUCCESS) { -+ atomic_inc(&rmd->head->stats); -+ EIP_DBG_PRINTF(EIP_DBG_RMD_QUEUE, "RMD [%p] : QUEUED on SVC 0x%x\n", rmd, svc); -+ return 0; -+ } -+ -+ EIP_ERR_PRINTF("RMD [%p] : couldn't be QUEUED on SVC 0x%x\n", rmd, svc); -+ -+ EIP_STAT_ALLOC_SUB(&rmd->head->stats, 1); -+ -+ eip_skb_unload(rmd); -+ eip_rmd_free(rmd); -+ -+ return 1; -+} -+ -+static int eip_rmds_alloc(void) -+{ -+ int idx, svc; -+ -+ eip_rx->irq_list = NULL; -+ eip_rx->irq_list_nr = 0; -+ -+ for (svc = 0; svc < EIP_SVC_NR; svc++) { -+ eip_rx->head[svc].rcvr = ep_alloc_rcvr(eip_tx->ep_system, EIP_SVC_EP(svc), rx_envelope_nr); -+ if (!eip_rx->head[svc].rcvr) { -+ EIP_ERR_PRINTF("Cannot install receiver for SVC 0x%x - maybe cable is disconnected\n", svc); -+ return -EAGAIN; -+ } -+ -+ eip_rx->head[svc].handle = -+ eip_dma_reserve(EIP_DVMA_PAGES((EIP_SVC_SMALLEST_LEN << svc)) * eip_rx->rmd_max_nr, -+ EP_PERM_WRITE); -+ if (!eip_rx->head[svc].handle) -+ return -ENOMEM; -+ -+ EIP_DBG(EIP_DBG_RMD_HEAD, eip_rmd_head_display, &eip_rx->head[svc]); -+ -+ for (idx = 0; idx < EIP_RMD_NR; idx++) { -+ if (eip_rmd_alloc_queue(svc, idx * EIP_DVMA_PAGES((EIP_SVC_SMALLEST_LEN << svc)), -+ GFP_KERNEL, EP_NO_SLEEP)) -+ return -ENOMEM; -+ } -+ } -+ return 0; -+} -+static void eip_rmds_free(void) -+{ -+ unsigned long flags; -+ EIP_RMD *rmd; -+ int svc; -+ -+ spin_lock_irqsave(&eip_rx->lock, flags); -+ rmd = eip_rx->irq_list; -+ eip_rx->irq_list = NULL; -+ eip_rx->irq_list_nr = 0; -+ spin_unlock_irqrestore(&eip_rx->lock, flags); -+ -+ eip_rmd_reclaim(rmd); -+ -+ for (svc = 0; svc < EIP_SVC_NR ; svc++) { -+ -+ while ((rmd = eip_rx->head[svc].busy_list)) { -+ eip_rx->head[svc].busy_list = NULL; -+ eip_rmd_reclaim(rmd); -+ if (eip_rx->head[svc].busy_list) { -+ EIP_DBG_PRINTF(EIP_DBG_RMD_QUEUE, "Still RMD [%p] on BUSY list SVC 0x%d - Scheduling\n", rmd, svc); -+ schedule(); -+ } -+ } -+ -+ EIP_ASSERT(EIP_STAT_QUEUED_GET(&eip_rx->head[svc].stats) == EIP_STAT_ALLOC_GET(&eip_rx->head[svc].stats)); -+ -+ EIP_DBG_PRINTF(EIP_DBG_GEN, "HEAD[%p] : FREEING RCVR [%p]\n", &eip_rx->head[svc], -+ eip_rx->head[svc].rcvr); -+ -+ ep_free_rcvr(eip_rx->head[svc].rcvr); -+ -+ EIP_DBG_PRINTF(EIP_DBG_EP_DVMA, "HEAD[%p] : RELEASING DVMA [%p]\n", &eip_rx->head[svc], -+ eip_rx->head[svc].handle); -+ -+ ep_dvma_release(eip_tx->ep_system, eip_rx->head[svc].handle); -+ } -+ -+} -+static int eip_rx_queues_low (void) { -+ int svc; -+ for (svc = 0; svc < EIP_SVC_NR; svc++) -+ if (EIP_STAT_QUEUED_GET(&eip_rx->head[svc].stats) < EIP_RMD_ALLOC_THRESH) -+ return (1); -+ return (0); -+} -+static void eip_rxhandler(EP_RXD * rxd) -+{ -+ EIP_RMD *rmd = (EIP_RMD *) ep_rxd_arg(rxd); -+ EP_STATUS ret = ep_rxd_status(rxd); -+ EP_PAYLOAD * payload = ep_rxd_payload(rxd); -+ unsigned long data = (unsigned long) rmd->skb->data; -+ int frag_nr = 0; -+ int len; -+ -+ struct sk_buff *skb; -+ static char count = 0; -+ -+ atomic_dec(&rmd->head->stats); -+ rmd->rxd = rxd; -+ -+ if (likely(ret == EP_SUCCESS)) { -+ -+ rmd->head->dma++; -+ -+ if ( eip_rx_dropping) { -+ eip_rmd_requeue(rmd); -+ return; -+ } -+ -+ len = (payload) ? 
payload->Data[frag_nr++] : ep_rxd_len(rxd); -+ -+ EIP_DBG(EIP_DBG_RMD, eip_rmd_display, rmd); -+ -+again: -+ if ( (skb = skb_clone(rmd->skb, GFP_ATOMIC)) ) { -+ unsigned int off = (data - (unsigned long) rmd->skb->data); -+ -+ /* have to set the length before calling -+ * skb pull as it will not allow you to -+ * pull past the end */ -+ -+ skb_put (skb, off + len); -+ skb_pull (skb, off); -+ -+ skb->protocol = eth_type_trans(skb, eip_rx->net_device); -+ skb->ip_summed = eip_checksum_state; -+ skb->dev = eip_rx->net_device; -+ -+ /* Fabien/David/Mike this is a hack/fix to allow aggrigation of packets to work. -+ * The problem is ip_frag looks at the truesize to see if it is caching too much space. -+ * As we are reusing a large skb (cloned) for a number of small fragments, they appear to take up alot of space. -+ * so ip_frag dropped them after 4 frags (not good). So we lie and set the truesize to just bigger than the data. -+ */ -+ if (payload) -+ skb->truesize = SKB_DATA_ALIGN(skb->len + EIP_HEADER_PAD) +sizeof(struct sk_buff); -+ -+ } -+ if ( (skb) && -+ (netif_rx(skb) != NET_RX_DROP)){ -+ -+ eip_rx->bytes += len; -+ -+ if (payload && payload->Data[frag_nr] ) { -+ data += EIP_IP_ALIGN(len); -+ len = payload->Data[frag_nr++]; -+ goto again; -+ } -+ eip_rx->packets += ++frag_nr; -+ } else if ( (eip_rx->dropped++ % 20) == 0) -+ __EIP_DBG_PRINTK("Packet dropped by the TCP/IP stack - increase /proc/sys/net/core/netdev_max_backlog\n"); -+ } else if (ret == EP_SHUTDOWN ) { -+ EIP_DBG2(EIP_DBG_RMD, eip_rmd_display, rmd, "ABORTING\n"); -+ ep_complete_receive(rxd); -+ eip_skb_unload(rmd); -+ EIP_STAT_ALLOC_SUB(&rmd->head->stats, 1); -+ eip_rmd_free(rmd); -+ return; -+ } else { -+ EP_ENVELOPE *env = ep_rxd_envelope(rxd); -+ EP_NMD *nmd ; -+ -+ EIP_ERR_PRINTF("RMD[%p] : RECEIVE ret = %d\n", rmd, ret); -+ -+ for (len = 0 ; len < env->nFrags ; len++) { -+ nmd = &env->Frags[len]; -+ EIP_ERR_PRINTF("RMD[%p] : ep_frag #%d nmd_addr [%x] nmd_len %d\n", rmd, len, -+ (unsigned int) nmd->nmd_addr, nmd->nmd_len); -+ } -+ eip_rx->errors++; -+ EIP_ASSERT2(atomic_read(&skb_shinfo(rmd->skb)->dataref) == 1, eip_rmd_display, rmd); -+ } -+ -+ /* data is used to store the irq flags */ -+ spin_lock_irqsave(&eip_rx->lock, data); -+ rmd->chain.next = eip_rx->irq_list; -+ eip_rx->irq_list = rmd; -+ eip_rx->irq_list_nr++; -+ spin_unlock_irqrestore(&eip_rx->lock, data); -+ -+ if (((count++ % eip_rx->sysctl_granularity) == 0) /* and either we have passed up a number of them */ -+ || eip_rx_queues_low()) /* or we are low */ -+ tasklet_schedule(&eip_rx->tasklet); -+ else -+ { -+ if ( !timer_pending (&eip_rx_tasklet_timer) ) /* the timer not already set */ -+ mod_timer (&eip_rx_tasklet_timer, lbolt); -+ } -+} -+ -+/* dest ; if the buffer still reference on it mocve the rmd to the dest list */ -+static void eip_rmd_reclaim(EIP_RMD *rmd) -+{ -+ EIP_RMD *rmd_next = rmd; -+ int dataref; -+ -+ while (rmd_next) { -+ rmd = rmd_next; -+ rmd_next = rmd_next->chain.next; -+ -+ dataref = atomic_read(&skb_shinfo(rmd->skb)->dataref); -+ EIP_ASSERT(dataref > 0); -+ -+ if (dataref == 1) { -+ eip_rmd_requeue(rmd); -+ } else { -+ rmd->chain.next = rmd->head->busy_list; -+ rmd->head->busy_list = rmd; -+ } -+ } -+} -+static void eip_rx_tasklet(unsigned long arg) -+{ -+ EIP_RMD *rmd, *rmd_next; -+ unsigned long flags; -+ short svc, queued; -+ int needs_reschedule; -+ -+ if (eip_rx_tasklet_locked) /* we dont want the tasklet to do anything when we are finishing */ -+ return; -+ -+ for (svc = 0; svc < EIP_SVC_NR; svc++) { -+ rmd = 
eip_rx->head[svc].busy_list; -+ eip_rx->head[svc].busy_list = NULL; -+ eip_rmd_reclaim(rmd); -+ } -+ -+ spin_lock_irqsave(&eip_rx->lock, flags); -+ rmd = eip_rx->irq_list; -+ eip_rx->irq_list = NULL; -+ eip_rx->irq_list_nr = 0; -+ spin_unlock_irqrestore(&eip_rx->lock, flags); -+ -+ eip_rmd_reclaim(rmd); -+ -+ needs_reschedule = 0; -+ -+ for (svc = 0; svc < EIP_SVC_NR; svc++) { -+ /* the plan is : allocate some more if possible or steall some dvma space from those on the EIP_BUSY_LIST */ -+ queued = EIP_STAT_QUEUED_GET(&eip_rx->head[svc].stats); -+ -+ EIP_ASSERT(queued >= 0 && queued <= EIP_RMD_MAX_NR); -+ -+ if (queued < EIP_RMD_ALLOC_THRESH) { -+ short allocated = EIP_STAT_ALLOC_GET(&eip_rx->head[svc].stats); -+ short how_many; -+ -+ EIP_ASSERT(allocated >= 0 && allocated <= EIP_RMD_MAX_NR); -+ -+ if (likely(allocated < eip_rx->rmd_max_nr)) { -+ -+ how_many = (((allocated / EIP_RMD_ALLOC_STEP) + 1) * EIP_RMD_ALLOC_STEP); -+ if (how_many > eip_rx->rmd_max_nr) -+ how_many = eip_rx->rmd_max_nr; -+ -+ for (; allocated < how_many && -+ (eip_rmd_alloc_queue(svc, allocated * EIP_DVMA_PAGES((EIP_SVC_SMALLEST_LEN << svc)), -+ GFP_ATOMIC, EP_NO_ALLOC|EP_NO_SLEEP) == 0) ; allocated++); -+ if ( allocated != how_many ) { -+ eip_rx->reschedule++; -+ needs_reschedule = 1; -+ } -+ } else { -+ /* steal how_many rmds and put them on the aside list */ -+ how_many = EIP_RMD_ALLOC_THRESH - queued; -+ -+ EIP_ASSERT(how_many >= 0 && how_many <= EIP_RMD_ALLOC_THRESH); -+ -+ rmd_next = eip_rx->head[svc].busy_list; -+ eip_rx->head[svc].busy_list = NULL; -+ -+ while (how_many-- && rmd_next) { -+ rmd = rmd_next; -+ rmd_next = rmd_next->chain.next; -+ -+ if (eip_rmd_alloc_replace(rmd, svc, GFP_ATOMIC)) { -+ rmd_next = rmd; -+ break; -+ } -+ } -+ eip_rx->head[svc].busy_list = rmd_next; -+ if ( how_many ) -+ needs_reschedule = 1; -+ } -+ } -+ } -+ -+ if (needs_reschedule) -+ { -+ if ( !timer_pending (&eip_rx_tasklet_timer)) -+ mod_timer (&eip_rx_tasklet_timer, lbolt); -+ } -+} -+static void eip_rx_tasklet_resched(unsigned long arg) -+{ -+ tasklet_schedule(&eip_rx->tasklet); -+} -+ -+static inline void eip_tmd_init(EIP_TMD * tmd, unsigned long buff_base, EIP_TMD_HEAD * head, unsigned long buff_len, -+ int dvma_idx) -+{ -+ tmd->dvma_idx = dvma_idx; -+ tmd->dma_base = buff_base; -+ tmd->dma_len = -1; -+ tmd->skb = NULL; -+ tmd->head = head; -+ tmd->chain.next = NULL; -+ -+ if (tmd->head != &eip_tx->head[EIP_TMD_STD]) { -+ tmd->nmd.nmd_len = buff_len; -+ eip_tmd_load(tmd); -+ } else { -+ tmd->nmd.nmd_len = -1; -+ tmd->nmd.nmd_addr = 0; -+ } -+} -+ -+static inline EIP_TMD *eip_tmd_get(int id) -+{ -+ unsigned long flags; -+ EIP_TMD *tmd = NULL; -+ spin_lock_irqsave(&eip_tx->lock, flags); -+ while ((tmd = eip_tx->head[id].tmd) == NULL) { -+ spin_unlock_irqrestore(&eip_tx->lock, flags); -+ if (ep_enable_txcallbacks(eip_tx->xmtr) == 0) { -+ -+ spin_lock_irqsave (&eip_tx->lock, flags); -+ if (eip_tx->head[id].tmd == NULL) { -+ __EIP_DBG_PRINTF("Cannot get a TMD on head %d ... 
stopping queue\n", id); -+ -+ eip_stop_queue (); -+ -+ spin_unlock_irqrestore (&eip_tx->lock, flags); -+ -+ return NULL; -+ } -+ spin_unlock_irqrestore (&eip_tx->lock, flags); -+ } -+ -+ ep_disable_txcallbacks(eip_tx->xmtr); -+ spin_lock_irqsave(&eip_tx->lock, flags); -+ } -+ eip_tx->head[id].tmd = tmd->chain.next; -+ spin_unlock_irqrestore(&eip_tx->lock, flags); -+ atomic_dec(&tmd->head->stats); -+ return tmd; -+} -+ -+static inline void eip_tmd_put(EIP_TMD * tmd) -+{ -+ unsigned long flags; -+ -+ tmd->skb = NULL; -+ -+ spin_lock_irqsave(&eip_tx->lock, flags); -+ tmd->chain.next = tmd->head->tmd; -+ tmd->head->tmd = tmd; -+ spin_unlock_irqrestore(&eip_tx->lock, flags); -+ atomic_inc(&tmd->head->stats); -+ -+ eip_start_queue(); -+ -+ EIP_DBG_PRINTF(EIP_DBG_TMD_QUEUE, "TMD [%p] : REQUEUED\n", tmd); -+} -+static inline void eip_tmd_load(EIP_TMD * tmd) -+{ -+ EP_RAILMASK rmask = tx_railmask; -+ -+ __eip_tmd_load(tmd, &rmask); -+ -+ EIP_DBG_PRINTF(EIP_DBG_EP_DVMA, "TMD [%p] : LOADED\n", tmd); -+} -+static inline void eip_tmd_unload(EIP_TMD * tmd) -+{ -+ __eip_tmd_unload(tmd); -+ -+ EIP_DBG_PRINTF(EIP_DBG_EP_DVMA, "TMD [%p] : UNLOADED\n", tmd); -+} -+static inline void eip_tmd_free(EIP_TMD * tmd) -+{ -+ eip_buff_free(tmd->dma_base, tmd->nmd.nmd_len); -+ -+ EIP_DBG_PRINTF(EIP_DBG_MEMFREE, "TMD [%p] : FREED\n", tmd); -+ -+ EIP_STAT_ALLOC_SUB(&tmd->head->stats, 1); -+} -+ -+/* tmd on a separate block */ -+static inline EIP_TMD *eip_tmd_alloc_queue(EIP_TMD * tmd, EIP_TMD_HEAD * head, int dvma_idx) -+{ -+ eip_tmd_init(tmd, 0, head, -1, dvma_idx); -+ -+ eip_tmd_put(tmd); -+ -+ EIP_STAT_ALLOC_ADD(&tmd->head->stats, 1); -+ EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd); -+ return tmd; -+} -+/* tmd on the buffer */ -+static inline EIP_TMD *eip_tmd_alloc_queue_copybreak(EIP_TMD_HEAD * head, int dvma_idx) -+{ -+ EIP_TMD *tmd; -+ unsigned long buff_base; -+ -+ if (!(buff_base = eip_buff_alloc(tx_copybreak_max + sizeof(EIP_TMD), GFP_KERNEL))) -+ return NULL; -+ -+ tmd = (EIP_TMD *) (buff_base + tx_copybreak_max); -+ eip_tmd_init(tmd, buff_base, head, tx_copybreak_max, dvma_idx); -+ -+ eip_tmd_put(tmd); -+ EIP_STAT_ALLOC_ADD(&tmd->head->stats, 1); -+ EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd); -+ return tmd; -+} -+ -+/* ipf are on the buffer */ -+static inline EIP_TMD *eip_tmd_alloc_queue_aggreg(EIP_TMD_HEAD * head, int dvma_idx) -+{ -+ EIP_TMD *tmd; -+ unsigned long buff_base; -+ -+ if (!(buff_base = eip_buff_alloc(EIP_SVC_BIGGEST_LEN, GFP_KERNEL))) -+ return NULL; -+ -+ tmd = (EIP_TMD *) (buff_base + EIP_SVC_BIGGEST_LEN - sizeof(EIP_IPFRAG)); -+ eip_tmd_init(tmd, buff_base, head, EIP_SVC_BIGGEST_LEN - sizeof(EIP_IPFRAG), dvma_idx); -+ -+ eip_tmd_put(tmd); -+ EIP_STAT_ALLOC_ADD(&tmd->head->stats, 1); -+ EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd); -+ return tmd; -+} -+ -+static int eip_tmds_alloc() -+{ -+ int i; -+ int page_nr; -+ EIP_TMD *tmd; -+ -+ page_nr = EIP_DVMA_PAGES(tx_copybreak_max); -+ -+ eip_tx->head[EIP_TMD_COPYBREAK].handle = eip_dma_reserve(page_nr * eip_tx->tmd_max_nr, EP_PERM_READ); -+ -+ EIP_DBG(EIP_DBG_TMD_HEAD, eip_tmd_head_display, &eip_tx->head[EIP_TMD_COPYBREAK]); -+ -+ for (i = 0; i < EIP_TMD_NR; i++) { -+ if (!eip_tmd_alloc_queue_copybreak(&eip_tx->head[EIP_TMD_COPYBREAK], i * page_nr)) -+ return -ENOMEM; -+ } -+ -+ eip_tx->head[EIP_TMD_STD].handle = -+ eip_dma_reserve(EIP_DVMA_PAGES(EIP_SVC_BIGGEST_LEN) * eip_tx->tmd_max_nr, EP_PERM_READ); -+ -+ EIP_DBG(EIP_DBG_TMD_HEAD, eip_tmd_head_display, &eip_tx->head[EIP_TMD_STD]); -+ -+ tmd = kmalloc(sizeof(EIP_TMD) * EIP_TMD_NR, GFP_KERNEL); -+ 
if (!tmd) { -+ EIP_ERR_PRINTF("Cannot ALLOCATE %d of tmds\n", (int) sizeof(EIP_TMD) * EIP_TMD_NR); -+ return -ENOMEM; -+ } -+ -+ page_nr = EIP_DVMA_PAGES(EIP_SVC_BIGGEST_LEN); -+ -+ for (i = 0; i < EIP_TMD_NR; i++, tmd++) { -+ if (!eip_tmd_alloc_queue(tmd, &eip_tx->head[EIP_TMD_STD], i * page_nr)) -+ return -ENOMEM; -+ } -+ -+ page_nr = EIP_DVMA_PAGES(EIP_SVC_BIGGEST_LEN); -+ -+ eip_tx->head[EIP_TMD_AGGREG].handle = eip_dma_reserve(page_nr * eip_tx->tmd_max_nr, EP_PERM_READ); -+ EIP_DBG(EIP_DBG_TMD_HEAD, eip_tmd_head_display, &eip_tx->head[EIP_TMD_AGGREG]); -+ -+ for (i = 0; i < EIP_TMD_NR; i++) { -+ if (!eip_tmd_alloc_queue_aggreg(&eip_tx->head[EIP_TMD_AGGREG], i * page_nr)) -+ return -ENOMEM; -+ } -+ return 0; -+} -+ -+static void eip_tmds_free(void) -+{ -+ EIP_TMD *tmd; -+ EIP_TMD *tmd_next; -+ int i; -+ -+ ep_poll_transmits(eip_tx->xmtr); -+ -+ for (i = 0 ; i < 3 ; i++) { -+again: -+ if (EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats) < EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats)) { -+ EIP_DBG_PRINTF(EIP_DBG_TMD, "Polling XMTR [%p]\n", eip_tx->xmtr); -+ ep_poll_transmits(eip_tx->xmtr); -+ goto again; -+ } -+ } -+ /* everything should be queued */ -+ if ((tmd = eip_tx->head[EIP_TMD_COPYBREAK].tmd)) { -+ do { -+ tmd_next = tmd->chain.next; -+ eip_tmd_unload(tmd); -+ -+ EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd); -+ -+ eip_tmd_free(tmd); -+ } while (tmd_next && (tmd = tmd_next)); -+ } -+ -+ EIP_DBG_PRINTF(EIP_DBG_TMD_EP_DVMA, "HEAD[EIP_TMD_COPYBREAK] release DVMA [%p]\n", -+ eip_tx->head[EIP_TMD_COPYBREAK].handle); -+ -+ ep_dvma_release(eip_tx->ep_system, eip_tx->head[EIP_TMD_COPYBREAK].handle); -+ -+ /* these ones have been allocated as a block */ -+ if ((tmd = eip_tx->head[EIP_TMD_STD].tmd)) { -+ do { -+ if (tmd->dvma_idx == 0 ) { -+ kfree(tmd); -+ /* eip_tmd_free(tmd); */ -+ EIP_STAT_ALLOC_SUB(&tmd->head->stats, EIP_TMD_NR); -+ tmd_next = NULL; -+ EIP_DBG_PRINTF(EIP_DBG_TMD_EP_DVMA, "TMD HEAD[%p] : [EIP_TMD_STD] BLOCK FREED\n", tmd); -+ } else -+ tmd_next = tmd->chain.next; -+ } while (tmd_next && (tmd = tmd_next)); -+ } -+ EIP_DBG_PRINTF(EIP_DBG_TMD_EP_DVMA, "HEAD[EIP_TMD_STD] release DVMA [%p]\n", -+ eip_tx->head[EIP_TMD_STD].handle); -+ -+ ep_dvma_release(eip_tx->ep_system, eip_tx->head[EIP_TMD_STD].handle); -+ -+ if ((tmd = eip_tx->head[EIP_TMD_AGGREG].tmd)) { -+ do { -+ tmd_next = tmd->chain.next; -+ -+ EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd); -+ -+ eip_tmd_unload(tmd); -+ eip_tmd_free(tmd); -+ } while (tmd_next && (tmd = tmd_next)); -+ } -+ EIP_DBG_PRINTF(EIP_DBG_TMD_EP_DVMA, "TMD HEAD[%p] : [EIP_TMD_AGGREG] release DVMA\n", -+ eip_tx->head[EIP_TMD_AGGREG].handle); -+ -+ ep_dvma_release(eip_tx->ep_system, eip_tx->head[EIP_TMD_AGGREG].handle); -+ -+ ep_free_xmtr(eip_tx->xmtr); -+ EIP_DBG_PRINTF(EIP_DBG_TMD, "XMTR[%p] : FREED\n", eip_tx->xmtr); -+} -+ -+static inline void eip_ipf_skb_add(EIP_IPFRAG * ipf, struct sk_buff *skb) -+{ -+ int align = EIP_IP_ALIGN(skb->len); -+ -+ -+ if (ipf->dma_len == -1) { /* like a virgin; touched for the very first time */ -+ do_gettimeofday(&ipf->timestamp); -+ /* FIXE ME put that in release tmd code */ -+ ipf->frag_nr = 0; -+ ipf->dma_len = 0; -+ ipf->datagram_len = -1; -+ ipf->dma_correction = 0; -+ } -+ -+ memcpy((void *) (ipf->dma_base + ipf->dma_len), skb->data, skb->len); -+ -+ if (ipf->datagram_len == -1) { -+ struct iphdr * iph = skb->nh.iph; -+ int offset = ntohs(iph->frag_off); -+ -+ /* last one ? 
; offset & ~IP_OFFSET = IP fragment flags */ -+ if (((offset & ~IP_OFFSET) & IP_MF) == 0) { -+ offset &= IP_OFFSET; -+ offset <<= 3; -+ ipf->datagram_len = offset + htons(iph->tot_len) - sizeof(struct iphdr); -+ } -+ } -+ -+ skb->next = ipf->skb; -+ ipf->skb = skb; -+ ipf->payload.Data[ipf->frag_nr] = skb->len; -+ ipf->dma_len += align; -+ ipf->dma_correction += align - skb->len + ETH_HLEN + sizeof(struct iphdr); -+ /* FIXME ; Count got wrong if ip header has options */ -+ -+ ipf->frag_nr++; -+ -+ EIP_DBG2(EIP_DBG_TMD, eip_ipf_display, ipf, "ADDED skb[%p] len %db ALIGNED(%db)\n", skb, skb->len, EIP_IP_ALIGN(skb->len)); -+} -+ -+#define eip_ipf_hasroom(ipf, skb) ((ipf->dma_len + EIP_IP_ALIGN(skb->len) < eip_tx->sysctl_ipfrag_copybreak)) -+int eip_hard_start_xmit(struct sk_buff *skb, struct net_device *devnet) -+{ -+ -+ EIP_TMD *tmd; -+ EP_NMD nmd; -+ struct iphdr *iph; -+ int j; -+ -+ if (skb->destructor){ -+ atomic_inc(&eip_tx->destructor); -+ tasklet_schedule(&eip_tx->tasklet); -+ } -+ -+ if (!(iph = eip_ipfrag_get(skb->data)) || (eip_tx->sysctl_aggregation == 0)) { /* not ip fragment */ -+no_aggreg: -+ j = (skb->len < eip_tx->sysctl_copybreak) ? EIP_TMD_COPYBREAK : EIP_TMD_STD; /* j = head id */ -+ -+ if (!(tmd = eip_tmd_get(j))) { -+ if (skb->destructor) -+ atomic_dec(&eip_tx->destructor); -+ return 1; -+ } -+ -+ tmd->dma_len = skb->len; -+ tmd->skb = skb; -+ tmd->skb->next = NULL; -+ tmd->chain.next = NULL; -+ -+ if (j == EIP_TMD_COPYBREAK) { -+ memcpy((void *) tmd->dma_base, skb->data, skb->len); -+ -+ ep_nmd_subset(&nmd, &tmd->nmd, 0, skb->len); -+#ifdef EIP_MORE_STATS -+ eip_tx->sent_copybreak++; -+#endif -+ return eip_do_xmit(tmd, &nmd, NULL); -+ } -+ tmd->dma_base = (unsigned long) skb->data; -+ tmd->nmd.nmd_len = skb->len; -+ eip_tmd_load(tmd); -+ -+#ifdef EIP_MORE_STATS -+ eip_tx->sent_std++; -+#endif -+ return eip_do_xmit(tmd, &tmd->nmd, NULL); -+ } else if ( skb->len > EIP_SVC_BIGGEST_LEN/2 ) { -+ /* don't aggregate when we have a full mtu of data */ -+ /* or more than 32k ; in this case it is cheaper */ -+ /* to just map the buffer and send it */ -+ goto no_aggreg; -+ } else { -+ EIP_IPFRAG *ipf = NULL; -+ unsigned long flags; -+ struct list_head *l; -+ struct iphdr *iph2; -+ int i; -+ __u16 id = iph->id; -+ __u32 saddr = iph->saddr; -+ __u32 daddr = iph->daddr; -+ __u8 protocol = iph->protocol; -+ -+ EIP_DBG(EIP_DBG_IPH, eip_iph_display, iph); -+ -+ j = 0; -+ -+ /* here we can't have full mtu size aggregated packet */ -+ EIP_ASSERT_RET(skb->len < eip_tx->sysctl_ipfrag_copybreak, 0); -+ -+ spin_lock_irqsave(&eip_tx->ipfraglock, flags); -+ list_for_each(l, &eip_tx->ipfrag) { -+ ipf = list_entry(l, EIP_IPFRAG, list); -+ iph2 = eip_ipfrag_get((char *) ipf->dma_base); -+ -+ EIP_ASSERT(iph2); -+ -+ if ((iph2->id == id) && -+ (get_unaligned(&iph2->saddr) == saddr) && -+ (get_unaligned(&iph2->daddr) == daddr) && -+ (iph2->protocol == protocol)) { -+ /* || timeout */ -+ if (eip_ipf_hasroom(ipf, skb)) { -+ -+ eip_ipf_skb_add(ipf, skb); -+ -+ if ((ipf->datagram_len != -1) && -+ (ipf->dma_len == (ipf->datagram_len + ipf->dma_correction) || -+ ipf->frag_nr == (128 / sizeof(uint32_t)))) { -+send_aggreg: -+ ipf->payload.Data[ipf->frag_nr] = 0; -+ list_del(&ipf->list); -+ eip_tx->ipfrag_count--; -+ spin_unlock_irqrestore(&eip_tx->ipfraglock, flags); -+ -+ ep_nmd_subset(&nmd, &ipf->nmd, 0, ipf->dma_len); -+ -+#ifdef EIP_MORE_STATS -+ eip_tx->sent_aggreg++; -+#endif -+ if ((i = eip_do_xmit((EIP_TMD *) ipf, &nmd, &ipf->payload)) != EP_SUCCESS) -+ return i; -+ if (j) -+ goto new; -+ return 0; 
-+ } -+ -+ spin_unlock_irqrestore(&eip_tx->ipfraglock, flags); -+ tasklet_schedule(&eip_tx->tasklet); -+ return 0; -+ } else { -+ EIP_DBG_PRINTF(EIP_DBG_TMD, "IPF[%p] : FULL %db full - sending it\n", ipf, ipf->dma_len); -+ j = 1; -+ goto send_aggreg; -+ } -+ } -+ } -+ spin_unlock_irqrestore(&eip_tx->ipfraglock, flags); -+new: -+ if (!(ipf = (EIP_IPFRAG *) eip_tmd_get(EIP_TMD_AGGREG))) -+ goto no_aggreg; -+ -+ eip_ipf_skb_add(ipf, skb); -+ -+ spin_lock_irqsave(&eip_tx->ipfraglock, flags); -+ list_add_tail(&ipf->list, &eip_tx->ipfrag); -+ eip_tx->ipfrag_count++; -+ spin_unlock_irqrestore(&eip_tx->ipfraglock, flags); -+ tasklet_schedule(&eip_tx->tasklet); -+ } -+ return 0; -+} -+static int eip_do_xmit(EIP_TMD * tmd, EP_NMD *nmd, EP_PAYLOAD *payload) -+{ -+ EIP_HEADER *eiph = (EIP_HEADER *) tmd->dma_base; -+ int attr = EP_SET_DATA((EP_NO_SLEEP | EP_NO_INTERRUPT | EP_NO_FAILOVER), EP_TYPE_SVC_INDICATOR, EP_SVC_EIP); -+ unsigned long flags; -+ int svc, rnum; -+ -+ SIZE_TO_SVC(nmd->nmd_len, svc); -+ -+ EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd); -+ /* EIP_DBG(eip_eiph_display(eiph)); */ -+ -+ if (unlikely (eiph->h_dhost.ip_bcast)) -+ rnum = ep_pickRail (EP_NMD_RAILMASK (nmd) & tx_railmask & ep_xmtr_availrails(eip_tx->xmtr)); -+ else -+ rnum = ep_pickRail (EP_NMD_RAILMASK (nmd) & tx_railmask & ep_xmtr_noderails(eip_tx->xmtr, ntohs(eiph->h_dhost.ip_addr))); -+ -+ if (rnum >= 0) -+ attr = EP_SET_PREFRAIL(attr, rnum); -+ -+ /* add to inuse list */ -+ spin_lock_irqsave (&eip_tx->lock, flags); -+ list_add_tail (&tmd->chain.link, &eip_tx->inuse); -+ spin_unlock_irqrestore (&eip_tx->lock, flags); -+ -+ /* ENOMEM EINVAL ECONNREFUSED ESUCCESS */ -+ svc = (unlikely(eiph->h_dhost.ip_bcast)) ? -+ ep_multicast_message(eip_tx->xmtr, -1, -1, NULL, EIP_SVC_EP(svc), attr | EP_NOT_MYSELF, eip_txhandler, tmd, payload, nmd, 1) : -+ -+ ep_transmit_message(eip_tx->xmtr, ntohs(eiph->h_dhost.ip_addr), EIP_SVC_EP(svc), attr, eip_txhandler, tmd, payload, nmd, 1); -+ -+ if (likely(svc == EP_SUCCESS)) -+ return 0; -+ else if (svc == ENOMEM) { -+ EIP_ERR_PRINTF("%s", "Memory allocation error ...\n"); -+ eip_tx->errors++; -+ } -+ else -+ { -+ /* EP_EINVAL occurs when the svc has a bad value or the iovec has too many frag; */ -+ /* we don't use the latter option here */ -+ __EIP_DBG_PRINTF("TMD [%p] : DROPPED skb[%p] status = %d from ep_?_message\n", tmd, tmd->skb, svc); -+ -+ eip_tx->dropped++; -+ } -+ -+ eip_txhandler(NULL, tmd, -99); -+ -+ /* Quadrics GNAT sw-elan/4397 - since we will "never" be able to send this packet to the */ -+ /* destination node, we drop it and feign success - this has the same behaviour as an */ -+ /* ethernet where it sticks the packet on the wire, but no-one receives it. 
*/ -+ return 0; -+} -+ -+static void eip_txhandler(EP_TXD * txd, void *arg, EP_STATUS status) -+{ -+ EIP_TMD *tmd = (EIP_TMD *) arg; -+ struct sk_buff *skb_next; -+ unsigned long flags; -+ int svc = 0; -+ -+ if (likely(status == EP_SUCCESS)) { -+ SIZE_TO_SVC(tmd->dma_len, svc); -+ eip_tx->dma[svc]++; -+ eip_tx->bytes += tmd->dma_len; -+ -+ if (tmd->head == &eip_tx->head[EIP_TMD_AGGREG]) { -+ EIP_IPFRAG *ipf = (EIP_IPFRAG *) tmd; -+ eip_tx->packets += ipf->frag_nr; -+ } else -+ eip_tx->packets++; -+ } else { -+ if (tmd->head == &eip_tx->head[EIP_TMD_AGGREG]) { -+ EIP_IPFRAG *ipf = (EIP_IPFRAG *) tmd; -+ eip_tx->dropped += ipf->frag_nr; -+ EIP_DBG_PRINTF(EIP_DBG_TMD, "txhandler aggreg packet dropped status = %d\n", status); -+ } else { -+ eip_tx->dropped++; -+ EIP_DBG_PRINTF(EIP_DBG_TMD, "txhandler packet dropped status = %d\n", status); -+ } -+ } -+ -+ if (tmd->head == &eip_tx->head[EIP_TMD_STD]) { -+ eip_tmd_unload(tmd); -+ tmd->dma_base = 0; -+ tmd->nmd.nmd_len = -1; -+ } -+ -+ tmd->dma_len = -1; -+ -+ svc = 0; -+ while (tmd->skb) { -+ svc++; -+ -+ if (tmd->skb->destructor) -+ atomic_dec(&eip_tx->destructor); -+ -+ skb_next = tmd->skb->next; -+ dev_kfree_skb_any(tmd->skb); -+ tmd->skb = skb_next; -+ } -+ EIP_DBG_PRINTF(EIP_DBG_TMD, "IPF/TMD [%p] : %d skb RELEASE/FREED\n", tmd, svc); -+ -+ /* remove from inuse list */ -+ spin_lock_irqsave (&eip_tx->lock, flags); -+ list_del (&tmd->chain.link); -+ spin_unlock_irqrestore (&eip_tx->lock, flags); -+ -+ eip_tmd_put(tmd); -+} -+ -+static void eip_tx_tasklet(unsigned long arg) -+{ -+ struct timeval now; -+ unsigned long flags; -+ EIP_IPFRAG *ipf, *ipfq = NULL; -+ EP_NMD nmd; -+ struct list_head *list; -+ struct list_head *tmp; -+ char resched = 0; -+ char poll = 1; -+ -+ do_gettimeofday(&now); -+ -+ spin_lock_irqsave(&eip_tx->ipfraglock, flags); -+ if (eip_tx->ipfrag_count) { -+ list_for_each_safe(list, tmp, &eip_tx->ipfrag) { -+ ipf = list_entry(list, EIP_IPFRAG, list); -+ /* delta = (((now.tv_sec - ipf->timestamp.tv_sec) * 1000000UL) + now.tv_usec) - ipf->timestamp.tv_usec; */ -+ if (((((now.tv_sec - ipf->timestamp.tv_sec) * 1000000UL) + now.tv_usec) - -+ ipf->timestamp.tv_usec) >= (1000UL * eip_tx->sysctl_ipfrag_to)) { -+ list_del(&ipf->list); -+ eip_tx->ipfrag_count--; -+ ipf->chain.next = (EIP_TMD *) ipfq; -+ ipfq = ipf; -+ } -+ } -+ } -+ if (eip_tx->ipfrag_count) -+ resched = 1; -+ spin_unlock_irqrestore(&eip_tx->ipfraglock, flags); -+ -+ while (ipfq) { -+ poll = 0; -+ -+ ep_nmd_subset(&nmd, &ipfq->nmd, 0, ipfq->dma_len); -+ -+ ipfq->payload.Data[ipfq->frag_nr] = 0; -+ -+#ifdef EIP_MORE_STATS -+ eip_tx->sent_aggreg++; -+#endif -+ ipf = (EIP_IPFRAG *) ipfq->chain.next; -+ eip_do_xmit((EIP_TMD *) ipfq, &nmd, &ipfq->payload); -+ ipfq = ipf; -+ } -+ -+ if (poll) -+ ep_poll_transmits(eip_tx->xmtr); -+ -+ if (atomic_read(&eip_tx->destructor) || resched ) -+ tasklet_schedule(&eip_tx->tasklet); -+} -+void eip_start_queue() -+{ -+ if (netif_queue_stopped(eip_tx->net_device)) { -+ EIP_DBG_PRINTK(EIP_DBG_GEN, "Waking up %s queue\n", eip_tx->net_device->name); -+ netif_wake_queue(eip_tx->net_device); -+ } -+} -+void eip_stop_queue() -+{ -+ EIP_DBG_PRINTK(EIP_DBG_GEN, "Stopping %s queue\n", eip_tx->net_device->name); -+ netif_stop_queue(eip_tx->net_device); -+} -+ -+static int eip_open(struct net_device *devnet) -+{ -+ if (devnet->flags & IFF_PROMISC) -+ EIP_DBG_PRINTK(EIP_DBG_GEN, "%s entering in promiscuous mode\n", devnet->name); -+ -+ netif_start_queue(devnet); -+ EIP_DBG_PRINTK(EIP_DBG_GEN, "iface %s MAC %02x:%02x:%02x:%02x:%02x:%02x up\n", -+ 
devnet->name, (devnet->dev_addr[0]) & 0xff, -+ (devnet->dev_addr[1]) & 0xff, (devnet->dev_addr[2]) & 0xff, (devnet->dev_addr[3]) & 0xff, -+ (devnet->dev_addr[4]) & 0xff, (devnet->dev_addr[5]) & 0xff); -+ return 0; -+} -+ -+static int eip_close(struct net_device *devnet) -+{ -+ if (devnet->flags & IFF_PROMISC) -+ EIP_DBG_PRINTK(EIP_DBG_GEN, "%s leaving promiscuous mode\n", devnet->name); -+ -+ netif_stop_queue(devnet); -+ -+ eip_rx_tasklet(0); -+ -+ EIP_DBG_PRINTK(EIP_DBG_GEN, "iface %s MAC %02x:%02x:%02x:%02x:%02x:%02x down\n", -+ devnet->name, (devnet->dev_addr[0]) & 0xff, -+ (devnet->dev_addr[1]) & 0xff, (devnet->dev_addr[2]) & 0xff, (devnet->dev_addr[3]) & 0xff, -+ (devnet->dev_addr[4]) & 0xff, (devnet->dev_addr[5]) & 0xff); -+ return 0; -+} -+ -+static struct net_device_stats *eip_get_stats(struct net_device *devnet) -+{ -+ static struct net_device_stats stats; -+ -+ stats.rx_packets = eip_rx->packets; -+ stats.rx_bytes = eip_rx->bytes; -+ stats.rx_errors = eip_rx->errors; -+ stats.rx_dropped = eip_rx->dropped; -+ -+ stats.tx_packets = eip_tx->packets; -+ stats.tx_bytes = eip_tx->bytes; -+ stats.tx_errors = eip_tx->errors; -+ stats.tx_dropped = eip_tx->dropped; -+ return &stats; -+} -+ -+static int eip_change_mtu(struct net_device *devnet, int mtu) -+{ -+ if (mtu <= EIP_MTU_MAX) { -+ EIP_DBG_PRINTK(EIP_DBG_GEN, "MTU size changed from %d to %d\n", devnet->mtu, mtu); -+ devnet->mtu = mtu; -+ } -+ return 0; -+} -+ -+#ifdef MODULE -+int eip_init(void) -+{ -+ struct net_device *devnet; -+ int errno = 0; -+ -+ eip_rx_dropping = 0; -+ eip_rx_tasklet_locked = 1; -+ -+ /* timer up but not started */ -+ init_timer (&eip_rx_tasklet_timer); -+ eip_rx_tasklet_timer.function = eip_rx_tasklet_resched; -+ eip_rx_tasklet_timer.data = (unsigned long) 0; -+ eip_rx_tasklet_timer.expires = lbolt + hz; -+ -+ devnet = alloc_etherdev(sizeof(EIP_RX) + sizeof(EIP_TX)); -+ if (!devnet) { -+ EIP_ERR_PRINTF("Unable to ALLOCATE etherdev structure\n"); -+ return -ENOMEM; -+ } -+ strcpy (devnet->name, "eip0"); -+ -+ EIP_DBG_PRINTK(EIP_DBG_GEN, "Enabling aggregation code\n"); -+ devnet->change_mtu = eip_change_mtu; -+ devnet->mtu = EIP_MTU_MAX; -+ devnet->open = eip_open; -+ devnet->stop = eip_close; -+ devnet->hard_start_xmit = eip_hard_start_xmit; -+ devnet->get_stats = eip_get_stats; -+ -+ /* devnet->features |= (NETIF_F_DYNALLOC); */ -+ /* devnet->features = (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA); */ -+ /* devnet->features |= (NETIF_F_SG|NETIF_F_FRAGLIST|NETIF_F_HIGHDMA|NETIF_F_HW_CSUM); */ -+ -+ eip_rx = (EIP_RX *) devnet->priv; -+ eip_tx = (EIP_TX *) (eip_rx + 1); -+ -+ /* instance 0 */ -+ eip_tx->ep_system = ep_system(); -+ if (eip_tx->ep_system == NULL) { -+ EIP_ERR_PRINTF("kernel comms for iface %s does not exist\n", devnet->name); -+ errno = -ENXIO; -+ goto out; -+ } -+ if (ep_waitfor_nodeid(eip_tx->ep_system) == ELAN_INVALID_NODE) { -+ EIP_ERR_PRINTF("network position not found\n"); -+ errno = -EAGAIN; -+ goto out; -+ } -+ eip_tx->xmtr = ep_alloc_xmtr(eip_tx->ep_system); -+ if (!eip_tx->xmtr) { -+ EIP_ERR_PRINTF("Cannot create allocated transmitter - maybe cable is disconnected\n"); -+ errno = -EAGAIN; -+ goto out; -+ } -+ /* assign MAC address */ -+ *((int *) &devnet->dev_addr[4]) = htons(ep_nodeid(eip_tx->ep_system)); -+ eip_rx->net_device = devnet; -+ eip_tx->net_device = devnet; -+ -+ atomic_set(&eip_tx->destructor, 0); -+ -+ if ((tmd_max >= EIP_TMD_MIN_NR) && (tmd_max <= EIP_TMD_MAX_NR)) { -+ EIP_DBG_PRINTF(EIP_DBG_GEN, "Setting tmd_max_nr to %d\n", tmd_max); -+ eip_tx->tmd_max_nr = tmd_max; 
-+ } else { -+ EIP_ERR_PRINTF("parameter error : %d <= tmd_max(%d) <= %d using default %d\n", -+ EIP_TMD_MIN_NR, tmd_max, EIP_TMD_MAX_NR, EIP_TMD_MAX_NR); -+ eip_tx->tmd_max_nr = EIP_TMD_MAX_NR; -+ } -+ -+ if ((rmd_max >= EIP_RMD_MIN_NR) && (rmd_max <= EIP_RMD_MAX_NR)) { -+ EIP_DBG_PRINTF(EIP_DBG_GEN, "Setting rmd_max_nr to %d\n", rmd_max); -+ eip_rx->rmd_max_nr = rmd_max; -+ } else { -+ EIP_ERR_PRINTF("parameter error : %d <= rmd_max(%d) <= %d using default %d\n", EIP_RMD_MIN_NR, -+ rmd_max, EIP_RMD_MAX_NR, EIP_RMD_MAX_NR); -+ eip_rx->rmd_max_nr = EIP_RMD_MAX_NR; -+ } -+ -+ if ((rx_envelope_nr > 0) && (rx_envelope_nr <= 1024)) { /* > 1024 don't be silly */ -+ EIP_DBG_PRINTK(EIP_DBG_GEN, "Setting rx_envelope_nr to %d\n", rx_envelope_nr); -+ } else { -+ EIP_ERR_PRINTF("parameter error : 0 < rx_envelope_nr(%d) <= 1024 using default %d\n", -+ rx_envelope_nr, EIP_RX_ENVELOPE_NR); -+ rx_envelope_nr = EIP_RX_ENVELOPE_NR; -+ } -+ -+ if (tx_copybreak_max <= EIP_TX_COPYBREAK_MAX) { -+ EIP_DBG_PRINTF(EIP_DBG_GEN, "Setting tx_copybreak_max to %d\n", tx_copybreak_max); -+ } else { -+ EIP_ERR_PRINTF("parameter error : tx_copybreak_max > %d using default %d\n", -+ EIP_TX_COPYBREAK_MAX, EIP_TX_COPYBREAK_MAX); -+ tx_copybreak_max = EIP_TX_COPYBREAK_MAX; -+ } -+#ifdef EIP_MORE_STATS -+ eip_tx->sent_copybreak = 0; -+ eip_tx->sent_std = 0; -+ eip_tx->sent_aggreg = 0; -+#endif -+ -+ eip_tx->ipfrag_count = 0; -+ eip_aggregation_set(1); -+ eip_rx_granularity_set(rx_granularity); -+ eip_tx_copybreak_set(EIP_TX_COPYBREAK); -+ eip_ipfrag_to_set(EIP_IPFRAG_TO); -+ eip_ipfrag_copybreak_set(EIP_IPFRAG_COPYBREAK); -+ -+ spin_lock_init(&eip_tx->lock); -+ spin_lock_init(&eip_tx->ipfraglock); -+ spin_lock_init(&eip_rx->lock); -+ tasklet_init(&eip_rx->tasklet, eip_rx_tasklet, 0); -+ tasklet_init(&eip_tx->tasklet, eip_tx_tasklet, 0); -+ INIT_LIST_HEAD(&eip_tx->ipfrag); -+ INIT_LIST_HEAD(&eip_tx->inuse); -+ -+ /* if we fail here cannot do much yet; waiting for rcvr remove code in ep. 
*/
-+	errno = eip_tmds_alloc();
-+	if (errno)
-+		goto out;
-+
-+	errno = eip_rmds_alloc();
-+	if (errno)
-+		goto out;
-+
-+	errno = eip_stats_init();
-+	if (errno)
-+		goto out;
-+
-+	if (ep_svc_indicator_set(eip_tx->ep_system, EP_SVC_EIP) != EP_SUCCESS) {
-+		EIP_ERR_PRINTF("Cannot set the service indicator\n");
-+		errno = -EINVAL;
-+		goto out;
-+	}
-+
-+	eip_rx_tasklet_locked = 0;
-+	tasklet_schedule(&eip_rx->tasklet);
-+
-+	SET_MODULE_OWNER(eip_tx->net_device);
-+
-+	if (register_netdev(devnet)) {
-+		printk("eip: failed to register netdev\n");
-+		errno = -ENODEV;
-+		goto out;
-+	}
-+
-+	EIP_DBG_PRINTK(EIP_DBG_GEN, "iface %s MAC %02x:%02x:%02x:%02x:%02x:%02x ready\n",
-+		       devnet->name, (devnet->dev_addr[0]) & 0xff,
-+		       (devnet->dev_addr[1]) & 0xff, (devnet->dev_addr[2]) & 0xff, (devnet->dev_addr[3]) & 0xff,
-+		       (devnet->dev_addr[4]) & 0xff, (devnet->dev_addr[5]) & 0xff);
-+
-+	return 0;
-+ out:
-+	/* no path reaching here has successfully registered the netdev,
-+	 * so only the device structure itself needs freeing */
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 25)
-+	kfree(devnet);
-+#else
-+	free_netdev(devnet);
-+#endif
-+
-+	return errno;
-+}
-+void eip_exit(void)
-+{
-+	int i;
-+
-+	eip_rx_dropping = 1;	/* means that new messages won't be sent to the TCP stack */
-+	eip_rx_tasklet_locked = 1;
-+
-+	netif_stop_queue(eip_tx->net_device);
-+
-+	if (ep_svc_indicator_clear(eip_tx->ep_system, EP_SVC_EIP) != EP_SUCCESS) {
-+		EIP_ERR_PRINTF("Cannot unset the service indicator\n");
-+	}
-+
-+	schedule_timeout(10);
-+
-+	del_timer_sync (&eip_rx_tasklet_timer);
-+
-+	tasklet_disable(&eip_rx->tasklet);
-+	tasklet_disable(&eip_tx->tasklet);
-+
-+	tasklet_kill(&eip_tx->tasklet);
-+	tasklet_kill(&eip_rx->tasklet);
-+
-+	eip_rmds_free();
-+	eip_tmds_free();
-+
-+	/* check that everything has been freed */
-+	for (i = 0 ; i < EIP_SVC_NR ; i++) {
-+		if ( EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats) != 0 )
-+			EIP_ERR_PRINTF("%d RMDs not FREED on SVC[%d]\n", EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats), i);
-+	}
-+	for (i = 0 ; i < 3 ; i++) {
-+		if ( EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats) != 0 )
-+			EIP_ERR_PRINTF("%d TMDs not freed on TX HEAD[%d]\n", EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats), i);
-+
-+	}
-+	unregister_netdev(eip_tx->net_device);
-+	kfree(eip_tx->net_device);
-+
-+	eip_stats_cleanup();
-+}
-+
-+module_init(eip_init);
-+module_exit(eip_exit);
-+
-+module_param(eipdebug, uint, 0);
-+MODULE_PARM_DESC(eipdebug, "Set debug flags");
-+
-+module_param(rx_envelope_nr, uint, 0);
-+MODULE_PARM_DESC(rx_envelope_nr, "Number of envelopes allocated on the rx side");
-+
-+module_param(tx_copybreak_max, uint, 0);
-+MODULE_PARM_DESC(tx_copybreak_max, "Maximum size of the tx copybreak limit (default 512)");
-+
-+module_param(tmd_max, uint, 0);
-+module_param(rmd_max, uint, 0);
-+MODULE_PARM_DESC(tmd_max, "Maximum number of transmit buffers (default 64)");
-+MODULE_PARM_DESC(rmd_max, "Maximum number of receive buffers (default 64)");
-+
-+module_param(tx_railmask, ushort, 0);
-+MODULE_PARM_DESC(tx_railmask, "Mask of which rails transmits can be queued on");
-+
-+MODULE_AUTHOR("Quadrics Ltd.");
-+MODULE_DESCRIPTION("Elan IP driver");
-+MODULE_LICENSE("GPL");
-+#endif /* MODULE */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * End:
-+ */
-diff -urN clean/drivers/net/qsnet/eip/eip_linux.h linux-2.6.9/drivers/net/qsnet/eip/eip_linux.h
---- clean/drivers/net/qsnet/eip/eip_linux.h	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/eip/eip_linux.h	2004-10-01 06:49:29.000000000 -0400
-@@ -0,0 +1,399 @@
-+/*
-+ * Copyright (c) 2003 by Quadrics Ltd.
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "$Id: eip_linux.h,v 1.47 2004/10/01 10:49:29 mike Exp $" -+ -+#ifndef __EIP_LINUX_H -+#define __EIP_LINUX_H -+ -+#define EIP_WATERMARK (0xfab1e) -+ -+#define EIP_PAGES(s) (((s - 1) >> PAGE_SHIFT) + 1) -+#define EIP_DVMA_PAGES(s) ((s < PAGE_SIZE) ? EIP_PAGES(s) + 1 : EIP_PAGES(s)) -+ -+#define EIP_SVC_SMALLEST_LEN (1 << 9) /* 512 */ -+#define EIP_SVC_BIGGEST_LEN (1 << 16) /* 64k */ -+ -+#define EIP_SVC_SMALLEST (0) -+#define EIP_SVC_BIGGEST (7) -+ -+#define EIP_SVC_NR (8) -+#define EIP_SVC_EP(s) (s + EP_MSG_SVC_EIP512) -+ -+#define EIP_STAT_ALLOC_SHIFT (8) -+#define EIP_STAT_ALLOC_GET(atomicp) ((int) atomic_read(atomicp) >> EIP_STAT_ALLOC_SHIFT) -+#define EIP_STAT_ALLOC_ADD(atomicp, v) (atomic_add((v << EIP_STAT_ALLOC_SHIFT), atomicp)) -+#define EIP_STAT_ALLOC_SUB(atomicp, v) (atomic_sub((v << EIP_STAT_ALLOC_SHIFT), atomicp)) -+ -+#define EIP_STAT_QUEUED_MASK (0xff) -+#define EIP_STAT_QUEUED_GET(atomicp) ((int) atomic_read(atomicp) & EIP_STAT_QUEUED_MASK) -+ -+#define EIP_RMD_NR (8) -+#define EIP_RMD_MIN_NR (8) -+#define EIP_RMD_MAX_NR (64) /* should be < than (1 << EIP_STAT_ALLOC_SHIFT) */ -+ -+#define EIP_RMD_ALLOC_STEP (8) -+#define EIP_RMD_ALLOC_THRESH (16) -+ -+#define EIP_RMD_ALLOC (1) -+#define EIP_RMD_REPLACE (0) -+ -+#define EIP_TMD_NR (64) -+#define EIP_TMD_MIN_NR (16) -+#define EIP_TMD_MAX_NR (64) /* should be < than (1 << EIP_STAT_ALLOC_SHIFT) */ -+ -+#define EIP_TMD_TYPE_NR (3) -+#define EIP_TMD_COPYBREAK (0x0) -+#define EIP_TMD_STD (0x1) -+#define EIP_TMD_AGGREG (0x2) -+ -+#define EIP_TX_COPYBREAK (512) -+#define EIP_TX_COPYBREAK_MAX (1024) -+ -+#define EIP_IPFRAG_TO (50) /* time out before a frag is sent in msec */ -+#define EIP_IPFRAG_COPYBREAK (EIP_SVC_BIGGEST_LEN - sizeof(EIP_IPFRAG) - EIP_HEADER_PAD) -+ -+#define EIP_RX_ENVELOPE_NR ((EIP_RMD_MAX_NR*EIP_SVC_NR)/2) -+#define EIP_RX_GRANULARITY (1) -+ -+#define EIP_IP_ALIGN(X) (((X) + (15)) & ~(15)) -+#define EIP_EXTRA roundup (sizeof(EIP_RMD), 256) -+#define EIP_RCV_DMA_LEN(s) (s - EIP_EXTRA - EIP_HEADER_PAD) -+#define EIP_MTU_MAX (EIP_RCV_DMA_LEN(EIP_SVC_BIGGEST_LEN) - (ETH_HLEN)) -+ -+#define SIZE_TO_SVC(s, svc) \ -+ do { \ -+ if (s <= EIP_RCV_DMA_LEN((1 << 9))) {svc = 0;break;} \ -+ if (s <= EIP_RCV_DMA_LEN((1 << 10))) {svc = 1;break;} \ -+ if (s <= EIP_RCV_DMA_LEN((1 << 11))) {svc = 2;break;} \ -+ if (s <= EIP_RCV_DMA_LEN((1 << 12))) {svc = 3;break;} \ -+ if (s <= EIP_RCV_DMA_LEN((1 << 13))) {svc = 4;break;} \ -+ if (s <= EIP_RCV_DMA_LEN((1 << 14))) {svc = 5;break;} \ -+ if (s <= EIP_RCV_DMA_LEN((1 << 15))) {svc = 6;break;} \ -+ if (s <= EIP_RCV_DMA_LEN((1 << 16))) {svc = 7;break;} \ -+ svc = -666; \ -+ EIP_ASSERT(1 == 0); \ -+ } while (0) -+ -+extern int eipdebug; -+#define EIP_ASSERT_ON -+/* #define NO_DEBUG */ -+ -+ -+/* ######################## */ -+#ifdef NO_DEBUG -+#define __EIP_DBG_PRINTF(fmt, args...) -+#define EIP_DBG_PRINTF(flag, fmt, args...) -+#else -+ -+#define EIP_DBG_RMD 0x1 -+#define EIP_DBG_TMD 0x2 -+#define EIP_DBG_RMD_HEAD 0x4 -+#define EIP_DBG_TMD_HEAD 0x8 -+#define EIP_DBG_EIPH 0x10 -+#define EIP_DBG_IPH 0x20 -+#define EIP_DBG_RMD_EP_DVMA 0x40 -+#define EIP_DBG_TMD_EP_DVMA 0x80 -+#define EIP_DBG_EP_DVMA (EIP_DBG_RMD_EP_DVMA|EIP_DBG_TMD_EP_DVMA) -+#define EIP_DBG_MEMALLOC 0x100 -+#define EIP_DBG_MEMFREE 0x200 -+#define EIP_DBG_RMD_QUEUE 0x400 -+#define EIP_DBG_TMD_QUEUE 0x800 -+#define EIP_DBG_GEN 0x1000 -+#define EIP_DBG_DEBUG 0x2000 -+ -+#define __EIP_DBG_PRINTF(fmt, args...) 
(qsnet_debugf (QSNET_DEBUG_BUFFER, " CPU #%d %s: " fmt, smp_processor_id(), __func__, ## args)) -+#define EIP_DBG_PRINTF(flag, fmt, args...) (unlikely(eipdebug & flag) ? __EIP_DBG_PRINTF(fmt, ## args):(void)0) -+ -+#define __EIP_DBG_PRINTK(fmt, args...) (qsnet_debugf (QSNET_DEBUG_BUF_CON, " CPU #%d %s: " fmt, smp_processor_id(), __func__, ## args)) -+#define EIP_DBG_PRINTK(flag, fmt, args...) (unlikely(eipdebug & flag) ? __EIP_DBG_PRINTF(fmt, ## args):(void)0) -+ -+#define EIP_ERR_PRINTF(fmt, args...) __EIP_DBG_PRINTK("!!! ERROR !!! - " fmt, ## args) -+ -+ -+#define EIP_DBG2(flag, fn, fn_arg, fmt, args...) \ -+ if (unlikely(eipdebug & flag)) { \ -+ qsnet_debugf (QSNET_DEBUG_BUFFER, "+CPU #%d %s: " fmt, smp_processor_id(), __func__, ##args); \ -+ (void)(fn)(fn_arg); \ -+ qsnet_debugf (QSNET_DEBUG_BUFFER, "-CPU #%d %s: " fmt, smp_processor_id(), __func__, ##args); \ -+ } -+ -+ -+#define EIP_DBG(flag, fn, args...) \ -+ if (unlikely(eipdebug & flag)) { \ -+ qsnet_debugf (QSNET_DEBUG_BUFFER, "+CPU #%d %s\n", smp_processor_id(), __func__); \ -+ (void)(fn)(args); \ -+ qsnet_debugf (QSNET_DEBUG_BUFFER, "-CPU #%d %s :\n", smp_processor_id(), __func__); \ -+ } -+#endif /* NO_DEBUG */ -+ -+ -+#ifdef EIP_ASSERT_ON -+ -+#define __EIP_ASSERT_PRINT(exp) \ -+ eipdebug = 0xffff; \ -+ EIP_ERR_PRINTF("ASSERT : %s, %s::%d\n", \ -+ #exp, __BASE_FILE__, __LINE__); -+ -+#define EIP_ASSERT(exp) \ -+ if (!(exp)) { \ -+ __EIP_ASSERT_PRINT(exp); \ -+ netif_stop_queue(eip_tx->net_device); \ -+ } -+ -+#define EIP_ASSERT2(exp, f, arg) \ -+ do { \ -+ if (!(exp)) { \ -+ __EIP_ASSERT_PRINT(exp); \ -+ f(arg); \ -+ } \ -+ } while (0) -+ -+#define EIP_ASSERT_BUG(exp) \ -+ do { \ -+ if (!(exp)) { \ -+ __EIP_ASSERT_PRINT(exp); \ -+ BUG(); \ -+ } \ -+ } while (0) -+ -+#define EIP_ASSERT_GOTO(exp, label, f, arg) \ -+ do { \ -+ if (!(exp)) { \ -+ __EIP_ASSERT_PRINT(exp); \ -+ f(arg); \ -+ goto label; \ -+ } \ -+ } while (0) -+ -+#define EIP_ASSERT_RET(exp, ret) \ -+ do { \ -+ if (!(exp)) { \ -+ __EIP_ASSERT_PRINT(exp); \ -+ return ret; \ -+ } \ -+ } while (0) -+ -+#define EIP_ASSERT_RETURN(exp, f, arg) \ -+ do { \ -+ if (!(exp)) { \ -+ __EIP_ASSERT_PRINT(exp); \ -+ f(arg); \ -+ return; \ -+ } \ -+ } while (0) -+ -+#define EIP_ASSERT_RETNULL(exp, f, arg) \ -+ do { \ -+ if (!(exp)) { \ -+ __EIP_ASSERT_PRINT(exp); \ -+ f(arg); \ -+ return NULL; \ -+ } \ -+ } while (0) -+ -+#else -+ -+#define EIP_ASSERT(exp) do {} while(0) -+#define EIP_ASSERT_OUT(exp) do {} while(0) -+#define EIP_ASSERT_RETURN(exp) do {} while(0) -+#define EIP_ASSERT_RETNULL(exp) do {} while(0) -+#define EIP_ASSERT_BUG(exp) do {} while(0) -+ -+#endif /* EIP_ASSERT */ -+ -+ -+ -+typedef struct { -+ u_short ip_bcast; -+ u_short ip_inst; -+ u_short ip_addr; -+} EIP_ADDRESS; -+ -+typedef struct { -+ EIP_ADDRESS h_dhost; -+ EIP_ADDRESS h_shost; -+ u_short h_sap; -+} EIP_HEADER; -+#define EIP_HEADER_PAD (2) -+ -+typedef struct eip_proc_fs { -+ const char *name; -+ struct proc_dir_entry **parent; -+ read_proc_t *read; -+ write_proc_t *write; -+ unsigned char allocated; -+ struct proc_dir_entry *entry; -+} EIP_PROC_FS; -+ -+#define EIP_PROC_ROOT_DIR "eip" -+ -+#define EIP_PROC_DEBUG_DIR "debug" -+#define EIP_PROC_DEBUG_RX_FLUSH "rx_flush" -+#define EIP_PROC_DEBUG_TX_FLUSH "tx_flush" -+ -+#define EIP_PROC_AGGREG_DIR "aggregation" -+#define EIP_PROC_AGGREG_ONOFF "enable" -+#define EIP_PROC_AGGREG_TO "timeout" -+#define EIP_PROC_AGGREG_COPYBREAK "copybreak" -+ -+#define EIP_PROC_TX_COPYBREAK "tx_copybreak" -+#define EIP_PROC_STATS "stats" -+#define EIP_PROC_RX_GRAN 
"rx_granularity" -+#define EIP_PROC_TX_RAILMASK "tx_railmask" -+#define EIP_PROC_TMD_INUSE "tmd_inuse" -+#define EIP_PROC_EIPDEBUG "eipdebug" -+#define EIP_PROC_CHECKSUM "checksum" -+ -+/* RX */ -+/* dma_len is used to keep the len of a received packet */ -+/* nmd.nmd_len is the max dma that can be received */ -+/* */ -+struct eip_rmd { -+ struct sk_buff *skb; -+ -+ EP_NMD nmd; -+ u16 dvma_idx; -+ -+ EP_RXD *rxd; -+ struct eip_rmd_head *head; -+ union { -+ struct list_head link; /* when on "busy" list */ -+ struct eip_rmd *next; /* all other lists */ -+ } chain; -+}; -+typedef struct eip_rmd EIP_RMD; -+struct eip_rmd_head { -+ EP_NMH *handle; -+ -+ EP_RCVR *rcvr; -+ EIP_RMD *busy_list; -+ -+ /* stats */ -+ atomic_t stats; -+ unsigned long dma; -+}; -+ -+typedef struct eip_rmd_head EIP_RMD_HEAD; -+typedef struct eip_rx { -+ struct eip_rmd_head head[EIP_SVC_NR]; -+ -+ EIP_RMD *irq_list; -+ short irq_list_nr; -+ -+ /* stats */ -+ unsigned long packets; -+ unsigned long bytes; -+ unsigned long errors; -+ unsigned long dropped; -+ unsigned long reschedule; -+ -+ spinlock_t lock; -+ struct tasklet_struct tasklet; -+ unsigned char rmd_max_nr; -+ unsigned char sysctl_granularity; -+ struct net_device *net_device; -+} EIP_RX; -+ -+/* TX */ -+/* dma_len_max is the maximum len for a given DMA */ -+/* where mnd.nmd_len is the len of the packet to send ~> than skb->len */ -+typedef struct eip_ipfrag_handle { -+ /* common with tmd */ -+ unsigned long dma_base; -+ int dma_len; -+ EP_NMD nmd; -+ u16 dvma_idx; -+ -+ struct sk_buff *skb; -+ struct eip_tmd_head *head; -+ union { -+ struct list_head link; /* when on "busy" list */ -+ struct eip_tmd *next; /* all other lists */ -+ } chain; -+ -+ /* private */ -+ struct list_head list; -+ struct timeval timestamp; -+ unsigned int frag_nr; -+ int datagram_len; /* Ip data */ -+ int dma_correction; -+ EP_PAYLOAD payload; -+} EIP_IPFRAG; -+ -+struct eip_tmd { -+ unsigned long dma_base; -+ int dma_len; -+ EP_NMD nmd; -+ u16 dvma_idx; -+ -+ struct sk_buff *skb; -+ struct eip_tmd_head *head; -+ union { -+ struct list_head link; /* when on "busy" list */ -+ struct eip_tmd *next; /* all other lists */ -+ } chain; -+}; -+ -+struct eip_tmd_head { -+ EP_NMH *handle; -+ -+ struct eip_tmd *tmd; -+ atomic_t stats; -+}; -+ -+typedef struct eip_tmd EIP_TMD; -+typedef struct eip_tmd_head EIP_TMD_HEAD; -+ -+/* #define EIP_MORE_STATS */ -+ -+typedef struct eip_tx { -+ struct net_device *net_device; -+ EP_XMTR *xmtr; -+ EP_SYS *ep_system; -+ -+ struct eip_tmd_head head[EIP_TMD_TYPE_NR]; -+ struct list_head inuse; -+ atomic_t destructor; -+ -+ /* stats */ -+ unsigned long packets; -+ unsigned long bytes; -+ unsigned long errors; -+ unsigned long dropped; -+ unsigned long dma[EIP_SVC_NR]; -+ -+#ifdef EIP_MORE_STATS -+ unsigned long sent_copybreak; -+ unsigned long sent_std; -+ unsigned long sent_aggreg; -+#endif -+ -+ unsigned char tmd_max_nr; -+ -+ unsigned short sysctl_copybreak; -+ unsigned short sysctl_ipfrag_to; -+ unsigned short sysctl_ipfrag_copybreak; -+ unsigned short sysctl_aggregation; -+ -+ unsigned short ipfrag_count; -+ struct list_head ipfrag; -+ spinlock_t ipfraglock; -+ -+ spinlock_t lock; -+ struct tasklet_struct tasklet; -+} EIP_TX; -+ -+/* =============================================== */ -+ /* unsigned long multicast; */ -+#endif /* __EIP_LINUX_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "linux" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/eip/eip_stats.c linux-2.6.9/drivers/net/qsnet/eip/eip_stats.c ---- clean/drivers/net/qsnet/eip/eip_stats.c 
1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/eip/eip_stats.c 2005-09-07 10:34:58.000000000 -0400 -@@ -0,0 +1,374 @@ -+/* -+ * Copyright (c) 2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+/* -+ * $Id: eip_stats.c,v 1.36.2.2 2005/09/07 14:34:58 mike Exp $ -+ * $Source: /cvs/master/quadrics/eipmod/eip_stats.c,v $ -+ */ -+ -+#include -+#include -+#include -+ -+#include -+ -+#include -+#include -+ -+#include -+ -+#include -+ -+#include "eip_linux.h" -+#include "eip_stats.h" -+ -+extern EIP_RX *eip_rx; -+extern EIP_TX *eip_tx; -+extern int tx_copybreak_max; -+extern EP_RAILMASK tx_railmask; -+extern int eip_checksum_state; -+extern void eip_stop_queue(void); -+extern void eip_start_queue(void); -+ -+static int eip_stats_read(char *buf, char **start, off_t off, int count, int *eof, void *data) -+{ -+ int i, outlen = 0; -+ -+ *buf = '\0'; -+ strcat(buf, "\n"); -+ strcat(buf, "--------------------------------------------+------------+-----------------+\n"); -+ strcat(buf, " SKB/DMA | | Rx | Tx | TMD TYPE |\n"); -+ strcat(buf, "--------------------------------------------+------------|-----------------+\n"); -+ -+ i = 0; -+ sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld | #1[%3.3d/%3.3d/%3.3d] |\n", -+ EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)), -+ EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats), -+ eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i], -+ EIP_STAT_QUEUED_GET(&eip_tx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats), -+ eip_tx->tmd_max_nr); -+ -+ i++; -+ sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld | #2[%3.3d/%3.3d/%3.3d] |\n", -+ EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)), -+ EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats), -+ eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i], -+ EIP_STAT_QUEUED_GET(&eip_tx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats), -+ eip_tx->tmd_max_nr); -+ -+ i++; -+ sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld | #3[%3.3d/%3.3d/%3.3d] |\n", -+ EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)), -+ EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats), -+ eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i], -+ EIP_STAT_QUEUED_GET(&eip_tx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats), -+ eip_tx->tmd_max_nr); -+ -+ i++; -+ sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld +-----------------+\n", -+ EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)), -+ EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats), -+ eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]); -+ -+ i++; -+ sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld |\n", -+ EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)), -+ EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats), -+ eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]); -+ -+ i++; -+ sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld |\n", -+ EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)), -+ EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), 
EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats), -+ eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]); -+ -+ i++; -+ sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld |\n", -+ EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)), -+ EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats), -+ eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]); -+ -+ i++; -+ sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld |\n", -+ EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)), -+ EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats), -+ eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]); -+ -+ strcat(buf, "--------------------------------------------+------------+\n"); -+ sprintf(buf + strlen(buf), " RMD IRQ %4.4d %10lu | %10lu |\n", -+ eip_rx->irq_list_nr, -+ eip_rx->packets, eip_tx->packets); -+ strcat(buf, "--------------------------------------------+------------+\n"); -+ -+#ifdef EIP_MORE_STATS -+ strcat(buf, "\n"); -+ sprintf(buf + strlen(buf), " Copybreak %10ld Std %10ld Aggreg %10ld\n", -+ eip_tx->sent_copybreak, eip_tx->sent_std, eip_tx->sent_aggreg); -+#endif -+ -+ -+ strcat(buf, "\n"); -+ sprintf(buf + strlen(buf), "Rx bytes: %lu (%lu Mb) errors: %lu dropped: %lu reschedule: %lu\n", -+ eip_rx->bytes, eip_rx->bytes / (1024 * 1024), eip_rx->errors, eip_rx->dropped, eip_rx->reschedule); -+ sprintf(buf + strlen(buf), "Tx bytes: %lu (%lu Mb) errors: %lu dropped: %lu\n", -+ eip_tx->bytes, eip_tx->bytes / (1024 * 1024), eip_tx->errors, eip_tx->dropped); -+ strcat(buf, "\n"); -+ -+ outlen = strlen(buf); -+ ASSERT(outlen < PAGE_SIZE); -+ *eof = 1; -+ return outlen; -+} -+ -+void eip_stats_dump(void) -+{ -+ int eof; -+ -+ char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL); -+ -+ if (buf == NULL) -+ { -+ printk("no memory to produce eip_stats\n"); -+ return; -+ } -+ -+ eip_stats_read(buf, NULL, 0, 0, &eof, NULL); -+ -+ printk(buf); -+ -+ kfree(buf); -+} -+ -+static int eip_stats_write(struct file *file, const char *buf, unsigned long count, void *data) -+{ -+ int i; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&eip_rx->lock, flags); -+ eip_rx->packets = 0; -+ eip_rx->bytes = 0; -+ eip_rx->errors = 0; -+ eip_rx->dropped = 0; -+ eip_rx->reschedule = 0; -+ for (i = 0; i < EIP_SVC_NR; eip_rx->head[i].dma = 0, i++); -+ spin_unlock_irqrestore(&eip_rx->lock, flags); -+ -+ spin_lock_irqsave(&eip_tx->lock, flags); -+ eip_tx->packets = 0; -+ eip_tx->bytes = 0; -+ eip_tx->errors = 0; -+ eip_tx->dropped = 0; -+#ifdef EIP_MORE_STATS -+ eip_tx->sent_copybreak = 0; -+ eip_tx->sent_std = 0; -+ eip_tx->sent_aggreg = 0; -+#endif -+ for (i = 0; i < EIP_SVC_NR; eip_tx->dma[i] = 0, i++); -+ spin_unlock_irqrestore(&eip_tx->lock, flags); -+ -+ return count; -+} -+ -+#define eip_stats_var_write(name) \ -+static int eip_stats_##name##_write(struct file *file, const char *buf, unsigned long count, void *data) \ -+{ \ -+ char * b = (char *) buf; \ -+ *(b + count) = '\0'; \ -+ eip_##name##_set((int) simple_strtoul(b, NULL, 10)); \ -+ return count; \ -+} -+ -+#define eip_stats_var_read(name, var) \ -+static int eip_stats_##name##_read(char *buf, char **start, off_t off, int count, int *eof, void *data) \ -+{ \ -+ sprintf(buf, "%d\n", var); \ -+ *eof = 1; \ -+ return strlen(buf); \ -+} -+ -+ -+#define eip_stats_var_set(name, min, max, default, var) \ -+void eip_##name##_set(int i) \ -+{ \ -+ if ( (i >= min) && (i <= max)) { \ -+ 
EIP_DBG_PRINTK(EIP_DBG_GEN, "Setting " #name " to %d\n", i); \ -+ var =(unsigned short) i; \ -+ } \ -+ else { \ -+ EIP_ERR_PRINTF("parameter error : %d <= " #name "(%d) <= %d using default %d\n", min, i, (int) max, (int) default); \ -+ } \ -+} -+ -+eip_stats_var_set(tx_copybreak, 0, tx_copybreak_max, EIP_TX_COPYBREAK, eip_tx->sysctl_copybreak); -+eip_stats_var_set(rx_granularity, 1, EIP_RMD_MIN_NR, EIP_RX_GRANULARITY, eip_rx->sysctl_granularity); -+eip_stats_var_set(tx_railmask, 0, EP_RAILMASK_ALL, EP_RAILMASK_ALL, tx_railmask); -+eip_stats_var_set(ipfrag_to, 0, (1 << 16), EIP_IPFRAG_TO, eip_tx->sysctl_ipfrag_to); -+eip_stats_var_set(aggregation, 0, 1, 1, eip_tx->sysctl_aggregation); -+eip_stats_var_set(ipfrag_copybreak, 0, EIP_IPFRAG_COPYBREAK, EIP_IPFRAG_COPYBREAK, eip_tx->sysctl_ipfrag_copybreak); -+/* eip_stats_var_set(eipdebug, 0, , 0, eipdebug); */ -+ -+eip_stats_var_read(aggregation, eip_tx->sysctl_aggregation); -+eip_stats_var_read(ipfrag_count, eip_tx->ipfrag_count); -+eip_stats_var_read(ipfrag_to, eip_tx->sysctl_ipfrag_to); -+eip_stats_var_read(ipfrag_copybreak, eip_tx->sysctl_ipfrag_copybreak); -+eip_stats_var_read(tx_copybreak, eip_tx->sysctl_copybreak); -+eip_stats_var_read(rx_granularity, eip_rx->sysctl_granularity); -+eip_stats_var_read(tx_railmask, tx_railmask); -+ -+eip_stats_var_write(aggregation); -+eip_stats_var_write(ipfrag_to); -+eip_stats_var_write(ipfrag_copybreak); -+eip_stats_var_write(tx_copybreak); -+eip_stats_var_write(rx_granularity); -+eip_stats_var_write(tx_railmask); -+ -+ -+static int eip_checksum_write(struct file *file, const char *buf, unsigned long count, void *data) -+{ -+ char * b = (char *) buf; -+ int value; -+ -+ *(b + count) = '\0'; -+ -+ value = (int) simple_strtoul(b, NULL, 10); -+ if ((value >= CHECKSUM_NONE) && (value <= CHECKSUM_UNNECESSARY)) -+ eip_checksum_state = value; -+ else -+ EIP_ERR_PRINTF("%d <= checksum(%d) <= %d using old value %d\n", CHECKSUM_NONE, value, CHECKSUM_UNNECESSARY, eip_checksum_state); -+ -+ return count; -+} -+ -+static int eip_checksum_read(char *buf, char **start, off_t off, int count, int *eof, void *data) -+{ -+ switch ( eip_checksum_state ) -+ { -+ case 0 : sprintf(buf, "0 CHECKSUM_NONE\n"); break; -+ case 1 : sprintf(buf, "1 CHECKSUM_HW\n"); break; -+ case 2 : sprintf(buf, "2 CHECKSUM_UNNECESSARY\n"); break; -+ default : sprintf(buf, "%d INVALID VALUE\n", eip_checksum_state); break; -+ } -+ *eof = 1; -+ return strlen(buf); -+} -+ -+static int eip_stats_eipdebug_read(char *buf, char **start, off_t off, int count, int *eof, void *data) -+{ -+ *buf = '\0'; -+ sprintf(buf + strlen(buf), "0x%x\n", eipdebug); -+ *eof = 1; -+ return strlen(buf); -+} -+static int eip_stats_eipdebug_write(struct file *file, const char *buf, unsigned long count, void *data) -+{ -+ char * p = (char *) buf; -+ *(p + count - 1) = '\0'; -+ eipdebug = simple_strtoul(p, NULL, 0); -+ __EIP_DBG_PRINTK("Setting eipdebug to 0x%x\n", eipdebug); -+ return count; -+} -+ -+static int eip_stats_tmd_inuse_read(char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ struct list_head *lp; -+ unsigned long flags; -+ unsigned int len = 0; -+ -+ spin_lock_irqsave(&eip_tx->lock, flags); -+ list_for_each (lp, &eip_tx->inuse) { -+ EIP_TMD *tmd = list_entry (lp, EIP_TMD, chain.link); -+ EIP_HEADER *eiph = (EIP_HEADER *) tmd->dma_base; -+ -+ len += sprintf(page+len, "tmd=%p id=%d len=%d\n", -+ tmd, eiph ? 
ntohs(eiph->h_dhost.ip_addr) : -1, -+ tmd->dma_len); -+ -+ if (len + 40 >= count) -+ break; -+ } -+ spin_unlock_irqrestore(&eip_tx->lock, flags); -+ -+ return qsnet_proc_calc_metrics (page, start, off, count, eof, len); -+} -+ -+static int eip_stats_debug_rx_flush(struct file *file, const char *buf, unsigned long count, void *data) -+{ -+ EIP_DBG_PRINTF(EIP_DBG_GEN, "Flushing rx ...\n"); -+ tasklet_schedule(&eip_rx->tasklet); -+ return count; -+} -+static int eip_stats_debug_tx_flush(struct file *file, const char *buf, unsigned long count, void *data) -+{ -+ EIP_DBG_PRINTF(EIP_DBG_GEN, "Flushing tx ... %d tmds reclaimed\n", ep_enable_txcallbacks(eip_tx->xmtr)); -+ ep_disable_txcallbacks(eip_tx->xmtr); -+ tasklet_schedule(&eip_tx->tasklet); -+ return count; -+} -+ -+#define EIP_PROC_PARENT_NR (3) -+/* NOTE : the parents should be declared b4 the children */ -+static EIP_PROC_FS eip_procs[] = { -+ /* {name, parent, read fn, write fn, allocated, entry}, */ -+ {EIP_PROC_ROOT_DIR, &qsnet_procfs_root, NULL, NULL, 0, NULL}, -+ {EIP_PROC_DEBUG_DIR, &eip_procs[0].entry, NULL, NULL, 0, NULL}, -+ {EIP_PROC_AGGREG_DIR, &eip_procs[0].entry, NULL, NULL, 0, NULL}, /* end of parents */ -+ {EIP_PROC_STATS, &eip_procs[0].entry, eip_stats_read, eip_stats_write, 0, NULL}, -+ {EIP_PROC_TX_COPYBREAK, &eip_procs[0].entry, eip_stats_tx_copybreak_read, eip_stats_tx_copybreak_write, 0, NULL}, -+ {EIP_PROC_RX_GRAN, &eip_procs[0].entry, eip_stats_rx_granularity_read, eip_stats_rx_granularity_write, 0, NULL}, -+ {EIP_PROC_TX_RAILMASK, &eip_procs[0].entry, eip_stats_tx_railmask_read, eip_stats_tx_railmask_write, 0, NULL}, -+ {EIP_PROC_TMD_INUSE, &eip_procs[0].entry, eip_stats_tmd_inuse_read, NULL, 0, NULL}, -+ {EIP_PROC_EIPDEBUG, &eip_procs[0].entry, eip_stats_eipdebug_read, eip_stats_eipdebug_write, 0, NULL}, -+ {EIP_PROC_CHECKSUM, &eip_procs[0].entry, eip_checksum_read, eip_checksum_write, 0, NULL}, -+ {EIP_PROC_DEBUG_RX_FLUSH, &eip_procs[1].entry, NULL, eip_stats_debug_rx_flush, 0, NULL}, -+ {EIP_PROC_DEBUG_TX_FLUSH, &eip_procs[1].entry, NULL, eip_stats_debug_tx_flush, 0, NULL}, -+ {"ipfrag_count", &eip_procs[2].entry, eip_stats_ipfrag_count_read, NULL, 0, NULL}, -+ {EIP_PROC_AGGREG_TO, &eip_procs[2].entry, eip_stats_ipfrag_to_read, eip_stats_ipfrag_to_write, 0, NULL}, -+ {EIP_PROC_AGGREG_ONOFF, &eip_procs[2].entry, eip_stats_aggregation_read, eip_stats_aggregation_write, 0, NULL}, -+ {EIP_PROC_AGGREG_COPYBREAK, &eip_procs[2].entry, eip_stats_ipfrag_copybreak_read, eip_stats_ipfrag_copybreak_write, 0, NULL}, -+ {NULL, NULL, NULL, NULL, 1, NULL}, -+}; -+ -+int eip_stats_init(void) -+{ -+ int p; -+ -+ for (p = 0; !eip_procs[p].allocated; p++) { -+ if (p < EIP_PROC_PARENT_NR) -+ eip_procs[p].entry = proc_mkdir(eip_procs[p].name, *eip_procs[p].parent); -+ else -+ eip_procs[p].entry = create_proc_entry(eip_procs[p].name, 0, *eip_procs[p].parent); -+ -+ if (!eip_procs[p].entry) { -+ EIP_ERR_PRINTF("%s\n", "Cannot allocate proc entry"); -+ eip_stats_cleanup(); -+ return -ENOMEM; -+ } -+ -+ eip_procs[p].entry->owner = THIS_MODULE; -+ eip_procs[p].entry->write_proc = eip_procs[p].write; -+ eip_procs[p].entry->read_proc = eip_procs[p].read; -+ eip_procs[p].allocated = 1; -+ } -+ eip_procs[p].allocated = 0; -+ return 0; -+} -+ -+void eip_stats_cleanup(void) -+{ -+ int p; -+ for (p = (sizeof (eip_procs)/sizeof (eip_procs[0]))-1; p >= 0; p--) -+ if (eip_procs[p].allocated) { -+ EIP_DBG_PRINTF(EIP_DBG_GEN, "Removing %s from proc\n", eip_procs[p].name); -+ remove_proc_entry(eip_procs[p].name, *eip_procs[p].parent); -+ } -+} -+ 
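-+/*
-+ * A note on the QUEUED/ALLOC pairs printed by eip_stats_read() above:
-+ * each rmd/tmd head packs two counters into a single atomic_t (see the
-+ * EIP_STAT_* macros in eip_linux.h): the allocated count is kept in the
-+ * bits above EIP_STAT_ALLOC_SHIFT and the queued count in the low 8
-+ * bits.  A minimal sketch of how the driver drives the pair:
-+ *
-+ *	atomic_t stats = ATOMIC_INIT(0);
-+ *
-+ *	EIP_STAT_ALLOC_ADD(&stats, 2);	  two descriptors allocated
-+ *	atomic_inc(&stats);		  one of them queued, as eip_tmd_put() does
-+ *	EIP_STAT_ALLOC_GET(&stats);	  returns 2
-+ *	EIP_STAT_QUEUED_GET(&stats);	  returns 1
-+ */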
-+/* -+ * Local variables: -+ * c-file-style: "linux" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/eip/eip_stats.h linux-2.6.9/drivers/net/qsnet/eip/eip_stats.h ---- clean/drivers/net/qsnet/eip/eip_stats.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/eip/eip_stats.h 2004-05-10 10:47:47.000000000 -0400 -@@ -0,0 +1,22 @@ -+/* -+ * Copyright (c) 2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "$Id: eip_stats.h,v 1.14 2004/05/10 14:47:47 daniel Exp $" -+ -+#ifndef __EIP_STATS_H -+#define __EIP_STATS_H -+ -+int eip_stats_init(void); -+void eip_stats_cleanup(void); -+void eip_rx_granularity_set(int); -+void eip_tx_copybreak_set(int); -+void eip_ipfrag_to_set(int); -+void eip_aggregation_set(int); -+void eip_ipfrag_copybreak_set(int); -+void eip_stats_dump(void); -+ -+#endif /* __EIP_STATS_H */ -diff -urN clean/drivers/net/qsnet/eip/Makefile linux-2.6.9/drivers/net/qsnet/eip/Makefile ---- clean/drivers/net/qsnet/eip/Makefile 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/eip/Makefile 2005-10-10 17:47:30.000000000 -0400 -@@ -0,0 +1,15 @@ -+# -+# Makefile for Quadrics QsNet -+# -+# Copyright (c) 2002-2004 Quadrics Ltd -+# -+# File: drivers/net/qsnet/eip/Makefile -+# -+ -+ -+# -+ -+obj-$(CONFIG_EIP) += eip.o -+eip-objs := eip_linux.o eip_stats.o -+ -+EXTRA_CFLAGS += -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT -diff -urN clean/drivers/net/qsnet/eip/Makefile.conf linux-2.6.9/drivers/net/qsnet/eip/Makefile.conf ---- clean/drivers/net/qsnet/eip/Makefile.conf 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/eip/Makefile.conf 2005-09-07 10:39:48.000000000 -0400 -@@ -0,0 +1,10 @@ -+# Flags for generating QsNet Linux Kernel Makefiles -+MODNAME = eip.o -+MODULENAME = eip -+KOBJFILES = eip_linux.o eip_stats.o -+EXPORT_KOBJS = -+CONFIG_NAME = CONFIG_EIP -+SGALFC = -+# EXTRALINES START -+ -+# EXTRALINES END -diff -urN clean/drivers/net/qsnet/eip/quadrics_version.h linux-2.6.9/drivers/net/qsnet/eip/quadrics_version.h ---- clean/drivers/net/qsnet/eip/quadrics_version.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/eip/quadrics_version.h 2005-09-07 10:39:49.000000000 -0400 -@@ -0,0 +1 @@ -+#define QUADRICS_VERSION "5.11.3qsnet" -diff -urN clean/drivers/net/qsnet/elan/bitmap.c linux-2.6.9/drivers/net/qsnet/elan/bitmap.c ---- clean/drivers/net/qsnet/elan/bitmap.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan/bitmap.c 2004-01-20 12:32:17.000000000 -0500 -@@ -0,0 +1,287 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: bitmap.c,v 1.5 2004/01/20 17:32:17 david Exp $" -+/* $Source: /cvs/master/quadrics/elanmod/shared/bitmap.c,v $*/ -+ -+#if defined(__KERNEL__) -+#include -+#endif -+#include -+#include -+ -+/* -+ * Return the index of the first available bit in the -+ * bitmap , or -1 for failure -+ */ -+int -+bt_freebit (bitmap_t *bitmap, int nbits) -+{ -+ int last = (--nbits) >> BT_ULSHIFT; -+ int maxbit; -+ int i, j; -+ -+ /* look for a word with a bit off */ -+ for (i = 0; i <= last; i++) -+ if (bitmap[i] != ~((bitmap_t) 0)) -+ break; -+ -+ if (i <= last) -+ { -+ /* found an word with a bit off, now see which bit it is */ -+ maxbit = (i == last) ? 
(nbits & BT_ULMASK) : (BT_NBIPUL-1); -+ for (j = 0; j <= maxbit; j++) -+ if ((bitmap[i] & (1 << j)) == 0) -+ return ((i << BT_ULSHIFT) | j); -+ } -+ return (-1); -+ -+} -+ -+/* -+ * bt_lowbit: -+ * Return the index of the lowest set bit in the -+ * bitmap, or -1 for failure. -+ */ -+int -+bt_lowbit (bitmap_t *bitmap, int nbits) -+{ -+ int last = (--nbits) >> BT_ULSHIFT; -+ int maxbit; -+ int i, j; -+ -+ /* look for a word with a bit on */ -+ for (i = 0; i <= last; i++) -+ if (bitmap[i] != 0) -+ break; -+ if (i <= last) -+ { -+ /* found a word bit a bit on, now see which bit it is */ -+ maxbit = (i == last) ? (nbits & BT_ULMASK) : (BT_NBIPUL-1); -+ for (j = 0; j <= maxbit; j++) -+ if (bitmap[i] & (1 << j)) -+ return ((i << BT_ULSHIFT) | j); -+ } -+ -+ return (-1); -+} -+ -+/* -+ * Return the index of the first available bit in the -+ * bitmap , or -1 for failure -+ */ -+int -+bt_nextbit (bitmap_t *bitmap, int nbits, int last, int isset) -+{ -+ int first = ((last+1) + BT_NBIPUL-1) >> BT_ULSHIFT; -+ int end = (--nbits) >> BT_ULSHIFT; -+ int maxbit; -+ int i, j; -+ -+ /* look for bits before the first whole word */ -+ if (((last+1) & BT_ULMASK) != 0) -+ { -+ maxbit = ((first-1) == last) ? (nbits & BT_ULMASK) : (BT_NBIPUL-1); -+ for (j = ((last+1) & BT_ULMASK); j <= maxbit; j++) -+ if ((bitmap[first-1] & (1 << j)) == (isset << j)) -+ return (((first-1) << BT_ULSHIFT) | j); -+ } -+ -+ /* look for a word with a bit off */ -+ for (i = first; i <= end; i++) -+ if (bitmap[i] != (isset ? 0 : ~((bitmap_t) 0))) -+ break; -+ -+ if (i <= end) -+ { -+ /* found an word with a bit off, now see which bit it is */ -+ maxbit = (i == end) ? (nbits & BT_ULMASK) : (BT_NBIPUL-1); -+ for (j = 0; j <= maxbit; j++) -+ if ((bitmap[i] & (1 << j)) == (isset << j)) -+ return ((i << BT_ULSHIFT) | j); -+ } -+ return (-1); -+} -+ -+void -+bt_copy (bitmap_t *a, bitmap_t *b, int nbits) -+{ -+ int i; -+ -+ for (i = 0; i < (nbits>>BT_ULSHIFT); i++) -+ b[i] = a[i]; -+ -+ for (i <<= BT_ULSHIFT; i < nbits; i++) -+ if (BT_TEST(a, i)) -+ BT_SET(b,i); -+ else -+ BT_CLEAR(b,i); -+} -+ -+void -+bt_zero (bitmap_t *bitmap, int nbits) -+{ -+ int i; -+ -+ for (i = 0; i < (nbits>>BT_ULSHIFT); i++) -+ bitmap[i] = 0; -+ -+ for (i <<= BT_ULSHIFT; i < nbits; i++) -+ BT_CLEAR(bitmap,i); -+} -+ -+void -+bt_fill (bitmap_t *bitmap, int nbits) -+{ -+ int i; -+ -+ for (i = 0; i < (nbits>>BT_ULSHIFT); i++) -+ bitmap[i] = ~((bitmap_t) 0); -+ -+ for (i <<= BT_ULSHIFT; i < nbits; i++) -+ BT_SET(bitmap,i); -+} -+ -+int -+bt_cmp (bitmap_t *a, bitmap_t *b, int nbits) -+{ -+ int i; -+ -+ for (i = 0; i < (nbits>>BT_ULSHIFT); i++) -+ if (a[i] != b[i]) -+ return (1); -+ -+ for (i <<= BT_ULSHIFT; i < nbits; i++) -+ if (BT_TEST (a, i) != BT_TEST(b, i)) -+ return (1); -+ return (0); -+} -+ -+void -+bt_intersect (bitmap_t *a, bitmap_t *b, int nbits) -+{ -+ int i; -+ -+ for (i = 0; i < (nbits>>BT_ULSHIFT); i++) -+ a[i] &= b[i]; -+ -+ for (i <<= BT_ULSHIFT; i < nbits; i++) -+ if (BT_TEST (a, i) && BT_TEST (b, i)) -+ BT_SET (a, i); -+ else -+ BT_CLEAR (a, i); -+} -+ -+void -+bt_remove (bitmap_t *a, bitmap_t *b, int nbits) -+{ -+ int i; -+ -+ for (i = 0; i < (nbits>>BT_ULSHIFT); i++) -+ a[i] &= ~b[i]; -+ -+ for (i <<= BT_ULSHIFT; i < nbits; i++) -+ if (BT_TEST (b, i)) -+ BT_CLEAR (a, i); -+} -+ -+void -+bt_add (bitmap_t *a, bitmap_t *b, int nbits) -+{ -+ int i; -+ -+ for (i = 0; i < (nbits>>BT_ULSHIFT); i++) -+ a[i] |= b[i]; -+ -+ for (i <<= BT_ULSHIFT; i < nbits; i++) -+ if (BT_TEST(b, i)) -+ BT_SET (a, i); -+} -+ -+/* -+ * bt_spans : partition a spans partition b -+ 
* == all bits set in 'b' are set in 'a' -+ */ -+int -+bt_spans (bitmap_t *a, bitmap_t *b, int nbits) -+{ -+ int i; -+ -+ for (i = 0; i < nbits; i++) -+ if (BT_TEST (b, i) && !BT_TEST (a, i)) -+ return (0); -+ return (1); -+} -+ -+/* -+ * bt_subset: copy [base,base+nbits-1] from 'a' to 'b' -+ */ -+void -+bt_subset (bitmap_t *a, bitmap_t *b, int base, int nbits) -+{ -+ int i; -+ -+ for (i = 0; i < nbits; i++) -+ { -+ if (BT_TEST (a, base+i)) -+ BT_SET(b,i); -+ else -+ BT_CLEAR (b,i); -+ } -+} -+ -+void -+bt_up (bitmap_t *a, bitmap_t *b, bitmap_t *c, int nbits) -+{ -+ int i; -+ -+ for (i = 0; i < nbits; i++) -+ { -+ if (!BT_TEST (a, i) && BT_TEST (b, i)) -+ { -+ BT_SET (c, i); -+ } -+ else -+ { -+ BT_CLEAR (c, i); -+ } -+ } -+} -+ -+void -+bt_down (bitmap_t *a, bitmap_t *b, bitmap_t *c, int nbits) -+{ -+ int i; -+ -+ for (i = 0; i < nbits; i++) -+ { -+ if (BT_TEST (a, i) && !BT_TEST (b, i)) -+ { -+ BT_SET (c, i); -+ } -+ else -+ { -+ BT_CLEAR (c, i); -+ } -+ } -+} -+ -+int -+bt_nbits (bitmap_t *a, int nbits) -+{ -+ int i, c; -+ for (i = 0, c = 0; i < nbits; i++) -+ if (BT_TEST (a, i)) -+ c++; -+ return (c); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan/capability.c linux-2.6.9/drivers/net/qsnet/elan/capability.c ---- clean/drivers/net/qsnet/elan/capability.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan/capability.c 2005-07-21 06:42:36.000000000 -0400 -@@ -0,0 +1,796 @@ -+/* -+ * Copyright (c) 2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: capability.c,v 1.19.2.2 2005/07/21 10:42:36 addy Exp $" -+/* $Source: /cvs/master/quadrics/elanmod/modsrc/capability.c,v $ */ -+ -+ -+#include -+#include -+ -+static LIST_HEAD(elan_cap_list); -+ -+typedef struct elan_vp_struct -+{ -+ struct list_head list; -+ ELAN_CAPABILITY vp; -+} ELAN_VP_NODE_STRUCT; -+ -+/* There is an array of these structs for each process/context in the CAP -+ * This is then replicated for each rail. 
The usercopy handle stuff is -+ * only maintained in rail 0 though -+ */ -+typedef struct elan_attached_struct -+{ -+ void *cb_args; -+ ELAN_DESTROY_CB cb_func; -+ struct task_struct *handle; /* usercopy: attached task handle */ -+ struct task_struct *owner; /* usercopy: attached task handle owner */ -+} ELAN_ATTACHED_STRUCT; -+ -+typedef struct elan_cap_node_struct -+{ -+ struct list_head list; -+ ELAN_CAP_STRUCT node; -+ ELAN_ATTACHED_STRUCT *attached[ELAN_MAX_RAILS]; -+ struct list_head vp_list; -+} ELAN_CAP_NODE_STRUCT; -+ -+ -+ELAN_CAP_NODE_STRUCT * -+find_cap_node(ELAN_CAPABILITY *cap) -+{ -+ struct list_head *tmp; -+ ELAN_CAP_NODE_STRUCT *ptr=NULL; -+ -+ list_for_each(tmp, &elan_cap_list) { -+ ptr = list_entry(tmp, ELAN_CAP_NODE_STRUCT , list); -+ /* is it an exact match (key not checked) */ -+ if ( ELAN_CAP_TYPE_MATCH(&ptr->node.cap,cap) -+ && ELAN_CAP_GEOM_MATCH(&ptr->node.cap,cap)) { -+ return ptr; -+ } -+ } -+ return ptr; -+} -+ -+ELAN_VP_NODE_STRUCT * -+find_vp_node( ELAN_CAP_NODE_STRUCT *cap_node,ELAN_CAPABILITY *map) -+{ -+ struct list_head * tmp; -+ ELAN_VP_NODE_STRUCT * ptr = NULL; -+ -+ list_for_each(tmp, &cap_node->vp_list) { -+ ptr = list_entry(tmp, ELAN_VP_NODE_STRUCT , list); -+ /* is it an exact match (key not checked) */ -+ if ( ELAN_CAP_TYPE_MATCH(&ptr->vp,map) -+ && ELAN_CAP_GEOM_MATCH(&ptr->vp,map)){ -+ return ptr; -+ } -+ } -+ return ptr; -+} -+ -+int -+elan_validate_cap(ELAN_CAPABILITY *cap) -+{ -+ char space[127]; -+ -+ ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_cap %s\n",elan_capability_string(cap,space)); -+ -+ /* check versions */ -+ if (cap->cap_version != ELAN_CAP_VERSION_NUMBER) -+ { -+ ELAN_DEBUG2 (ELAN_DBG_VP,"elan_validate_cap: (cap->Version != ELAN_CAP_VERSION) %d %d\n", cap->cap_version, ELAN_CAP_VERSION_NUMBER); -+ return (EINVAL); -+ } -+ -+ /* check its not HWTEST */ -+ if ( cap->cap_type & ELAN_CAP_TYPE_HWTEST ) -+ { -+ ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_cap: failed type = ELAN_CAP_TYPE_HWTEST \n"); -+ return (EINVAL); -+ } -+ -+ /* check its type */ -+ switch (cap->cap_type & ELAN_CAP_TYPE_MASK) -+ { -+ case ELAN_CAP_TYPE_KERNEL : -+ ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_cap: failed type = ELAN_CAP_TYPE_KERNEL \n"); -+ return (EINVAL); -+ -+ /* check it has a valid type */ -+ case ELAN_CAP_TYPE_BLOCK: -+ case ELAN_CAP_TYPE_CYCLIC: -+ break; -+ -+ /* all others are failed as well */ -+ default: -+ ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_cap: failed unknown type = %x \n", (cap->cap_type & ELAN_CAP_TYPE_MASK)); -+ return (EINVAL); -+ } -+ -+ if ((cap->cap_lowcontext == ELAN_CAP_UNINITIALISED) || (cap->cap_highcontext == ELAN_CAP_UNINITIALISED) -+ || (cap->cap_lownode == ELAN_CAP_UNINITIALISED) || (cap->cap_highnode == ELAN_CAP_UNINITIALISED)) -+ { -+ -+ ELAN_DEBUG4 (ELAN_DBG_VP,"elan_validate_cap: ELAN_CAP_UNINITIALISED LowNode %d HighNode %d LowContext %d highContext %d\n", -+ cap->cap_lownode , cap->cap_highnode, -+ cap->cap_lowcontext , cap->cap_highcontext); -+ return (EINVAL); -+ } -+ -+ if (cap->cap_lowcontext > cap->cap_highcontext) -+ { -+ ELAN_DEBUG2 (ELAN_DBG_VP,"elan_validate_cap: (cap->cap_lowcontext > cap->cap_highcontext) %d %d\n",cap->cap_lowcontext , cap->cap_highcontext); -+ return (EINVAL); -+ } -+ -+ if (cap->cap_lownode > cap->cap_highnode) -+ { -+ ELAN_DEBUG2 (ELAN_DBG_VP,"elan_validate_cap: (cap->cap_lownode > cap->cap_highnode) %d %d\n",cap->cap_lownode, cap->cap_highnode); -+ return (EINVAL); -+ } -+ -+ if (cap->cap_mycontext != ELAN_CAP_UNINITIALISED) -+ { -+ ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_cap: failed 
cap->cap_mycontext is set %d \n", cap->cap_mycontext); -+ return (EINVAL); -+ } -+ -+ -+ if ((ELAN_CAP_NUM_NODES(cap) * ELAN_CAP_NUM_CONTEXTS(cap)) > ELAN_MAX_VPS) -+ { -+ ELAN_DEBUG6 (ELAN_DBG_VP,"elan_validate_cap: too many vps LowNode %d HighNode %d LowContext %d highContext %d, %d >% d\n", -+ cap->cap_lownode , cap->cap_highnode, -+ cap->cap_lowcontext , cap->cap_highcontext, -+ (ELAN_CAP_NUM_NODES(cap) * ELAN_CAP_NUM_CONTEXTS(cap)), -+ ELAN_MAX_VPS); -+ -+ return (EINVAL); -+ } -+ -+ return (ESUCCESS); -+} -+ -+int -+elan_validate_map(ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map) -+{ -+ ELAN_CAP_NODE_STRUCT * ptr = NULL; -+ ELAN_VP_NODE_STRUCT * vptr = NULL; -+ char space[256]; -+ -+ ELANMOD_RWLOCK_READ(&elan_rwlock); -+ -+ ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map \n"); -+ ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_map cap = %s \n",elan_capability_string(cap,space)); -+ ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_map map = %s \n",elan_capability_string(map,space)); -+ -+ /* does cap exist */ -+ ptr = find_cap_node(cap); -+ if ( ptr == NULL ) -+ { -+ ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map: cap not found \n"); -+ ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock); -+ return EINVAL; -+ } -+ /* is it active */ -+ if ( ! ptr->node.active ) -+ { -+ ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map: cap not active \n"); -+ ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock); -+ return EINVAL; -+ } -+ -+ /* are they the same */ -+ if ( ELAN_CAP_TYPE_MATCH(cap,map) -+ && ELAN_CAP_GEOM_MATCH(cap,map)) -+ { -+ ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map: cap == map passed\n"); -+ ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock); -+ return ESUCCESS; -+ } -+ -+ /* is map in map list */ -+ vptr = find_vp_node(ptr, map); -+ if ( vptr == NULL ) -+ { -+ ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map: map not found\n"); -+ ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock); -+ return EINVAL; -+ } -+ -+ ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map: map passed\n"); -+ ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock); -+ return ESUCCESS; -+} -+ -+int -+elan_create_cap(ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap) -+{ -+ char space[127]; -+ struct list_head * tmp; -+ ELAN_CAP_NODE_STRUCT * ptr = NULL; -+ int i, rail; -+ -+ ELANMOD_RWLOCK_WRITE(&elan_rwlock); -+ -+ ELAN_DEBUG1 (ELAN_DBG_VP,"elan_create_cap %s\n",elan_capability_string(cap,space)); -+ -+ /* need to check that the cap does not over lap another one -+ or is an exact match with only the userkey changing */ -+ list_for_each(tmp, &elan_cap_list) { -+ ptr = list_entry(tmp, ELAN_CAP_NODE_STRUCT , list); -+ -+ /* is it an exact match (key not checked) */ -+ if ( ELAN_CAP_TYPE_MATCH(&ptr->node.cap,cap) -+ && ELAN_CAP_GEOM_MATCH(&ptr->node.cap,cap) -+ && (ptr->node.owner == owner)) { -+ if ( ptr->node.active ) { -+ /* dont inc attached count as its like a create */ -+ ptr->node.cap.cap_userkey = cap->cap_userkey; -+ ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock); -+ return ESUCCESS; -+ } -+ else -+ { -+ ELAN_DEBUG (ELAN_DBG_VP,"elan_create_cap failed %s\n", -+ elan_capability_string(&ptr->node.cap,space)); -+ ELAN_DEBUG (ELAN_DBG_VP,"elan_create_cap failed ptr %p owner %p attached %d\n", -+ ptr, owner, ptr->node.attached); -+ -+ ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock); -+ return EINVAL; -+ } -+ } -+ -+ /* does it overlap, even with ones being destroyed */ -+ if (elan_cap_overlap(&ptr->node.cap,cap)) -+ { -+ ELAN_DEBUG (ELAN_DBG_VP,"elan_create_cap failed overlap %s\n", -+ elan_capability_string(&ptr->node.cap,space)); -+ ELAN_DEBUG (ELAN_DBG_VP,"elan_create_cap failed overlap ptr %p owner %p attached %d active 
-+
-+int
-+elan_create_cap(ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap)
-+{
-+    char space[127];
-+    struct list_head     * tmp;
-+    ELAN_CAP_NODE_STRUCT * ptr = NULL;
-+    int                    i, rail;
-+
-+    ELANMOD_RWLOCK_WRITE(&elan_rwlock);
-+
-+    ELAN_DEBUG1 (ELAN_DBG_VP,"elan_create_cap %s\n",elan_capability_string(cap,space));
-+
-+    /* need to check that the cap does not overlap another one
-+       or is an exact match with only the userkey changing */
-+    list_for_each(tmp, &elan_cap_list) {
-+        ptr = list_entry(tmp, ELAN_CAP_NODE_STRUCT , list);
-+
-+        /* is it an exact match (key not checked) */
-+        if ( ELAN_CAP_TYPE_MATCH(&ptr->node.cap,cap)
-+             && ELAN_CAP_GEOM_MATCH(&ptr->node.cap,cap)
-+             && (ptr->node.owner == owner)) {
-+            if ( ptr->node.active ) {
-+                /* don't inc attached count as it's like a create */
-+                ptr->node.cap.cap_userkey = cap->cap_userkey;
-+                ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+                return ESUCCESS;
-+            }
-+            else
-+            {
-+                ELAN_DEBUG (ELAN_DBG_VP,"elan_create_cap failed %s\n",
-+                            elan_capability_string(&ptr->node.cap,space));
-+                ELAN_DEBUG (ELAN_DBG_VP,"elan_create_cap failed ptr %p owner %p attached %d\n",
-+                            ptr, owner, ptr->node.attached);
-+
-+                ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+                return EINVAL;
-+            }
-+        }
-+
-+        /* does it overlap, even with ones being destroyed */
-+        if (elan_cap_overlap(&ptr->node.cap,cap))
-+        {
-+            ELAN_DEBUG (ELAN_DBG_VP,"elan_create_cap failed overlap %s\n",
-+                        elan_capability_string(&ptr->node.cap,space));
-+            ELAN_DEBUG (ELAN_DBG_VP,"elan_create_cap failed overlap ptr %p owner %p attached %d active %d\n",
-+                        ptr, owner, ptr->node.attached, ptr->node.active);
-+
-+            ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+            return EACCES;
-+        }
-+    }
-+
-+    /* create it */
-+    KMEM_ALLOC(ptr, ELAN_CAP_NODE_STRUCT *, sizeof(ELAN_CAP_NODE_STRUCT), 1);
-+    if (ptr == NULL)
-+    {
-+        ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+        return ENOMEM;
-+    }
-+
-+    /* create per rail space for the attached array */
-+    for(rail=0;rail<ELAN_MAX_RAILS;rail++)
-+    {
-+        ptr->attached[rail]=NULL;
-+        /* GNAT 7685: Always need to allocate an attached structure in rail 0 for the usercopy device */
-+        if ( ELAN_CAP_IS_RAIL_SET(cap,rail) || rail == 0 )
-+        {
-+            KMEM_ALLOC(ptr->attached[rail], ELAN_ATTACHED_STRUCT *, sizeof(ELAN_ATTACHED_STRUCT) * ELAN_CAP_NUM_CONTEXTS(cap), 1);
-+            if (ptr->attached[rail] == NULL)
-+            {
-+                for(;rail>=0;rail--)
-+                    if ( ptr->attached[rail] )
-+                        KMEM_FREE(ptr->attached[rail], sizeof(ELAN_ATTACHED_STRUCT) * ELAN_CAP_NUM_CONTEXTS(cap));
-+
-+                KMEM_FREE(ptr, sizeof(ELAN_CAP_NODE_STRUCT));
-+                ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+                return ENOMEM;
-+            }
-+            /* blank the per context attached array */
-+            for(i=0;i<ELAN_CAP_NUM_CONTEXTS(cap);i++)
-+            {
-+                ptr->attached[rail][i].cb_func = NULL;
-+                /* user-to-user copy */
-+                ptr->attached[rail][i].handle  = NULL;
-+                ptr->attached[rail][i].owner   = NULL;
-+            }
-+        }
-+    }
-+
-+    ptr->node.owner    = owner;
-+    ptr->node.cap      = *cap;
-+    ptr->node.attached = 1;   /* creator counts as attached */
-+    ptr->node.active   = 1;
-+    ptr->vp_list.next  = &(ptr->vp_list);
-+    ptr->vp_list.prev  = &(ptr->vp_list);
-+
-+    list_add_tail(&ptr->list, &elan_cap_list);
-+
-+    ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+    return ESUCCESS;
-+}
-+
-+void
-+elan_destroy_cap_test(ELAN_CAP_NODE_STRUCT *cap_ptr)
-+{
-+    /* called by someone holding the mutex */
-+    struct list_head    * vp_tmp;
-+    ELAN_VP_NODE_STRUCT * vp_ptr = NULL;
-+    int                   rail;
-+
-+    ASSERT(cap_ptr->node.attached >= 0);
-+
-+    /* check to see if it can be deleted now */
-+    if ( cap_ptr->node.attached == 0 ) {
-+
-+        ELAN_DEBUG1(ELAN_DBG_CAP,"elan_destroy_cap_test: %p attached == 0\n", cap_ptr);
-+
-+        /* delete the vp list */
-+        list_for_each(vp_tmp, &(cap_ptr->vp_list)) {
-+            vp_ptr = list_entry(vp_tmp, ELAN_VP_NODE_STRUCT , list);
-+            list_del(&vp_ptr->list);
-+            KMEM_FREE( vp_ptr, sizeof(ELAN_VP_NODE_STRUCT));
-+        }
-+
-+        list_del(&cap_ptr->list);
-+
-+        /* delete space for the attached array */
-+        for(rail=0;rail<ELAN_MAX_RAILS;rail++)
-+            if (cap_ptr->attached[rail])
-+                KMEM_FREE(cap_ptr->attached[rail], sizeof(ELAN_ATTACHED_STRUCT) * ELAN_CAP_NUM_CONTEXTS(&(cap_ptr->node.cap)));
-+
-+        KMEM_FREE(cap_ptr, sizeof(ELAN_CAP_NODE_STRUCT));
-+    }
-+    else
-+        ELAN_DEBUG2(ELAN_DBG_CAP,"elan_destroy_cap_test: %p attached = %d\n",
-+                    cap_ptr, cap_ptr->node.attached);
-+
-+}
-+
-+int
-+elan_destroy_cap(ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap)
-+{
-+    char space[127];
-+    struct list_head     * el;
-+    struct list_head     * nel;
-+    ELAN_CAP_NODE_STRUCT * ptr = NULL;
-+    int                    i, rail;
-+    int                    found = 0;
-+
-+    ELANMOD_RWLOCK_WRITE(&elan_rwlock);
-+
-+    ELAN_DEBUG1 (ELAN_DBG_CAP,"elan_destroy_cap %s\n",elan_capability_string(cap,space));
-+
-+    list_for_each_safe (el, nel, &elan_cap_list) {
-+        ptr = list_entry(el, ELAN_CAP_NODE_STRUCT , list);
-+
-+        /* is it an exact match */
-+        if ( (ptr->node.owner == owner )
-+             && ( (cap == NULL)
-+                  || (ELAN_CAP_TYPE_MATCH(&ptr->node.cap,cap) && ELAN_CAP_GEOM_MATCH(&ptr->node.cap,cap)))) {
-+
-+            if ( ptr->node.active ) {
-+
-+                /* mark as inactive and dec attached count */
-+                ptr->node.active = 0;
-+                ptr->node.attached--;
-+                ptr->node.owner  = 0; /* no one owns it now */
-+
-+                ASSERT(ptr->node.attached >= 0);
-+
-+                /* need to tell anyone who was attached that this has been destroyed */
-+                for(rail=0;rail<ELAN_MAX_RAILS;rail++)
-+                    if (ELAN_CAP_IS_RAIL_SET(&(ptr->node.cap), rail)) {
-+                        for(i=0;i< ELAN_CAP_NUM_CONTEXTS(&(ptr->node.cap));i++)
-+                            if ( ptr->attached[rail][i].cb_func != NULL)
-+                                ptr->attached[rail][i].cb_func(ptr->attached[rail][i].cb_args, cap, NULL);
-+                    }
-+
-+                /* now try to destroy it */
-+                elan_destroy_cap_test(ptr);
-+
-+                /* found it */
-+                found = 1;
-+            }
-+        }
-+    }
-+
-+    if ( found )
-+    {
-+        ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+        return ESUCCESS;
-+    }
-+
-+    /* failed */
-+    ELAN_DEBUG1(ELAN_DBG_CAP,"elan_destroy_cap: %p didn't find it \n", cap);
-+
-+    ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+    return EINVAL;
-+}
-+
-+int
-+elan_get_caps(uint *number_of_results, uint array_size, ELAN_CAP_STRUCT *caps)
-+{
-+    uint                   results = 0;
-+    struct list_head     * tmp;
-+    ELAN_CAP_NODE_STRUCT * ptr = NULL;
-+
-+
-+    ELANMOD_RWLOCK_READ(&elan_rwlock);
-+
-+    ELAN_DEBUG0(ELAN_DBG_CAP,"elan_get_caps\n");
-+
-+    list_for_each(tmp, &elan_cap_list) {
-+        ptr = list_entry(tmp, ELAN_CAP_NODE_STRUCT , list);
-+
-+        copyout(&ptr->node, &caps[results], sizeof (ELAN_CAP_STRUCT));
-+
-+        results++;
-+
-+        if ( results >= array_size )
-+        {
-+            copyout(&results, number_of_results, sizeof(uint));
-+            ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
-+            return ESUCCESS;
-+        }
-+    }
-+
-+    copyout(&results, number_of_results, sizeof(uint));
-+
-+    ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
-+    return ESUCCESS;
-+}
-+
-+int
-+elan_create_vp(ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map)
-+{
-+    ELAN_CAP_NODE_STRUCT * cap_ptr = NULL;
-+    ELAN_VP_NODE_STRUCT  * vp_ptr  = NULL;
-+
-+    ELANMOD_RWLOCK_WRITE(&elan_rwlock);
-+
-+
-+    ELAN_DEBUG0(ELAN_DBG_CAP,"elan_create_vp\n");
-+
-+    /* the railmasks must match */
-+    if ( cap->cap_railmask != map->cap_railmask)
-+    {
-+        ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+        return EINVAL;
-+    }
-+
-+    /* does the cap exist */
-+    cap_ptr = find_cap_node(cap);
-+    if ((cap_ptr == NULL) || ( cap_ptr->node.owner != owner ) || (! cap_ptr->node.active) )
-+    {
-+        ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+        return EINVAL;
-+    }
-+
-+    /* is there already a mapping */
-+    vp_ptr = find_vp_node(cap_ptr,map);
-+    if ( vp_ptr != NULL)
-+    {
-+        ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+        return EINVAL;
-+    }
-+
-+    /* create space for mapping */
-+    KMEM_ALLOC(vp_ptr, ELAN_VP_NODE_STRUCT *, sizeof(ELAN_VP_NODE_STRUCT), 1);
-+    if (vp_ptr == NULL)
-+    {
-+        ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+        return ENOMEM;
-+    }
-+
-+    /* copy map */
-+    vp_ptr->vp = *map;
-+    list_add_tail(&vp_ptr->list, &(cap_ptr->vp_list));
-+    ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+    return ESUCCESS;
-+}
-+
-+int
-+elan_destroy_vp(ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map)
-+{
-+    ELAN_CAP_NODE_STRUCT * cap_ptr = NULL;
-+    ELAN_VP_NODE_STRUCT  * vp_ptr  = NULL;
-+    int                    i, rail;
-+
-+    ELANMOD_RWLOCK_WRITE(&elan_rwlock);
-+
-+    ELAN_DEBUG0(ELAN_DBG_CAP,"elan_destroy_vp\n");
-+
-+    cap_ptr = find_cap_node(cap);
-+    if ((cap_ptr!=NULL) && (cap_ptr->node.owner == owner) && ( cap_ptr->node.active))
-+    {
-+        vp_ptr = find_vp_node( cap_ptr, map );
-+        if ( vp_ptr != NULL )
-+        {
-+            list_del(&vp_ptr->list);
-+            KMEM_FREE(vp_ptr, sizeof(ELAN_VP_NODE_STRUCT));
-+
-+            /* need to tell those who are attached that map is no longer in use */
-+            for(rail=0;rail<ELAN_MAX_RAILS;rail++)
-+                for(i=0;i<ELAN_CAP_NUM_CONTEXTS(&(cap_ptr->node.cap));i++)
-+                    if ( cap_ptr->attached[rail][i].cb_func != NULL)
-+                        cap_ptr->attached[rail][i].cb_func( cap_ptr->attached[rail][i].cb_args, cap, map);
-+
-+            ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+            return ESUCCESS;
-+        }
-+    }
-+
-+    /* didn't find it */
-+    ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+    return EINVAL;
-+}
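The entry points above are tied together by the node.attached reference count: elan_create_cap() starts it at 1 for the creator, elan_attach_cap() raises it for each per-rail, per-context callback, elan_detach_cap() and elan_destroy_cap() lower it, and elan_destroy_cap_test() frees the node only once it reaches zero with the node marked inactive. A sketch of one complete lifecycle from a hypothetical kernel client (the helper name and arguments are invented for illustration):

/* Illustrative only: exercise the elanmod capability lifecycle. */
static int sketch_cap_lifecycle(ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap,
                                void *cb_args, ELAN_DESTROY_CB cb)
{
    int rc;

    /* creator validates with cap_mycontext still uninitialised */
    if ((rc = elan_validate_cap(cap)) != ESUCCESS)
        return rc;

    if ((rc = elan_create_cap(owner, cap)) != ESUCCESS)   /* attached = 1 */
        return rc;

    /* an attacher picks its context before attaching to rail 0 */
    cap->cap_mycontext = cap->cap_lowcontext;
    if ((rc = elan_attach_cap(cap, 0, cb_args, cb)) != ESUCCESS) /* attached = 2 */
        goto out_destroy;

    /* ... capability in use ... */

    elan_detach_cap(cap, 0);   /* attached back to 1 */
out_destroy:
    /* drops the creator's reference and marks the node inactive;
     * elan_destroy_cap_test() frees it once attached hits 0 */
    elan_destroy_cap(owner, cap);
    return rc;
}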
-+
-+int
-+elan_attach_cap(ELAN_CAPABILITY *cap, unsigned int rail, void *args, ELAN_DESTROY_CB func)
-+{
-+    char              space[127];
-+    struct list_head *el;
-+
-+    ELAN_DEBUG1 (ELAN_DBG_CAP,"elan_attach_cap %s\n",elan_capability_string(cap,space));
-+
-+    /* currently must provide a callback, as null means something */
-+    if ( func == NULL)
-+        return (EINVAL);
-+
-+    /* mycontext must be set and correct */
-+    if ( ! ELAN_CAP_VALID_MYCONTEXT(cap))
-+        return (EINVAL);
-+
-+    /* rail must be one of the rails in railmask */
-+    if (((1 << rail) & cap->cap_railmask) == 0)
-+        return (EINVAL);
-+
-+    ELANMOD_RWLOCK_WRITE(&elan_rwlock);
-+
-+    list_for_each(el, &elan_cap_list) {
-+        ELAN_CAP_NODE_STRUCT *cap_ptr = list_entry(el, ELAN_CAP_NODE_STRUCT , list);
-+
-+        /* is it an exact match */
-+        if (ELAN_CAP_MATCH(&cap_ptr->node.cap,cap) && cap_ptr->node.active) {
-+            unsigned int attached_index = cap->cap_mycontext - cap->cap_lowcontext;
-+
-+            if ( cap_ptr->attached[rail][attached_index].cb_func != NULL ) /* only one per ctx per rail */
-+            {
-+                ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+                return EBUSY;
-+            }
-+
-+            /* keep track of who attached as we might need to tell them when */
-+            /* cap or maps get destroyed */
-+            cap_ptr->attached[rail][ attached_index ].cb_func = func;
-+            cap_ptr->attached[rail][ attached_index ].cb_args = args;
-+            cap_ptr->node.attached++;
-+
-+            ELAN_DEBUG0(ELAN_DBG_CAP,"elan_attach_cap: passed\n");
-+            ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+            return ESUCCESS;
-+        }
-+    }
-+
-+    ELAN_DEBUG0(ELAN_DBG_CAP,"elan_attach_cap: failed to find \n");
-+
-+    /* didn't find one */
-+    ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+    return EINVAL;
-+}
-+
-+int
-+elan_detach_cap(ELAN_CAPABILITY *cap, unsigned int rail)
-+{
-+    struct list_head *el, *nel;
-+    char              space[256];
-+
-+    ELANMOD_RWLOCK_WRITE(&elan_rwlock);
-+
-+    ELAN_DEBUG1(ELAN_DBG_CAP,"elan_detach_cap %s\n",elan_capability_string(cap,space));
-+    list_for_each_safe (el, nel, &elan_cap_list) {
-+        ELAN_CAP_NODE_STRUCT *ptr = list_entry (el, ELAN_CAP_NODE_STRUCT, list);
-+
-+        /* is it an exact match (key not checked) */
-+        if (ELAN_CAP_TYPE_MATCH(&ptr->node.cap,cap) &&
-+            ELAN_CAP_GEOM_MATCH(&ptr->node.cap,cap) &&
-+            (ptr->node.cap.cap_railmask & cap->cap_railmask) == cap->cap_railmask) {
-+
-+            unsigned int attached_index = cap->cap_mycontext - cap->cap_lowcontext;
-+
-+            if ( ptr->attached[rail][ attached_index ].cb_func == NULL ) {
-+                ELAN_DEBUG0(ELAN_DBG_CAP,"elanmod_detach_cap already removed \n");
-+
-+                ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+                return ESUCCESS;
-+            }
-+
-+            ptr->attached[rail][ attached_index ].cb_func = NULL;
-+            ptr->attached[rail][ attached_index ].cb_args = (void *)0;
-+
-+            ptr->node.attached--;
-+
-+            ASSERT(ptr->node.attached >= 0);
-+
-+            ELAN_DEBUG1(ELAN_DBG_CAP,"elanmod_detach_cap new attach count %d \n", ptr->node.attached);
-+
-+            elan_destroy_cap_test(ptr);
-+
-+            ELAN_DEBUG0(ELAN_DBG_CAP,"elan_detach_cap: success\n");
-+
-+            ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+            return ESUCCESS;
-+        }
-+    }
-+
-+    ELAN_DEBUG0(ELAN_DBG_CAP,"elan_detach_cap: failed to find\n");
-+    ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+    return EINVAL;
-+}
-+
-+int
-+elan_cap_dump()
-+{
-+    struct list_head     * tmp;
-+    ELAN_CAP_NODE_STRUCT * ptr = NULL;
-+
-+    ELANMOD_RWLOCK_READ(&elan_rwlock);
-+
-+    list_for_each(tmp, &elan_cap_list) {
-+        ptr = list_entry(tmp, ELAN_CAP_NODE_STRUCT , list);
-+
-+        ELAN_DEBUG2 (ELAN_DBG_ALL, "cap dump: owner %p type %x\n", ptr->node.owner, ptr->node.cap.cap_type);
-+
-+        ELAN_DEBUG5 (ELAN_DBG_ALL, "cap dump: LowNode %d HighNode %d LowContext %d mycontext %d highContext %d\n",
-+                     ptr->node.cap.cap_lownode , ptr->node.cap.cap_highnode,
-+                     ptr->node.cap.cap_lowcontext , ptr->node.cap.cap_mycontext, ptr->node.cap.cap_highcontext);
-+
-+    }
-+
-+    ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
-+    return ESUCCESS;
-+}
-+
-+int
-+elan_usercopy_attach(ELAN_CAPABILITY *cap, ELAN_CAP_NODE_STRUCT **node_ptr, void *handle, void *owner)
-+{
-+    struct list_head *el;
-+
-+    /* mycontext must be set and correct */
-+    if ( ! ELAN_CAP_VALID_MYCONTEXT(cap))
-+        return -EINVAL;
-+
-+    ELANMOD_RWLOCK_WRITE(&elan_rwlock);
-+
-+    /* Search all cap node structs looking for an exact match (including key) */
-+    list_for_each(el, &elan_cap_list) {
-+        ELAN_CAP_NODE_STRUCT *cap_ptr = list_entry(el, ELAN_CAP_NODE_STRUCT , list);
-+
-+        /* is it an exact match */
-+        if (ELAN_CAP_MATCH(&cap_ptr->node.cap,cap) && cap_ptr->node.active) {
-+            char space[127];
-+            /* Work out which local process index we are */
-+            unsigned int attached_index = cap->cap_mycontext - cap->cap_lowcontext;
-+
-+            ELAN_DEBUG(ELAN_DBG_CAP, "usercopy_attach: %s\n",
-+                       elan_capability_string(cap,space));
-+
-+            ELAN_DEBUG(ELAN_DBG_CAP,
-+                       "usercopy_attach: cap_ptr %p handle %p owner %p idx %d\n",
-+                       cap_ptr, handle, owner, attached_index);
-+
-+            /* Check we're not being called multiple times for the same local process */
-+            if (cap_ptr->attached[0][attached_index].handle)
-+            {
-+                ELAN_DEBUG(ELAN_DBG_CAP,
-+                           "usercopy_attach: cap_ptr %p idx %d already attached handle %p owner %p\n",
-+                           cap_ptr, attached_index,
-+                           cap_ptr->attached[0][attached_index].handle,
-+                           cap_ptr->attached[0][attached_index].owner);
-+
-+                ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+                return -EAGAIN;
-+            }
-+
-+            /* Reference count node struct */
-+            cap_ptr->node.attached++;
-+
-+            /* Stash our task handle/owner off the cap node array */
-+            cap_ptr->attached[0][attached_index].handle = handle;
-+            cap_ptr->attached[0][attached_index].owner  = owner;
-+
-+            ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+
-+            /* Return node pointer to caller */
-+            *node_ptr = cap_ptr;
-+
-+            return ESUCCESS;
-+        }
-+    }
-+
-+    /* failed to match a cap */
-+    ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+    return -EINVAL;
-+}
-+
-+int
-+elan_usercopy_detach(ELAN_CAP_NODE_STRUCT *cap_ptr, void *owner)
-+{
-+    int i;
-+
-+    /* NB: The usercopy code holds a read lock on this rwlock and
-+     * hence we will block here if exit_fs() gets called during a
-+     * copy to this process
-+     */
-+    ELANMOD_RWLOCK_WRITE(&elan_rwlock);
-+
-+    /* Find this process in the attached task handle/owner array */
-+    for(i=0; i< ELAN_CAP_NUM_CONTEXTS((&cap_ptr->node.cap)); i++)
-+    {
-+        if (cap_ptr->attached[0][i].owner == owner)
-+        {
-+            ELAN_DEBUG(ELAN_DBG_CAP,
-+                       "usercopy_detach: cap_ptr %p handle %p owner %p id %d\n",
-+                       cap_ptr, cap_ptr->attached[0][i].handle, owner, i);
-+
-+            /* Clear our task handle/owner off the cap node array */
-+            cap_ptr->attached[0][i].handle = NULL;
-+            cap_ptr->attached[0][i].owner  = NULL;
-+
-+            /* Reference count node struct */
-+            cap_ptr->node.attached--;
-+
-+            ASSERT(cap_ptr->node.attached >= 0);
-+
-+            /* May need to destroy cap if reference count has hit zero */
-+            elan_destroy_cap_test(cap_ptr);
-+
-+            ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+
-+            return ESUCCESS;
-+        }
-+    }
-+
-+    ELAN_DEBUG(ELAN_DBG_CAP, "usercopy_detach: cap_ptr %p[%d] failed owner %p\n",
-+               cap_ptr, cap_ptr->node.attached, owner);
-+
-+    ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+
-+    return -EINVAL;
-+}
-+
-+/* Returns the associated handle for the supplied ctxId process in the cap node */
-+/* Should be called holding a read lock on the elan_rwlock */
-+int
-+elan_usercopy_handle(ELAN_CAP_NODE_STRUCT *cap_ptr, int ctxId, void **handlep)
-+{
-+    int   res = ESUCCESS;
-+    void *handle;
-+
-+    /* Sanity check argument */
-+    if (ctxId < 0 || ctxId >= ELAN_CAP_NUM_CONTEXTS(&(cap_ptr->node.cap)))
-+        return -EINVAL;
-+
-+//  ELANMOD_RWLOCK_READ(&elan_rwlock);
-+
-+    /* Get the task handle for the remote process */
-+    if ((handle = cap_ptr->attached[0][ctxId].handle) == NULL)
-+        res = -EAGAIN;
-+
-+//  ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
-+
-+    *handlep = handle;
-+
-+    return res;
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * End:
-+ */
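That closes out the usercopy bookkeeping in capability.c: elan_usercopy_attach() records the calling task in the rail-0 attached[] slot for cap_mycontext, elan_usercopy_handle() later resolves a peer context id back to its task, and elan_usercopy_detach() clears the slot and drops the node reference. A condensed, illustrative sequence; owner_token here is a stand-in (the /proc ioctl handler later in this patch passes its struct file pointer):

/* Illustrative only: one usercopy session against a peer context. */
static int sketch_usercopy_session(ELAN_CAPABILITY *cap, void *owner_token,
                                   int remote_ctxId)
{
    ELAN_CAP_NODE_STRUCT *node;
    struct task_struct   *remote_tsk;
    int                   rc;

    /* store current as the task for cap->cap_mycontext in rail 0 */
    if ((rc = elan_usercopy_attach(cap, &node, current, owner_token)) < 0)
        return rc;

    /* resolve the peer context to a task handle; the real callers
     * hold elan_rwlock for read across the lookup and the copy */
    if ((rc = elan_usercopy_handle(node, remote_ctxId,
                                   (void **)&remote_tsk)) == ESUCCESS) {
        /* ... rw_process_vm()-style copy against remote_tsk ... */
    }

    /* clear our slot and drop the node reference */
    elan_usercopy_detach(node, owner_token);
    return rc;
}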
-diff -urN clean/drivers/net/qsnet/elan/capability_general.c linux-2.6.9/drivers/net/qsnet/elan/capability_general.c
---- clean/drivers/net/qsnet/elan/capability_general.c	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/elan/capability_general.c	2004-02-25 08:47:59.000000000 -0500
-@@ -0,0 +1,446 @@
-+/*
-+ * Copyright (c) 2003 by Quadrics Ltd.
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ident "@(#)$Id: capability_general.c,v 1.10 2004/02/25 13:47:59 daniel Exp $"
-+/* $Source: /cvs/master/quadrics/elanmod/shared/capability_general.c,v $ */
-+
-+#if defined(__KERNEL__)
-+
-+#include
-+
-+#else
-+
-+#include
-+#include
-+#include
-+
-+#endif
-+
-+#include
-+
-+
-+void
-+elan_nullcap (ELAN_CAPABILITY *cap)
-+{
-+    register int i;
-+
-+    for (i = 0; i < sizeof (cap->cap_userkey)/sizeof(cap->cap_userkey.key_values[0]); i++)
-+        cap->cap_userkey.key_values[i] = ELAN_CAP_UNINITIALISED;
-+
-+    cap->cap_lowcontext  = ELAN_CAP_UNINITIALISED;
-+    cap->cap_highcontext = ELAN_CAP_UNINITIALISED;
-+    cap->cap_mycontext   = ELAN_CAP_UNINITIALISED;
-+    cap->cap_lownode     = ELAN_CAP_UNINITIALISED;
-+    cap->cap_highnode    = ELAN_CAP_UNINITIALISED;
-+    cap->cap_railmask    = ELAN_CAP_UNINITIALISED;
-+    cap->cap_type        = ELAN_CAP_UNINITIALISED;
-+    cap->cap_spare       = 0;
-+    cap->cap_version     = ELAN_CAP_VERSION_NUMBER;
-+
-+    for (i = 0; i < sizeof (cap->cap_bitmap)/sizeof (cap->cap_bitmap[0]); i++)
-+        cap->cap_bitmap[i] = 0;
-+}
-+
-+char *
-+elan_capability_string (ELAN_CAPABILITY *cap, char *str)
-+{
-+    if (cap == NULL)
-+        sprintf (str, "[-.-.-.-] cap = NULL\n");
-+    else
-+        sprintf (str, "[%x.%x.%x.%x] Version %x Type %x \n"
-+                 "Context %x.%x.%x Node %x.%x\n",
-+                 cap->cap_userkey.key_values[0], cap->cap_userkey.key_values[1],
-+                 cap->cap_userkey.key_values[2], cap->cap_userkey.key_values[3],
-+                 cap->cap_version, cap->cap_type,
-+                 cap->cap_lowcontext, cap->cap_mycontext, cap->cap_highcontext,
-+                 cap->cap_lownode, cap->cap_highnode);
-+
-+    return (str);
-+}
-+
-+ELAN_LOCATION
-+elan_vp2location (u_int process, ELAN_CAPABILITY *cap)
-+{
-+    ELAN_LOCATION location;
-+    int i, vp, node, context, nnodes, nctxs;
-+
-+    vp = 0;
-+
-+    location.loc_node    = ELAN_INVALID_NODE;
-+    location.loc_context = -1;
-+
-+    nnodes = cap->cap_highnode - cap->cap_lownode + 1;
-+    nctxs  = cap->cap_highcontext - cap->cap_lowcontext + 1;
-+
-+    switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
-+    {
-+    case ELAN_CAP_TYPE_BLOCK:
-+        for (node = 0, i = 0; node < nnodes; node++)
-+        {
-+            for (context = 0; context < nctxs; context++)
-+            {
-+                if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, context + (node * nctxs)))
-+                {
-+                    if (vp == process)
-+                    {
-+                        /* Return relative indices within the capability box */
-+                        location.loc_node    = node;
-+                        location.loc_context = context;
-+
-+                        return (location);
-+                    }
-+
-+                    vp++;
-+                }
-+            }
-+        }
-+        break;
-+
-+    case ELAN_CAP_TYPE_CYCLIC:
-+        for (context = 0, i = 0; context < nctxs; context++)
-+        {
-+            for (node = 0; node < nnodes; node++)
-+            {
-+                if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, node + (context * nnodes)))
-+                {
-+                    if (vp == process)
-+                    {
-+                        location.loc_node    = node;
-+                        location.loc_context = context;
-+
-+                        return (location);
-+                    }
-+
-+                    vp++;
-+                }
-+            }
-+        }
-+        break;
-+    }
-+
-+    return( location );
-+}
-+
-+int
-+elan_location2vp (ELAN_LOCATION location, ELAN_CAPABILITY *cap)
-+{
-+    int vp, node, context, nnodes, nctxs;
-+
-+    nnodes = cap->cap_highnode - cap->cap_lownode + 1;
-+    nctxs  = cap->cap_highcontext - cap->cap_lowcontext + 1;
-+
-+    vp = 0;
-+
-+    switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
-+    {
-+    case ELAN_CAP_TYPE_BLOCK:
-+        for (node = 0 ; node < nnodes ; node++)
-+        {
-+            for (context = 0; context < nctxs; context++)
-+            {
-+                if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, context + (node * nctxs)))
-+                {
-+                    if ((location.loc_node == node) && (location.loc_context == context))
-+                    {
-+                        /* Found it ! */
-+                        return( vp );
-+                    }
-+
-+                    vp++;
-+                }
-+            }
-+        }
-+        break;
-+
-+    case ELAN_CAP_TYPE_CYCLIC:
-+        for (context = 0; context < nctxs; context++)
-+        {
-+            for (node = 0; node < nnodes; node++)
-+            {
-+                if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, node + (context * nnodes)))
-+                {
-+                    if ((location.loc_node == node) && (location.loc_context == context))
-+                    {
-+                        /* Found it ! */
-+                        return( vp );
-+                    }
-+
-+                    vp++;
-+                }
-+            }
-+        }
-+        break;
-+    }
-+
-+    /* Failed to find it */
-+    return( -1 );
-+}
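elan_vp2location() and elan_location2vp() are inverse walks of the same enumeration, so a worked case makes the ordering concrete: with 2 nodes, 2 contexts and no bitmap, ELAN_CAP_TYPE_BLOCK numbers node-major (vp 0,1 are node 0 contexts 0,1; vp 2,3 are node 1), while ELAN_CAP_TYPE_CYCLIC numbers context-major (vp 0,1 are context 0 nodes 0,1; vp 2,3 are context 1). In the no-bitmap case the loops reduce to these closed forms (sketch only):

/* Illustrative closed forms for the ELAN_CAP_TYPE_NO_BITMAP case. */
static inline int block_vp(int node, int ctx, int nctxs)
{
    return node * nctxs + ctx;   /* node-major enumeration    */
}

static inline int cyclic_vp(int node, int ctx, int nnodes)
{
    return ctx * nnodes + node;  /* context-major enumeration */
}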
-+
-+/* Return the number of processes as described by a capability */
-+int
-+elan_nvps (ELAN_CAPABILITY *cap)
-+{
-+    int i, c, nbits = ELAN_CAP_BITMAPSIZE(cap);
-+
-+    if (cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP)
-+        return (nbits);
-+
-+    for (i = 0, c = 0; i < nbits; i++)
-+        if (BT_TEST (cap->cap_bitmap, i))
-+            c++;
-+
-+    return (c);
-+}
-+
-+/* Return the number of local processes on a given node as described by a capability */
-+int
-+elan_nlocal (int node, ELAN_CAPABILITY *cap)
-+{
-+    int           vp;
-+    ELAN_LOCATION loc;
-+    int           nLocal = 0;
-+
-+    for (vp = 0; vp < elan_nvps(cap); vp++)
-+    {
-+        loc = elan_vp2location(vp, cap);
-+        if (loc.loc_node == node)
-+            nLocal++;
-+    }
-+
-+    return (nLocal);
-+}
-+
-+/* Return the maximum number of local processes on any node as described by a capability */
-+int
-+elan_maxlocal (ELAN_CAPABILITY *cap)
-+{
-+    return(cap->cap_highcontext - cap->cap_lowcontext + 1);
-+}
-+
-+/* Return the vps of the local processes on a given node as described by a capability */
-+int
-+elan_localvps (int node, ELAN_CAPABILITY *cap, int *vps, int size)
-+{
-+    int           context;
-+    ELAN_LOCATION loc;
-+    int           nLocal = 0;
-+
-+    loc.loc_node = node;
-+
-+    for (context = 0; context < MIN(size, elan_maxlocal(cap)); context++)
-+    {
-+        loc.loc_context = context;
-+
-+        /* Should return -1 if none found */
-+        if ( (vps[context] = elan_location2vp( loc, cap )) != -1)
-+            nLocal++;
-+    }
-+
-+    return (nLocal);
-+}
-+
-+/* Return the number of rails that this capability utilises */
-+int
-+elan_nrails (ELAN_CAPABILITY *cap)
-+{
-+    int          nrails = 0;
-+    unsigned int railmask;
-+
-+    /* Test for a multi-rail capability */
-+    if (cap->cap_type & ELAN_CAP_TYPE_MULTI_RAIL)
-+    {
-+        /* Grab rail bitmask from capability */
-+        railmask = cap->cap_railmask;
-+
-+        while (railmask)
-+        {
-+            if (railmask & 1)
-+                nrails++;
-+
-+            railmask >>= 1;
-+        }
-+    }
-+    else
-+        /* Default to just one rail */
-+        nrails = 1;
-+
-+    return (nrails);
-+}
-+
-+/* Fill out an array giving the physical rail numbers utilised by a capability */
-+int
-+elan_rails (ELAN_CAPABILITY *cap, int *rails)
-+{
-+    int          nrails, rail;
-+    unsigned int railmask;
-+
-+    /* Test for a multi-rail capability */
-+    if (cap->cap_type & ELAN_CAP_TYPE_MULTI_RAIL)
-+    {
-+        /* Grab rail bitmask from capability */
-+        railmask = cap->cap_railmask;
-+
-+        nrails = rail = 0;
-+        while (railmask)
-+        {
-+            if (railmask & 1)
-+                rails[nrails++] = rail;
-+
-+            rail++;
-+            railmask >>= 1;
-+        }
-+    }
-+    else
-+    {
-+        /* Default to just one rail */
-+        rails[0] = 0;
-+        nrails   = 1;
-+    }
-+
-+    return( nrails );
-+}
-+
-+int
-+elan_cap_overlap(ELAN_CAPABILITY *cap1, ELAN_CAPABILITY *cap2)
-+{
-+    /* by context */
-+    if ( cap1->cap_highcontext < cap2->cap_lowcontext ) return (0);
-+    if ( cap1->cap_lowcontext  > cap2->cap_highcontext) return (0);
-+
-+    /* by node */
-+    if ( cap1->cap_highnode < cap2->cap_lownode ) return (0);
-+    if ( cap1->cap_lownode  > cap2->cap_highnode) return (0);
-+
-+    /* by rail */
-+    /* they overlap if they have a rail in common */
-+    return (cap1->cap_railmask & cap2->cap_railmask);
-+}
-+
-+#if !defined(__KERNEL__)
-+
-+/* Fill out an array that hints at the best use of the rails on a
-+ * per process basis. The library user can then decide whether or not
-+ * to take this into account (e.g. TPORTs)
-+ * All processes calling this fn will be returned the same information.
-+ */
-+int
-+elan_prefrails(ELAN_CAPABILITY *cap, int *pref, int nvp)
-+{
-+    int i;
-+    int nrails   = elan_nrails(cap);
-+    int maxlocal = elan_maxlocal(cap);
-+
-+    /* Test for a multi-rail capability */
-+    if (! (cap->cap_type & ELAN_CAP_TYPE_MULTI_RAIL))
-+    {
-+        /* Default to just one rail */
-+        for (i = 0; i < nvp; i++)
-+            pref[i] = 0;
-+
-+        return( 0 );
-+    }
-+
-+    /*
-+     * We allocate rails on a per node basis sharing out the rails
-+     * equally amongst the local processes. However, if there is only
-+     * one process per node and multiple rails, then we use a different
-+     * algorithm where rails are allocated across all the processes in
-+     * a round-robin fashion
-+     */
-+
-+    if (maxlocal == 1)
-+    {
-+        /* Allocate rails in a round-robin manner */
-+        for (i = 0; i < nvp; i++)
-+            *pref++ = i % nrails;
-+    }
-+    else
-+    {
-+        int  node;
-+        int *vps;
-+        int  nnodes = cap->cap_highnode - cap->cap_lownode + 1;
-+
-+        vps = (int *) malloc(sizeof(int)*maxlocal);
-+
-+        /* Grab the local process info for each node and allocate
-+         * rails to those vps on an equal basis
-+         */
-+        for (node = 0; node < nnodes; node++)
-+        {
-+            int nlocal;
-+            int pprail;
-+
-+            /* Grab an array of local vps */
-+            nlocal = elan_localvps(node, cap, vps, maxlocal);
-+
-+            /* Calculate the number of processes per rail */
-+            if ((pprail = nlocal/nrails) == 0)
-+                pprail = 1;
-+
-+            /* Allocate processes to rails */
-+            for (i = 0; i < nlocal; i++)
-+            {
-+                pref[vps[i]] = (i / pprail) % nrails;
-+            }
-+        }
-+
-+        free(vps);
-+    }
-+
-+    return( 0 );
-+}
-+
-+void
-+elan_get_random_key(ELAN_USERKEY *key)
-+{
-+    int i;
-+    for (i = 0; i < sizeof(key->key_values) / sizeof(key->key_values[0]); i++)
-+        key->key_values[i] = lrand48();
-+}
-+
-+int elan_lowcontext(ELAN_CAPABILITY *cap)
-+{
-+    return(cap->cap_lowcontext);
-+}
-+
-+int elan_mycontext(ELAN_CAPABILITY *cap)
-+{
-+    return(cap->cap_mycontext);
-+}
-+
-+int elan_highcontext(ELAN_CAPABILITY *cap)
-+{
-+    return(cap->cap_highcontext);
-+}
-+
-+int elan_lownode(ELAN_CAPABILITY *cap)
-+{
-+    return(cap->cap_lownode);
-+}
-+
-+int elan_highnode(ELAN_CAPABILITY *cap)
-+{
-+    return(cap->cap_highnode);
-+}
-+
-+int elan_captype(ELAN_CAPABILITY *cap)
-+{
-+    return(cap->cap_type);
-+}
-+
-+int elan_railmask(ELAN_CAPABILITY *cap)
-+{
-+    return(cap->cap_railmask);
-+}
-+
-+#endif
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * End:
-+ */
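elan_prefrails() above therefore has the two regimes its comment describes. With one process per node, rails rotate round-robin across vps, so 2 rails and 4 vps yield preferences 0,1,0,1. With several local processes per node, each node's processes are split into contiguous groups of pprail = nlocal/nrails, so 4 local processes on 2 rails yield 0,0,1,1 within the node. The per-process choice in the second regime reduces to (sketch only):

/* Illustrative: rail preferred for local process i of nlocal on one
 * node, given nrails available rails (the maxlocal > 1 branch above). */
static int sketch_rail_for(int i, int nlocal, int nrails)
{
    int pprail = nlocal / nrails;

    if (pprail == 0)
        pprail = 1;
    return (i / pprail) % nrails;
}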
-diff -urN clean/drivers/net/qsnet/elan/device.c linux-2.6.9/drivers/net/qsnet/elan/device.c
---- clean/drivers/net/qsnet/elan/device.c	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/elan/device.c	2005-04-13 05:31:47.000000000 -0400
-@@ -0,0 +1,147 @@
-+/*
-+ * Copyright (c) 2003 by Quadrics Ltd.
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ident "@(#)$Id: device.c,v 1.6 2005/04/13 09:31:47 addy Exp $"
-+/* $Source: /cvs/master/quadrics/elanmod/modsrc/device.c,v $*/
-+
-+#include
-+#include
-+
-+static LIST_HEAD(elan_dev_list);
-+
-+ELAN_DEV_STRUCT *
-+elan_dev_find (ELAN_DEV_IDX devidx)
-+{
-+    struct list_head *tmp;
-+    ELAN_DEV_STRUCT  *ptr=NULL;
-+
-+    list_for_each(tmp, &elan_dev_list) {
-+        ptr = list_entry(tmp, ELAN_DEV_STRUCT , node);
-+        if (ptr->devidx == devidx)
-+            return ptr;
-+        if (ptr->devidx > devidx)
-+            return ERR_PTR(-ENXIO);
-+    }
-+
-+    return ERR_PTR(-EINVAL);
-+}
-+
-+ELAN_DEV_STRUCT *
-+elan_dev_find_byrail (unsigned short deviceid, unsigned rail)
-+{
-+    struct list_head *tmp;
-+    ELAN_DEV_STRUCT  *ptr=NULL;
-+
-+    list_for_each(tmp, &elan_dev_list) {
-+        ptr = list_entry(tmp, ELAN_DEV_STRUCT , node);
-+
-+        ELAN_DEBUG5 (ELAN_DBG_ALL,"elan_dev_find_byrail devidx %d - %04x %04x, %d %d \n", ptr->devidx,
-+                     ptr->devinfo->dev_device_id, deviceid, ptr->devinfo->dev_rail, rail);
-+
-+        if (ptr->devinfo->dev_device_id == deviceid && ptr->devinfo->dev_rail == rail)
-+            return ptr;
-+    }
-+
-+    return NULL;
-+}
-+
-+ELAN_DEV_IDX
-+elan_dev_register (ELAN_DEVINFO *devinfo, ELAN_DEV_OPS *ops, void * user_data)
-+{
-+    ELAN_DEV_STRUCT  *ptr;
-+    ELAN_DEV_IDX      devidx = 0;
-+    struct list_head *tmp;
-+
-+    ELANMOD_RWLOCK_WRITE(&elan_rwlock);
-+
-+    /* is it already registered */
-+    if ((ptr = elan_dev_find_byrail(devinfo->dev_device_id, devinfo->dev_rail)) != NULL)
-+    {
-+        ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+        return EINVAL;
-+    }
-+
-+    /* find a free device idx */
-+    list_for_each (tmp, &elan_dev_list) {
-+        if (list_entry (tmp, ELAN_DEV_STRUCT, node)->devidx != devidx)
-+            break;
-+        devidx++;
-+    }
-+
-+    /* create it and add */
-+    KMEM_ALLOC(ptr, ELAN_DEV_STRUCT *, sizeof(ELAN_DEV_STRUCT), 1);
-+    if (ptr == NULL)
-+    {
-+        ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+        return ENOMEM;
-+    }
-+
-+    ptr->devidx    = devidx;
-+    ptr->ops       = ops;
-+    ptr->devinfo   = devinfo;
-+    ptr->user_data = user_data;
-+
-+    /* insert this entry *before* the last entry we've found */
-+    list_add_tail(&ptr->node, tmp);
-+
-+    ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+    return ESUCCESS;
-+}
-+
-+int
-+elan_dev_deregister (ELAN_DEVINFO *devinfo)
-+{
-+    ELAN_DEV_STRUCT *target;
-+
-+    ELANMOD_RWLOCK_WRITE(&elan_rwlock);
-+
-+    if ((target = elan_dev_find_byrail (devinfo->dev_device_id, devinfo->dev_rail)) == NULL)
-+    {
-+        ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+        return EINVAL;
-+    }
-+
-+    list_del(&target->node);
-+
-+    /* delete target entry */
-+    KMEM_FREE(target, sizeof(ELAN_DEV_STRUCT));
-+
-+    ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+    return ESUCCESS;
-+}
-+
-+int
-+elan_dev_dump ()
-+{
-+    struct list_head *tmp;
-+    ELAN_DEV_STRUCT  *ptr=NULL;
-+
-+    ELANMOD_RWLOCK_READ(&elan_rwlock);
-+
-+    list_for_each(tmp, &elan_dev_list) {
-+        ptr = list_entry(tmp, ELAN_DEV_STRUCT , node);
-+
-+        ELAN_DEBUG3 (ELAN_DBG_ALL,"dev dump: index %u rail %u elan%c\n",
-+                     ptr->devidx, ptr->devinfo->dev_rail, '3' + ptr->devinfo->dev_device_id);
-+        ELAN_DEBUG5 (ELAN_DBG_ALL,"dev dump: Vid %x Did %x Rid %x DR %d DVal %x\n",
-+                     ptr->devinfo->dev_vendor_id,
-+                     ptr->devinfo->dev_device_id,
-+                     ptr->devinfo->dev_revision_id,
-+                     ptr->devinfo->dev_driver_version,
-+                     ptr->devinfo->dev_num_down_links_value);
-+
-+    }
-+
-+    ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
-+    return ESUCCESS;
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * End:
-+ */
-diff -urN clean/drivers/net/qsnet/elan/devinfo.c linux-2.6.9/drivers/net/qsnet/elan/devinfo.c
---- clean/drivers/net/qsnet/elan/devinfo.c	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/elan/devinfo.c	2005-04-13 05:31:47.000000000 -0400
-@@ -0,0 +1,78 @@
-+/*
-+ * Copyright (c) 2003 by Quadrics Ltd.
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ident "@(#)$Id: devinfo.c,v 1.6 2005/04/13 09:31:47 addy Exp $"
-+/* $Source: /cvs/master/quadrics/elanmod/modsrc/devinfo.c,v $*/
-+
-+#include
-+#include
-+
-+int
-+elan_get_devinfo(ELAN_DEV_IDX devidx, ELAN_DEVINFO *devinfo)
-+{
-+    ELAN_DEV_STRUCT *target;
-+    int              res;
-+
-+    ELANMOD_RWLOCK_READ(&elan_rwlock);
-+
-+    target = elan_dev_find (devidx);
-+
-+    if (IS_ERR (target))
-+        res = PTR_ERR(target);
-+    else
-+    {
-+        copyout(target->devinfo, devinfo, sizeof(ELAN_DEVINFO));
-+        res = ESUCCESS;
-+    }
-+
-+    ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
-+    return res;
-+}
-+
-+int
-+elan_get_position(ELAN_DEV_IDX devidx, ELAN_POSITION *position)
-+{
-+    ELAN_DEV_STRUCT *target;
-+    int              res;
-+
-+    ELANMOD_RWLOCK_READ(&elan_rwlock);
-+
-+    target = elan_dev_find(devidx);
-+
-+    if (IS_ERR (target))
-+        res = PTR_ERR(target);
-+    else
-+        res = target->ops->get_position(target->user_data, position);
-+
-+    ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
-+    return res;
-+}
-+
-+int
-+elan_set_position(ELAN_DEV_IDX devidx, unsigned short nodeId, unsigned short numNodes)
-+{
-+    ELAN_DEV_STRUCT *target;
-+    int              res;
-+
-+    ELANMOD_RWLOCK_WRITE(&elan_rwlock);
-+
-+    target = elan_dev_find(devidx);
-+
-+    if (IS_ERR (target))
-+        res = PTR_ERR (target);
-+    else
-+        res = target->ops->set_position(target->user_data, nodeId, numNodes);
-+
-+    ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+    return res;
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * End:
-+ */
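devinfo.c shows the dispatch shape behind the registry in device.c: a devidx resolves to its ELAN_DEV_STRUCT under elan_rwlock and the call forwards through the driver-supplied ELAN_DEV_OPS table. A hypothetical rail-driver registration is sketched below; the ops field names follow the call sites above, but the initializer layout and all my_* names are assumptions for illustration:

/* Illustrative only: a hypothetical rail driver registering with elanmod. */
static int my_get_position(void *user_data, ELAN_POSITION *position)
{
    /* fill *position from device state (the real drivers copyout) */
    return ESUCCESS;
}

static int my_set_position(void *user_data, unsigned short nodeId,
                           unsigned short numNodes)
{
    /* push the new position down into driver state */
    return ESUCCESS;
}

static ELAN_DEV_OPS my_dev_ops = {
    .get_position = my_get_position,
    .set_position = my_set_position,
};

/* at probe time:  elan_dev_register(&my_devinfo, &my_dev_ops, my_softc); */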
-diff -urN clean/drivers/net/qsnet/elan/elanmod.c linux-2.6.9/drivers/net/qsnet/elan/elanmod.c
---- clean/drivers/net/qsnet/elan/elanmod.c	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/elan/elanmod.c	2005-04-13 05:31:47.000000000 -0400
-@@ -0,0 +1,149 @@
-+/*
-+ * Copyright (c) 2003 by Quadrics Ltd.
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+#ident "@(#)$Id: elanmod.c,v 1.12 2005/04/13 09:31:47 addy Exp $"
-+/* $Source: /cvs/master/quadrics/elanmod/modsrc/elanmod.c,v $*/
-+
-+#include
-+#include
-+
-+ELANMOD_RWLOCK elan_rwlock;
-+
-+int
-+elan_init()
-+{
-+    ELANMOD_RWLOCK_INIT(&elan_rwlock);
-+    return (ESUCCESS);
-+}
-+
-+int
-+elan_fini()
-+{
-+    ELANMOD_RWLOCK_DESTROY(&elan_rwlock);
-+    return (ESUCCESS);
-+}
-+
-+int
-+elanmod_classify_cap (ELAN_POSITION *position, ELAN_CAPABILITY *cap, unsigned use)
-+{
-+    if (cap->cap_version != ELAN_CAP_VERSION_NUMBER)
-+    {
-+        ELAN_DEBUG2 (ELAN_DBG_VP, "elanmod_classify_cap: (cap->Version != ELAN_CAP_VERSION) %d %d\n", cap->cap_version, ELAN_CAP_VERSION_NUMBER);
-+        return (-EINVAL);
-+    }
-+
-+    if (cap->cap_lowcontext == ELAN_CAP_UNINITIALISED || cap->cap_highcontext == ELAN_CAP_UNINITIALISED)
-+    {
-+        ELAN_DEBUG3 (ELAN_DBG_VP, "elanmod_classify_cap: LowContext %d HighContext %d MyContext %d\n",
-+                     cap->cap_lowcontext , cap->cap_highcontext, cap->cap_mycontext);
-+        return (-EINVAL);
-+    }
-+
-+    if (cap->cap_lowcontext > cap->cap_highcontext)
-+    {
-+        ELAN_DEBUG2 (ELAN_DBG_VP, "elanmod_classify_cap: (cap->cap_lowcontext > cap->cap_highcontext) %d %d\n",cap->cap_lowcontext , cap->cap_highcontext);
-+        return (-EINVAL);
-+    }
-+
-+
-+    switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
-+    {
-+    case ELAN_CAP_TYPE_BLOCK:
-+    case ELAN_CAP_TYPE_CYCLIC:
-+        if (position->pos_mode == ELAN_POS_UNKNOWN)
-+        {
-+            ELAN_DEBUG0 (ELAN_DBG_VP, "elanmod_classify_cap: Position Unknown \n");
-+            return (-EAGAIN);
-+        }
-+
-+        if ( ! ( ELAN_USER_CONTEXT(cap->cap_lowcontext) && ELAN_USER_CONTEXT(cap->cap_highcontext)))
-+        {
-+            ELAN_DEBUG4 (ELAN_DBG_VP, "elanmod_classify_cap: USER_BASE_CONTEXT %d %d %d %d \n" , ELAN_USER_BASE_CONTEXT_NUM,cap->cap_lowcontext, cap->cap_highcontext ,ELAN_USER_TOP_CONTEXT_NUM);
-+            return (-EINVAL);
-+        }
-+        if (cap->cap_lownode == ELAN_CAP_UNINITIALISED)
-+            cap->cap_lownode = position->pos_nodeid;
-+        if (cap->cap_highnode == ELAN_CAP_UNINITIALISED)
-+            cap->cap_highnode = position->pos_nodeid;
-+
-+        if (cap->cap_lownode < 0 || cap->cap_highnode >= position->pos_nodes || cap->cap_lownode > cap->cap_highnode)
-+        {
-+            ELAN_DEBUG3 ( ELAN_DBG_VP,"elanmod_classify_cap: low %d high %d pos %d \n" , cap->cap_lownode ,cap->cap_highnode, position->pos_nodes);
-+
-+            return (-EINVAL);
-+        }
-+
-+        if ((cap->cap_highnode < position->pos_nodeid) || (cap->cap_lownode > position->pos_nodeid))
-+        {
-+            ELAN_DEBUG3 (ELAN_DBG_VP, "elanmod_classify_cap: node not in range low %d high %d this %d\n",
-+                         cap->cap_lownode, cap->cap_highnode, position->pos_nodeid);
-+            return (-EINVAL);
-+        }
-+
-+        break;
-+    default:
-+        ELAN_DEBUG1 (ELAN_DBG_VP, "elanmod_classify_cap: can't decode type %x \n", cap->cap_type & ELAN_CAP_TYPE_MASK);
-+        return (-EINVAL);
-+
-+    }
-+
-+    switch (use)
-+    {
-+    case ELAN_USER_ATTACH:
-+    case ELAN_USER_DETACH:
-+        if (cap->cap_mycontext == ELAN_CAP_UNINITIALISED)
-+        {
-+            ELAN_DEBUG0 (ELAN_DBG_VP, "elanmod_classify_cap: cap->cap_mycontext == ELAN_CAP_UNINITIALISED");
-+            return (-EINVAL);
-+        }
-+
-+        if ((cap->cap_mycontext != ELAN_CAP_UNINITIALISED) &&
-+            (cap->cap_mycontext < cap->cap_lowcontext || cap->cap_mycontext > cap->cap_highcontext))
-+        {
-+            ELAN_DEBUG3 (ELAN_DBG_VP, "elanmod_classify_cap: cap->cap_mycontext out of range %d %d %d \n", cap->cap_lowcontext,cap->cap_mycontext,cap->cap_highcontext);
-+            return (-EINVAL);
-+        }
-+        break;
-+
-+    case ELAN_USER_P2P:
-+        break;
-+
-+    case ELAN_USER_BROADCAST:
-+        if (! (cap->cap_type & ELAN_CAP_TYPE_BROADCASTABLE)) {
-+            ELAN_DEBUG0 (ELAN_DBG_VP, "elanmod_classify_cap: use ELAN_USER_BROADCAST but cap not ELAN_CAP_TYPE_BROADCASTABLE\n");
-+            return (-EINVAL);
-+        }
-+        break;
-+
-+    default:
-+        ELAN_DEBUG1 (ELAN_DBG_VP, "elanmod_classify_cap: unknown use (%d)\n",use);
-+        return (-EINVAL);
-+    }
-+
-+
-+
-+    /* is any ctxt an rms one ?? */
-+    if (ELAN_RMS_CONTEXT(cap->cap_lowcontext) || ELAN_RMS_CONTEXT(cap->cap_highcontext))
-+    {
-+        /* so both low and high must be */
-+        if (!(ELAN_RMS_CONTEXT(cap->cap_lowcontext) && ELAN_RMS_CONTEXT(cap->cap_highcontext)))
-+        {
-+            ELAN_DEBUG2 (ELAN_DBG_VP, "elanmod_classify_cap: not rms ctxt %x %x\n",cap->cap_lowcontext,cap->cap_highcontext );
-+            return (-EINVAL);
-+        }
-+        ELAN_DEBUG0 (ELAN_DBG_VP, "elanmod_classify_cap: returning ELAN_CAP_RMS\n");
-+        return (ELAN_CAP_RMS);
-+    }
-+
-+    ELAN_DEBUG0 (ELAN_DBG_VP, "elanmod_classify_cap: returning ELAN_CAP_OK\n");
-+    return (ELAN_CAP_OK);
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * End:
-+ */
-diff -urN clean/drivers/net/qsnet/elan/elanmod_linux.c linux-2.6.9/drivers/net/qsnet/elan/elanmod_linux.c
---- clean/drivers/net/qsnet/elan/elanmod_linux.c	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/elan/elanmod_linux.c	2005-09-07 10:35:03.000000000 -0400
-@@ -0,0 +1,544 @@
-+/*
-+ * Copyright (c) 2003 by Quadrics Ltd.
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ident "@(#)$Id: elanmod_linux.c,v 1.23.2.6 2005/09/07 14:35:03 mike Exp $"
-+/* $Source: /cvs/master/quadrics/elanmod/modsrc/elanmod_linux.c,v $*/
-+
-+#include
-+
-+#include
-+#include
-+
-+#include
-+#include
-+#include
-+
-+#include
-+
-+MODULE_AUTHOR("Quadrics Ltd.");
-+MODULE_DESCRIPTION("Elan support module");
-+
-+MODULE_LICENSE("GPL");
-+
-+/* elanmod.c */
-+EXPORT_SYMBOL(elanmod_classify_cap);
-+
-+/* bitmap.c */
-+#include
-+
-+EXPORT_SYMBOL(bt_freebit);
-+EXPORT_SYMBOL(bt_lowbit);
-+EXPORT_SYMBOL(bt_nextbit);
-+EXPORT_SYMBOL(bt_copy);
-+EXPORT_SYMBOL(bt_zero);
-+EXPORT_SYMBOL(bt_fill);
-+EXPORT_SYMBOL(bt_cmp);
-+EXPORT_SYMBOL(bt_intersect);
-+EXPORT_SYMBOL(bt_remove);
-+EXPORT_SYMBOL(bt_add);
-+EXPORT_SYMBOL(bt_spans);
-+EXPORT_SYMBOL(bt_subset);
-+EXPORT_SYMBOL(bt_up);
-+EXPORT_SYMBOL(bt_down);
-+EXPORT_SYMBOL(bt_nbits);
-+
-+/* capability.c */
-+EXPORT_SYMBOL(elan_nullcap);
-+EXPORT_SYMBOL(elan_detach_cap);
-+EXPORT_SYMBOL(elan_attach_cap);
-+EXPORT_SYMBOL(elan_validate_map);
-+
-+/* stats.c */
-+EXPORT_SYMBOL(elan_stats_register);
-+EXPORT_SYMBOL(elan_stats_deregister);
-+
-+/* device.c */
-+EXPORT_SYMBOL(elan_dev_deregister);
-+EXPORT_SYMBOL(elan_dev_register);
-+
-+/* debug */
-+int  elan_debug_mode = QSNET_DEBUG_BUFFER;
-+int  elan_debug_mask;
-+
-+static struct proc_dir_entry *elan_procfs_root;
-+
-+extern void elan_procfs_init(void);
-+extern void elan_procfs_fini(void);
-+
-+static int elan_open (struct inode *ino, struct file *fp);
-+static int elan_release (struct inode *ino, struct file *fp);
-+static int elan_ioctl (struct inode *ino, struct file *fp, unsigned int cmd, unsigned long arg);
-+
-+static int elan_user_open (struct inode *ino, struct file *fp);
-+static int elan_user_release (struct inode *ino, struct file *fp);
-+static int elan_user_ioctl (struct inode *ino, struct file *fp, unsigned int cmd, unsigned long arg);
-+
-+static struct file_operations elan_fops =
-+{
-+    ioctl:   elan_ioctl,
-+    open:    elan_open,
-+    release: elan_release,
-+};
-+
-+static struct file_operations elan_user_fops =
-+{
-+    ioctl:   elan_user_ioctl,
-+    open:    elan_user_open,
-+    release: elan_user_release,
-+};
-+
-+static int __init elan_start(void)
-+{
-+    int res;
-+
-+    elan_procfs_init();
-+
-+    if ((res = elan_init()) != ESUCCESS)
-+    {
-+        elan_procfs_fini();
-+        return (-res);
-+    }
-+
-+    return (0);
-+}
-+
-+static void __exit elan_exit(void)
-+{
-+    elan_fini();
-+    elan_procfs_fini();
-+}
-+
-+
-+/* Declare the module init and exit functions */
-+void
-+elan_procfs_init()
-+{
-+    struct proc_dir_entry *p;
-+
-+    elan_procfs_root = proc_mkdir("elan", qsnet_procfs_root);
-+
-+    if (elan_procfs_root == NULL)
-+        return;
-+
-+    qsnet_proc_register_hex(elan_procfs_root, "debug_mask", &elan_debug_mask, 0);
-+    qsnet_proc_register_hex(elan_procfs_root, "debug_mode", &elan_debug_mode, 0);
-+
-+    if ((p = create_proc_entry ("ioctl", 0, elan_procfs_root)) != NULL)
-+    {
-+        p->proc_fops = &elan_fops;
-+        p->data      = 0;
-+        p->owner     = THIS_MODULE;
-+    }
-+
-+    /* user entry point */
-+    if ((p = create_proc_entry ("user", 0, elan_procfs_root)) != NULL)
-+    {
-+        p->proc_fops = &elan_user_fops;
-+        p->data      = 0;
-+        p->owner     = THIS_MODULE;
-+    }
-+}
-+
-+void
-+elan_procfs_fini()
-+{
-+    if (elan_procfs_root == NULL)
-+        return;
-+
-+    remove_proc_entry ("debug_mask", elan_procfs_root);
-+    remove_proc_entry ("debug_mode", elan_procfs_root);
-+
-+    remove_proc_entry ("ioctl", elan_procfs_root);
-+
-+    /* remove user entry point */
-+    remove_proc_entry ("user", elan_procfs_root);
-+
-+    remove_proc_entry ("elan", qsnet_procfs_root);
-+}
-+
-+module_init(elan_start);
-+module_exit(elan_exit);
-+
-+static int
-+elan_open (struct inode *inode, struct file *fp)
-+{
-+    MOD_INC_USE_COUNT;
-+    fp->private_data = NULL;
-+    return (0);
-+}
-+
-+static int
-+elan_release (struct inode *inode, struct file *fp)
-+{
-+    /* mark all caps owned by fp to be destroyed */
-+    elan_destroy_cap(fp,NULL);
-+
-+    MOD_DEC_USE_COUNT;
-+    return (0);
-+}
-+
-+static int
-+elan_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, unsigned long arg)
-+{
-+    int rep = 0;
-+
-+    switch (cmd)
-+    {
-+    case ELANCTRL_STATS_GET_NEXT :
-+    {
-+        ELANCTRL_STATS_GET_NEXT_STRUCT args;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_GET_NEXT_STRUCT)))
-+            return (-EFAULT);
-+
-+        /* uses copyin/copyout */
-+        if (elan_stats_get_next_index(args.statidx, args.next_statidx) != 0 )
-+            return (-EINVAL);
-+
-+        break;
-+    }
-+    case ELANCTRL_STATS_FIND_INDEX :
-+    {
-+        ELANCTRL_STATS_FIND_INDEX_STRUCT args;
-+        char block_name[ELAN_STATS_NAME_MAX_LEN+1];
-+        int res;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_FIND_INDEX_STRUCT)))
-+            return (-EFAULT);
-+
-+        res = strncpy_from_user (block_name, args.block_name, sizeof (block_name));
-+
-+        if (res == 0 || res == sizeof (block_name))
-+            return -ERANGE;
-+        if (res < 0)
-+            return res;
-+
-+        /* uses copyin/copyout */
-+        if (elan_stats_find_index(block_name, args.statidx, args.num_entries) != 0 )
-+            return (-EINVAL);
-+
-+        break;
-+    }
-+    case ELANCTRL_STATS_GET_BLOCK_INFO :
-+    {
-+        ELANCTRL_STATS_GET_BLOCK_INFO_STRUCT args;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_GET_BLOCK_INFO_STRUCT)))
-+            return (-EFAULT);
-+
-+        /* uses copyin/copyout */
-+        if (elan_stats_get_block_info(args.statidx, args.block_name, args.num_entries) != 0 )
-+            return (-EINVAL);
-+        break;
-+    }
-+    case ELANCTRL_STATS_GET_INDEX_NAME :
-+    {
-+        ELANCTRL_STATS_GET_INDEX_NAME_STRUCT args;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_GET_INDEX_NAME_STRUCT)))
-+            return (-EFAULT);
-+
-+        /* uses copyin/copyout */
-+        if (elan_stats_get_index_name(args.statidx, args.index, args.name) != 0 )
-+            return (-EINVAL);
-+        break;
-+    }
-+    case ELANCTRL_STATS_CLEAR_BLOCK :
-+    {
-+        ELANCTRL_STATS_CLEAR_BLOCK_STRUCT args;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_CLEAR_BLOCK_STRUCT)))
-+            return (-EFAULT);
-+
-+        /* statidx is not a pointer */
-+        if (elan_stats_clear_block(args.statidx) != 0 )
-+            return (-EINVAL);
-+        break;
-+    }
-+    case ELANCTRL_STATS_GET_BLOCK :
-+    {
-+        ELANCTRL_STATS_GET_BLOCK_STRUCT args;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_GET_BLOCK_STRUCT)))
-+            return (-EFAULT);
-+
-+        /* uses copyin/copyout */
-+        if (elan_stats_get_block(args.statidx, args.entries, args.values) != 0 )
-+            return (-EINVAL);
-+        break;
-+    }
-+    case ELANCTRL_GET_DEVINFO :
-+    {
-+        ELANCTRL_GET_DEVINFO_STRUCT args;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_GET_DEVINFO_STRUCT)))
-+            return (-EFAULT);
-+
-+        /* uses copyin/copyout */
-+        if (elan_get_devinfo(args.devidx, args.devinfo) != 0 )
-+            return (-EINVAL);
-+        break;
-+    }
-+    case ELANCTRL_GET_POSITION :
-+    {
-+        ELANCTRL_GET_POSITION_STRUCT args;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_GET_POSITION_STRUCT)))
-+            return (-EFAULT);
-+
-+        /* uses copyin/copyout */
-+        if (elan_get_position(args.devidx, args.position) != 0 )
-+            return (-EINVAL);
-+        break;
-+    }
-+    case ELANCTRL_SET_POSITION :
-+    {
-+        ELANCTRL_SET_POSITION_STRUCT args;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_SET_POSITION_STRUCT)))
-+            return (-EFAULT);
-+
-+        /* uses copyin/copyout */
-+        if (elan_set_position(args.devidx, args.nodeId, args.numNodes) != 0 )
-+            return (-EINVAL);
-+        break;
-+    }
-+    case ELANCTRL_CREATE_CAP :
-+    {
-+        ELANCTRL_CREATE_CAP_STRUCT *args;
-+
-+        /* get space for args */
-+        KMEM_ALLOC(args, ELANCTRL_CREATE_CAP_STRUCT *, sizeof(ELANCTRL_CREATE_CAP_STRUCT), 1);
-+        if (args == NULL)
-+            return(-ENOMEM);
-+
-+        /* copy them */
-+        if (copy_from_user (args, (void *) arg, sizeof (ELANCTRL_CREATE_CAP_STRUCT)))
-+            return (-EFAULT);
-+        else
-+        {
-+            if (((rep = elan_validate_cap(&args->cap)) != 0) || ((rep = elan_create_cap(fp,&args->cap)) != 0))
-+                rep = (-rep);
-+        }
-+
-+        /* free the space */
-+        KMEM_FREE(args, sizeof(ELANCTRL_CREATE_CAP_STRUCT));
-+
-+        break;
-+    }
-+    case ELANCTRL_DESTROY_CAP :
-+    {
-+        ELANCTRL_DESTROY_CAP_STRUCT *args;
-+
-+        /* get space for args */
-+        KMEM_ALLOC(args, ELANCTRL_DESTROY_CAP_STRUCT *, sizeof(ELANCTRL_DESTROY_CAP_STRUCT), 1);
-+        if (args == NULL)
-+            return(-ENOMEM);
-+
-+        /* copy them */
-+        if (copy_from_user (args, (void *) arg, sizeof (ELANCTRL_DESTROY_CAP_STRUCT)))
-+            rep = (-EFAULT);
-+        else
-+        {
-+            if (elan_destroy_cap(fp, &args->cap) != 0 )
-+                rep = (-EINVAL);
-+        }
-+
-+        /* free the space */
-+        KMEM_FREE(args, sizeof(ELANCTRL_DESTROY_CAP_STRUCT));
-+
-+        break;
-+    }
-+    case ELANCTRL_CREATE_VP :
-+    {
-+        ELANCTRL_CREATE_VP_STRUCT *args;
-+
-+        /* get space for args */
-+        KMEM_ALLOC(args, ELANCTRL_CREATE_VP_STRUCT *, sizeof(ELANCTRL_CREATE_VP_STRUCT), 1);
-+        if (args == NULL)
-+            return(-ENOMEM);
-+
-+        /* copy them */
-+        if (copy_from_user (args, (void *) arg, sizeof (ELANCTRL_CREATE_VP_STRUCT)))
-+            return (-EFAULT);
-+        else
-+        {
-+            if ((elan_validate_cap( &args->map) != 0) || (elan_create_vp(fp, &args->cap, &args->map) != 0 ))
-+                rep = (-EINVAL);
-+        }
-+
-+        KMEM_FREE(args, sizeof(ELANCTRL_CREATE_VP_STRUCT ));
-+
-+        break;
-+    }
-+    case ELANCTRL_DESTROY_VP :
-+    {
-+        ELANCTRL_DESTROY_VP_STRUCT *args;
-+
-+        /* get space for args */
-+        KMEM_ALLOC(args, ELANCTRL_DESTROY_VP_STRUCT *, sizeof(ELANCTRL_DESTROY_VP_STRUCT), 1);
-+        if (args == NULL)
-+            return(-ENOMEM);
-+
-+        /* copy them */
-+        if (copy_from_user (args, (void *) arg, sizeof (ELANCTRL_DESTROY_VP_STRUCT)))
-+            rep = (-EFAULT);
-+        else
-+        {
-+            if (elan_destroy_vp(fp, &args->cap, &args->map) != 0 )
-+                rep = (-EINVAL);
-+        }
-+
-+        KMEM_FREE(args, sizeof(ELANCTRL_DESTROY_VP_STRUCT ));
-+
-+        break;
-+    }
-+
-+    case ELANCTRL_GET_CAPS :
-+    {
-+        ELANCTRL_GET_CAPS_STRUCT args;
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_GET_CAPS_STRUCT)))
-+            return (-EFAULT);
-+
-+        /* uses copyin/copyout */
-+        if (elan_get_caps(args.number_of_results, args.array_size, args.caps) != 0 )
-+            return (-EINVAL);
-+        break;
-+    }
-+    case ELANCTRL_DEBUG_DUMP :
-+    {
-+        elan_cap_dump();
-+        elan_dev_dump();
-+
-+        break;
-+    }
-+    case ELANCTRL_DEBUG_BUFFER :
-+    {
-+        ELANCTRL_DEBUG_BUFFER_STRUCT args;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_DEBUG_BUFFER_STRUCT)))
-+            return (-EFAULT);
-+
-+        /* uses copyin/copyout */
-+        if ((args.size = qsnet_debug_buffer (args.buffer, args.size)) != -1 &&
-+            copy_to_user ((void *) arg, &args, sizeof (ELANCTRL_DEBUG_BUFFER_STRUCT)))
-+            return (-EFAULT);
-+        break;
-+    }
-+    default:
-+        return (-EINVAL);
-+        break;
-+    }
-+
-+    return (rep);
-+}
-+
-+
-+static int
-+elan_user_open (struct inode *inode, struct file *fp)
-+{
-+    MOD_INC_USE_COUNT;
-+    fp->private_data = NULL;
-+    return (0);
-+}
-+
-+static int
-+elan_user_release (struct inode *inode, struct file *fp)
-+{
-+    struct elan_cap_node_struct *cap_ptr = (struct elan_cap_node_struct *)fp->private_data;
-+
-+    if (cap_ptr) {
-+        /* Remove this process from usercopy system */
-+        /* GNAT 7498: Need to pass in a common owner pointer */
-+        if (elan_usercopy_detach (cap_ptr, fp) == 0)
-+            fp->private_data = NULL;
-+    }
-+
-+    MOD_DEC_USE_COUNT;
-+    return (0);
-+}
-+
-+static int
-+elan_user_ioctl (struct inode *inode, struct file *fp, unsigned int cmd, unsigned long arg)
-+{
-+    int rep = 0;
-+#if !defined(NO_MMPUT) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 9)
-+    struct elan_cap_node_struct *cap_ptr = (struct elan_cap_node_struct *)fp->private_data;
-+#endif
-+
-+    switch (cmd)
-+    {
-+#if !defined(NO_MMPUT) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 9)
-+    case ELANCTRL_USERCOPY_ATTACH:
-+    {
-+        ELANCTRL_USERCOPY_ATTACH_STRUCT args;
-+
-+        /* Are we already attached ? */
-+        if (cap_ptr != NULL)
-+            return -EAGAIN;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_USERCOPY_ATTACH_STRUCT)))
-+            return -EFAULT;
-+
-+        /* Lookup the associated cap node and check we are allowed to
-+         * access it using the supplied capability. If allowed, then associate
-+         * our task with that cap node
-+         * We also reference count it as we then hang it off the file pointer
-+         */
-+        /* GNAT 7498: Need to pass in a common owner pointer */
-+        if ((rep = elan_usercopy_attach(&args.cap, &cap_ptr, current, fp)) < 0)
-+            return -EAGAIN;
-+
-+        /* Hang cap node off file pointer for future usercopy ioctls */
-+        fp->private_data = (void *) cap_ptr;
-+
-+        break;
-+    }
-+    case ELANCTRL_USERCOPY_DETACH:
-+    {
-+        /* Detach process */
-+        if (cap_ptr) {
-+            /* Remove this process from usercopy system */
-+            /* GNAT 7498: Need to pass in a common owner pointer */
-+            if ((rep = elan_usercopy_detach (cap_ptr, fp)) == 0)
-+                fp->private_data = NULL;
-+        }
-+        else
-+            rep = -EINVAL;
-+
-+        break;
-+    }
-+    case ELANCTRL_USERCOPY:
-+    {
-+        ELANCTRL_USERCOPY_STRUCT args;
-+
-+        /* Check that we have previously successfully attached */
-+        if (cap_ptr == NULL)
-+            return -EAGAIN;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_USERCOPY_STRUCT)))
-+            return (-EFAULT);
-+
-+        /* Perform user-to-user copy */
-+        rep = elan_usercopy(args.remote, args.local, args.len, args.write, args.ctxId, cap_ptr);
-+
-+        break;
-+    }
-+#endif /* !defined(NO_MMPUT) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 9) */
-+    default:
-+        return (-EINVAL);
-+        break;
-+    }
-+
-+    return (rep);
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * End:
-+ */
-diff -urN clean/drivers/net/qsnet/elan/Makefile linux-2.6.9/drivers/net/qsnet/elan/Makefile
---- clean/drivers/net/qsnet/elan/Makefile	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/elan/Makefile	2005-10-10 17:47:30.000000000 -0400
-@@ -0,0 +1,15 @@
-+#
-+# Makefile for Quadrics QsNet
-+#
-+# Copyright (c) 2002-2004 Quadrics Ltd
-+#
-+# File: drivers/net/qsnet/elan/Makefile
-+#
-+
-+
-+#
-+
-+obj-$(CONFIG_QSNET)	+= elan.o
-+elan-objs	:= elanmod.o device.o stats.o devinfo.o capability.o usercopy.o elanmod_linux.o capability_general.o bitmap.o
-+
-+EXTRA_CFLAGS		+= -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
-diff -urN clean/drivers/net/qsnet/elan/Makefile.conf linux-2.6.9/drivers/net/qsnet/elan/Makefile.conf
---- clean/drivers/net/qsnet/elan/Makefile.conf	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/elan/Makefile.conf	2005-09-07 10:39:36.000000000 -0400
-@@ -0,0 +1,10 @@
-+# Flags for generating QsNet Linux Kernel Makefiles
-+MODNAME		=	elan.o
-+MODULENAME	=	elan
-+KOBJFILES	=	elanmod.o device.o stats.o devinfo.o capability.o usercopy.o elanmod_linux.o capability_general.o bitmap.o
-+EXPORT_KOBJS	=	elanmod_linux.o
-+CONFIG_NAME	=	CONFIG_QSNET
-+SGALFC		=
-+# EXTRALINES START
-+
-+# EXTRALINES END
-diff -urN clean/drivers/net/qsnet/elan/quadrics_version.h linux-2.6.9/drivers/net/qsnet/elan/quadrics_version.h
---- clean/drivers/net/qsnet/elan/quadrics_version.h	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/elan/quadrics_version.h	2005-09-07 10:39:49.000000000 -0400
-@@ -0,0 +1 @@
-+#define QUADRICS_VERSION "5.11.3qsnet"
-diff -urN clean/drivers/net/qsnet/elan/stats.c linux-2.6.9/drivers/net/qsnet/elan/stats.c
---- clean/drivers/net/qsnet/elan/stats.c	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/elan/stats.c	2005-04-13 05:31:47.000000000 -0400
-@@ -0,0 +1,277 @@
-+/*
-+ * Copyright (c) 2003 by Quadrics Ltd.
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ident "@(#)$Id: stats.c,v 1.7 2005/04/13 09:31:47 addy Exp $"
-+/* $Source: /cvs/master/quadrics/elanmod/modsrc/stats.c,v $*/
-+
-+#include
-+#include
-+
-+static LIST_HEAD(elan_stats_list);
-+static ELAN_STATS_IDX elan_next_statidx=0;
-+
-+ELAN_STATS_STRUCT *
-+elan_stats_find(ELAN_STATS_IDX statidx)
-+{
-+    struct list_head  *tmp;
-+    ELAN_STATS_STRUCT *ptr=NULL;
-+
-+    list_for_each(tmp, &elan_stats_list) {
-+        ptr = list_entry(tmp, ELAN_STATS_STRUCT , node);
-+        if ( ptr->statidx == statidx )
-+            return ptr;
-+    }
-+
-+    ELAN_DEBUG1 (ELAN_DBG_CTRL, "elan_stats_find failed %d\n", statidx);
-+    return NULL;
-+}
-+
-+ELAN_STATS_STRUCT *
-+elan_stats_find_by_name(caddr_t block_name)
-+{
-+    struct list_head  *tmp;
-+    ELAN_STATS_STRUCT *ptr=NULL;
-+
-+    list_for_each(tmp, &elan_stats_list) {
-+        ptr = list_entry(tmp, ELAN_STATS_STRUCT , node);
-+        if (!strcmp(ptr->block_name, block_name))
-+        {
-+            ELAN_DEBUG3 (ELAN_DBG_CTRL, "elan_stats_find_by_name found %s (%d,%d)\n", block_name, ptr->statidx, ptr->num_entries);
-+            return ptr;
-+        }
-+    }
-+
-+    ELAN_DEBUG1 (ELAN_DBG_CTRL, "elan_stats_find_by_name failed %s\n", block_name);
-+    return NULL;
-+}
-+
-+ELAN_STATS_STRUCT *
-+elan_stats_find_next(ELAN_STATS_IDX statidx)
-+{
-+    struct list_head  *tmp;
-+    ELAN_STATS_STRUCT *ptr=NULL;
-+
-+    list_for_each(tmp, &elan_stats_list) {
-+        ptr = list_entry(tmp, ELAN_STATS_STRUCT , node);
-+
-+        if ( ptr->statidx > statidx )
-+            return ptr;
-+    }
-+
-+    return NULL;
-+}
-+
-+int
-+elan_stats_get_next_index (ELAN_STATS_IDX statidx, ELAN_STATS_IDX *next_block)
-+{
-+    ELAN_STATS_STRUCT *target;
-+    ELAN_STATS_IDX     next = 0;
-+
-+    ELANMOD_RWLOCK_READ(&elan_rwlock);
-+
-+    if ((target = elan_stats_find_next(statidx)) != NULL)
-+        next = target->statidx;
-+
-+    copyout(&next, next_block, sizeof(ELAN_STATS_IDX) );
-+
-+    ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
-+    return 0;
-+}
-+
-+int
-+elan_stats_find_index (caddr_t block_name, ELAN_STATS_IDX *statidx, uint *num_entries)
-+{
-+    ELAN_STATS_STRUCT *target;
-+    ELAN_STATS_IDX     index   = 0;
-+    uint               entries = 0;
-+
-+    ELANMOD_RWLOCK_READ(&elan_rwlock);
-+
-+    ELAN_DEBUG1(ELAN_DBG_CTRL, "elan_stats_find_index %s \n", block_name);
-+
-+    if ((target = elan_stats_find_by_name(block_name)) != NULL)
-+    {
-+        index   = target->statidx;
-+        entries = target->num_entries;
-+    }
-+
-+    ELAN_DEBUG3(ELAN_DBG_CTRL, "elan_stats_find_index found %d %d (target=%p)\n", index, entries, target);
-+
-+    copyout(&index,   statidx,     sizeof(ELAN_STATS_IDX));
-+    copyout(&entries, num_entries, sizeof(uint));
-+
-+    ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
-+    return ESUCCESS;
-+}
-+
-+int
-+elan_stats_get_block_info (ELAN_STATS_IDX statidx, caddr_t block_name, uint *num_entries)
-+{
-+    ELAN_STATS_STRUCT *target;
-+    int                res=EINVAL;
-+
-+    ELANMOD_RWLOCK_READ(&elan_rwlock);
-+
-+    ELAN_DEBUG1(ELAN_DBG_CTRL, "elan_stats_get_block_info statidx %d\n",statidx);
-+
-+    if ((target = elan_stats_find(statidx)) != NULL)
-+    {
-+        ELAN_DEBUG2(ELAN_DBG_CTRL, "elan_stats_get_block_info name %s entries %d\n",block_name, *num_entries);
-+
-+        copyout( target->block_name,  block_name,  ELAN_STATS_NAME_MAX_LEN);
-+        copyout(&target->num_entries, num_entries, sizeof(uint));
-+
-+        res = ESUCCESS;
-+    }
-+
-+    ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
-+    return res;
-+}
-+
-+int
-+elan_stats_get_index_name (ELAN_STATS_IDX statidx, uint index, caddr_t name)
-+{
-+    ELAN_STATS_STRUCT *target;
-+    int                res=EINVAL;
-+
-+    ELANMOD_RWLOCK_READ(&elan_rwlock);
-+
-+    ELAN_DEBUG2(ELAN_DBG_CTRL, "elan_stats_get_index_name statidx %d index %d\n",statidx, index);
-+
-+    if ((target = elan_stats_find(statidx)) != NULL)
-+    {
-+        if ( target->ops->elan_stats_get_name == NULL)
-+        {
-+            ELAN_DEBUG0(ELAN_DBG_CTRL, "elan_stats_get_index_name no callback\n");
-+            ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
-+            return res;
-+        }
-+
-+        if ((res = target->ops->elan_stats_get_name(target->arg, index, name)) == 0)
-+            ELAN_DEBUG1(ELAN_DBG_CTRL, "elan_stats_get_index_name name %s\n",name);
-+
-+    }
-+    ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
-+    return res;
-+}
-+
-+int
-+elan_stats_get_block (ELAN_STATS_IDX statidx, uint entries, ulong *values)
-+{
-+    ELAN_STATS_STRUCT *target;
-+    int                res=EINVAL;
-+
-+    ELANMOD_RWLOCK_READ(&elan_rwlock);
-+
-+
-+    if ((target = elan_stats_find(statidx)) != NULL)
-+    {
-+        if ( target->ops->elan_stats_get_block == NULL)
-+        {
-+            ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
-+            return res;
-+        }
-+
-+        res = target->ops->elan_stats_get_block(target->arg, entries, values);
-+    }
-+
-+    ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
-+    return res;
-+}
-+
-+int
-+elan_stats_clear_block (ELAN_STATS_IDX statidx)
-+{
-+    ELAN_STATS_STRUCT *target;
-+    int                res=EINVAL;
-+
-+    ELANMOD_RWLOCK_WRITE(&elan_rwlock);
-+
-+    if ((target = elan_stats_find(statidx)) != NULL)
-+    {
-+        if ( target->ops->elan_stats_clear_block == NULL)
-+        {
-+            ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+            return res;
-+        }
-+
-+        res = target->ops->elan_stats_clear_block(target->arg);
-+    }
-+    ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+    return res;
-+}
-+
-+void
-+elan_stats_next_statidx(void)
-+{
-+    /* XXXXX need to put a not-in-use check here in case we loop MRH */
-+    /* tho it's a biggish loop :) */
-+    elan_next_statidx++;
-+    if (!elan_next_statidx)
-+        elan_next_statidx++;
-+}
-+
-+int
-+elan_stats_register (ELAN_STATS_IDX *statidx,
-+                     char           *block_name,
-+                     uint            num_entries,
-+                     ELAN_STATS_OPS *ops,
-+                     void           *arg)
-+{
-+    ELAN_STATS_STRUCT *target;
-+
-+    ELANMOD_RWLOCK_WRITE(&elan_rwlock);
-+
-+    /* create it and add */
-+    KMEM_ALLOC(target, ELAN_STATS_STRUCT *, sizeof(ELAN_STATS_STRUCT), 1);
-+    if (target == NULL)
-+    {
-+        ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+        return ENOMEM;
-+    }
-+
-+    elan_stats_next_statidx();
-+
-+    *statidx = elan_next_statidx;
-+
-+    target->statidx     = elan_next_statidx;
-+    target->num_entries = num_entries;
-+    target->ops         = ops;
-+    target->arg         = arg;
-+    strcpy(target->block_name, block_name);
-+
-+    list_add_tail(&target->node, &elan_stats_list);
-+
-+    ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+    return 0;
-+}
-+
-+int
-+elan_stats_deregister (ELAN_STATS_IDX statidx)
-+{
-+    ELAN_STATS_STRUCT *target;
-+
-+    ELANMOD_RWLOCK_WRITE(&elan_rwlock);
-+    if ((target = elan_stats_find(statidx)) != NULL)
-+    {
-+
-+        list_del(&target->node);
-+
-+        /* delete target entry */
-+        KMEM_FREE(target, sizeof(ELAN_STATS_STRUCT));
-+    }
-+    ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
-+
-+    return target == NULL ? EINVAL : 0;
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * End:
-+ */
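stats.c repeats the registry-plus-ops pattern of device.c: a block registers an ELAN_STATS_OPS vector and is then driven through the elan_stats_get_name/elan_stats_get_block/elan_stats_clear_block hooks dispatched above. A hypothetical registration sketch; the ops field names are taken from the call sites above, everything else is invented for illustration:

/* Illustrative only: registering a hypothetical two-counter block. */
static ulong my_counters[2];

static int my_get_name(void *arg, uint index, caddr_t name)
{
    strcpy(name, index == 0 ? "tx_packets" : "rx_packets");
    return 0;   /* 0 == success, as tested in elan_stats_get_index_name() */
}

static int my_get_block(void *arg, uint entries, ulong *values)
{
    memcpy(values, my_counters, entries * sizeof(ulong));
    return 0;
}

static int my_clear_block(void *arg)
{
    memset(my_counters, 0, sizeof(my_counters));
    return 0;
}

static ELAN_STATS_OPS my_stats_ops = {
    .elan_stats_get_name    = my_get_name,
    .elan_stats_get_block   = my_get_block,
    .elan_stats_clear_block = my_clear_block,
};

/* ELAN_STATS_IDX idx;
 * elan_stats_register(&idx, "my_block", 2, &my_stats_ops, NULL); */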
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: usercopy.c,v 1.10.2.6 2005/09/06 09:06:58 addy Exp $" -+/*$Source: /cvs/master/quadrics/elanmod/modsrc/usercopy.c,v $*/ -+ -+#include -+#include -+ -+#include -+#include -+ -+#include -+#include -+#include -+#include -+ -+/* -+ * Access another process' address space copying directly to/from user space (current) -+ * -+ * Remote is the non-local process memory address, which we access using get_user_pages() and kmap() -+ * For the local memory (i.e. owned by current task) we use the standard copy_[to|from]_user interfaces -+ * -+ * Code based on linux/kernel/ptrace.c -+ */ -+ -+#if defined(NO_MMPUT) || LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 9) -+static size_t -+rw_process_vm (struct task_struct *tsk, unsigned long remote, void *local, size_t len, int write) -+{ -+#warning "NO EXPORTED MMPUT - usercopy not possible" -+ -+ /* Without an exported mmput() function we cannot make this -+ * safe as the remote process may be torn down during the copy -+ * I experimented with taking a write lock on the remote mmap_sem -+ * but this seemed to lead to deadlocks when pagefaulting -+ */ -+ /* GNAT 7768: We have also found that some older versions of the get_task_mm() code -+ * in linux/sched.h call mmgrab() which is not exported in any 2.6.X kernel -+ */ -+ return 0; -+} -+ -+#else -+static size_t -+rw_process_vm (struct task_struct *tsk, unsigned long remote, void *local, size_t len, int write) -+{ -+ struct mm_struct *mm; -+ struct vm_area_struct *vma; -+ struct page *page; -+ void *old_buf = local; -+ -+ if (write) -+ ELAN_DEBUG5(ELAN_DBG_USERCOPY, "%p remote write from %p to %lx len %ld tsk %p\n", -+ current, local, remote, (long)len, tsk); -+ else -+ ELAN_DEBUG5(ELAN_DBG_USERCOPY, "%p remote read from %lx to %p len %ld tsk %p\n", -+ current, remote, local, (long)len, tsk); -+ -+ /* This locks the task, grabs a reference to the mm and then unlocks the task */ -+ mm = get_task_mm(tsk); -+ -+ if (!mm) -+ { -+ /* GNAT 7777: Must drop lock before returning */ -+ ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock); -+ return 0; -+ } -+ -+ /* Do not try and copy from ourselves! 
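-+ * (the copy_to_user()/copy_from_user() calls below can fault and re-take
-+ * mmap_sem, which is not safe when mm == current->mm)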
*/
-+ if (mm == current->mm)
-+ {
-+ /* GNAT 7777: Must now drop the elanmod lock as otherwise we can create a deadlock
-+ * during the mmput() due to it calling exit_mmap() for the remote process
-+ */
-+ ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
-+ mmput(mm);
-+ return 0;
-+ }
-+
-+ down_read(&mm->mmap_sem);
-+
-+ /* ignore errors, just check how much was successfully transferred */
-+ while (len) {
-+ size_t bytes, ret, offset;
-+ void *maddr;
-+
-+ ret = get_user_pages(tsk, mm, remote, 1, write, 1, &page, &vma);
-+ if (ret <= 0)
-+ break;
-+
-+ bytes = len;
-+ offset = remote & (PAGE_SIZE-1);
-+ if (bytes > PAGE_SIZE-offset)
-+ bytes = PAGE_SIZE-offset;
-+
-+ maddr = kmap(page);
-+ if (write) {
-+ if (copy_from_user(/* remote to */maddr + offset, /* user from */local, bytes)) {
-+ kunmap(page);
-+ page_cache_release(page);
-+ break;
-+ }
-+ set_page_dirty_lock(page);
-+ } else {
-+ if (copy_to_user(/* user to */local, /* remote from */maddr + offset, bytes)) {
-+ kunmap(page);
-+ page_cache_release(page);
-+ break;
-+ }
-+ }
-+ kunmap(page);
-+ page_cache_release(page);
-+ len -= bytes;
-+ local += bytes;
-+ remote += bytes;
-+ }
-+
-+ up_read(&mm->mmap_sem);
-+
-+ /* GNAT 7777: Must now drop the elanmod lock as otherwise we can create a deadlock
-+ * during the mmput() due to it calling exit_mmap() in the remote process
-+ */
-+ ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
-+ mmput(mm);
-+
-+ /* Return num bytes copied */
-+ return local - old_buf;
-+}
-+#endif /* !defined(NO_MMPUT) || LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 9) */
-+
-+int
-+elan_usercopy (void *remote, void *local, size_t len, int write, int ctxId, struct elan_cap_node_struct *cap_ptr)
-+{
-+ int ret = 0;
-+ size_t bytes;
-+
-+ struct task_struct *tsk;
-+
-+ /* Grab a read lock on elanmod lock
-+ *
-+ * This prevents any process from exiting whilst the copy is in progress
-+ * as it will need to take a write lock on the elanmod lock in order to do so
-+ * As exit_fs() is called before the task is destroyed this should prevent
-+ * the remote tsk from being torn down during the copy
-+ *
-+ * It would be much easier if we could just use get_task_struct()/put_task_struct()
-+ * but __put_task_struct() is not exported by the 2.6.X kernels - sigh.
-+ */
-+ ELANMOD_RWLOCK_READ(&elan_rwlock);
-+
-+ /* Get the task handle from the cap node for the supplied ctxId */
-+ if ((ret = elan_usercopy_handle(cap_ptr, ctxId, (void **)&tsk)) < 0)
-+ {
-+ ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
-+ return ret;
-+ }
-+
-+ ELAN_DEBUG6(ELAN_DBG_USERCOPY,
-+ "elan_usercopy: remote %p local %p len %ld write %d ctxId %d tsk %p\n",
-+ remote, local, (long) len, write, ctxId, tsk);
-+
-+ ASSERT(tsk);
-+
-+ /* The BKL - why ??? (arch/[i386|ia64]/kernel/ptrace.c seems to hold it) */
-+// lock_kernel();
-+
-+ bytes = rw_process_vm(tsk, (unsigned long)remote, local, len, write);
-+
-+ if (bytes != len)
-+ {
-+ ELAN_DEBUG2(ELAN_DBG_USERCOPY, "elan_usercopy: Failed to read %ld bytes (%ld copied)\n",
-+ (long)len, (long)bytes);
-+ ret = -EPERM;
-+ }
-+
-+ /* The BKL - why ???
(arch/[i386|ia64]/kernel/ptrace.c seems to hold it) */ -+// unlock_kernel(); -+ -+ /* GNAT 7777: rw_process_vm() now drops the elanmod lock -+ * -+ * ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock); -+ */ -+ -+ return ret; -+} -+ -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "linux" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan3/context.c linux-2.6.9/drivers/net/qsnet/elan3/context.c ---- clean/drivers/net/qsnet/elan3/context.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan3/context.c 2005-07-20 07:35:36.000000000 -0400 -@@ -0,0 +1,2101 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: context.c,v 1.117.2.1 2005/07/20 11:35:36 mike Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/os/context.c,v $ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+/* -+ * Global variables configurable from /etc/system file -+ * (OR /etc/sysconfigtab on Digital UNIX) -+ */ -+int ntrapped_threads = 64; -+int ntrapped_dmas = 64; -+int ntrapped_events = E3_NonSysCntxQueueSize + 128; -+int ntrapped_commands = 64; -+int noverflow_commands = 1024; -+int nswapped_threads = 64; -+int nswapped_dmas = 64; -+ -+#define NUM_HALTOPS 8 -+ -+void *SwapListsLockInfo; -+void *CmdLockInfo; -+ -+static void HaltSwapContext (ELAN3_DEV *dev, void *arg); -+ -+static char *OthersStateStrings[] = {"others_running", "others_halting", "others_swapping", -+ "others_halting_more", "others_swapping_more", "others_swapped"}; -+ -+ELAN3_CTXT * -+elan3_alloc (ELAN3_DEV *dev, int kernel) -+{ -+ ELAN3_CTXT *ctxt; -+ int i; -+ unsigned long flags; -+ -+ PRINTF1 (DBG_DEVICE, DBG_FN, "elan3_alloc: %s\n", kernel ? 
"kernel" : "user"); -+ -+ KMEM_ZALLOC (ctxt, ELAN3_CTXT *, sizeof (ELAN3_CTXT), TRUE); -+ -+ if (ctxt == NULL) -+ return (NULL); -+ -+ elan_nullcap (&ctxt->Capability); -+ -+ ctxt->Device = dev; -+ ctxt->OthersState = CTXT_OTHERS_SWAPPED; -+ ctxt->RefCnt = 1; -+ ctxt->Position = dev->Position; -+ -+ if (kernel) -+ ctxt->Status = CTXT_DETACHED | CTXT_SWAPPED_OUT | CTXT_KERNEL; -+ else -+ ctxt->Status = CTXT_DETACHED | CTXT_SWAPPED_OUT | CTXT_NO_LWPS; -+ -+ ctxt->Elan3mmu = elan3mmu_alloc (ctxt); -+ -+ kcondvar_init (&ctxt->Wait); -+ kcondvar_init (&ctxt->CommandPortWait); -+ kcondvar_init (&ctxt->LwpWait); -+ kcondvar_init (&ctxt->HaltWait); -+ -+ spin_lock_init (&ctxt->InputFaultLock); -+ -+ kmutex_init (&ctxt->SwapListsLock); -+ kmutex_init (&ctxt->CmdPortLock); -+ kmutex_init (&ctxt->NetworkErrorLock); -+ kmutex_init (&ctxt->CmdLock); -+ -+ krwlock_init (&ctxt->VpLock); -+ -+ KMEM_GETPAGES (ctxt->FlagPage, ELAN3_FLAGSTATS *, 1, TRUE); -+ if (!ctxt->FlagPage) -+ goto error; -+ bzero ((char *) ctxt->FlagPage, PAGESIZE); -+ -+ KMEM_ZALLOC (ctxt->CommandTraps, COMMAND_TRAP *, sizeof (COMMAND_TRAP) * ntrapped_commands, TRUE); -+ if (!ctxt->CommandTraps) -+ goto error; -+ -+ KMEM_ZALLOC (ctxt->ThreadTraps, THREAD_TRAP *, sizeof (THREAD_TRAP) * ntrapped_threads, TRUE); -+ if (!ctxt->ThreadTraps) -+ goto error; -+ -+ KMEM_ZALLOC (ctxt->DmaTraps, DMA_TRAP *, sizeof (DMA_TRAP) * ntrapped_dmas, TRUE); -+ if (!ctxt->DmaTraps) -+ goto error; -+ -+ KMEM_ZALLOC (ctxt->EventCookies, EVENT_COOKIE *, sizeof (EVENT_COOKIE) * ntrapped_events, TRUE); -+ if (!ctxt->EventCookies) -+ goto error; -+ -+ KMEM_ZALLOC (ctxt->Commands, CProcTrapBuf_BE *, sizeof (CProcTrapBuf_BE) * noverflow_commands,TRUE); -+ if (!ctxt->Commands) -+ goto error; -+ -+ KMEM_ZALLOC (ctxt->SwapThreads, E3_Addr *, sizeof (E3_Addr) * nswapped_threads, TRUE); -+ if (!ctxt->SwapThreads) -+ goto error; -+ -+ KMEM_ZALLOC (ctxt->SwapDmas, E3_DMA_BE *, sizeof (E3_DMA_BE) * nswapped_dmas, TRUE); -+ if (!ctxt->SwapDmas) -+ goto error; -+ -+ /* -+ * "slop" is defined as follows : -+ * number of entries REQUIRED to be left spare to consume all other traps -+ * up until the time that the context can be swapped out. -+ * -+ * CommandTrapQ : 1 command issued by main + 1 issued by the thread processor per elan -+ * ThreadTrapQ : 2 from command + 2 input -+ * DmaTrapQ : 2 from command + 2 input -+ * EventTrapQ : 2 from command + 1 thread + 1 dma + 2 input + E3_NonSysCntxQueueSize -+ */ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ ELAN3_QUEUE_INIT (ctxt->CommandTrapQ, ntrapped_commands, 2); -+ ELAN3_QUEUE_INIT (ctxt->ThreadTrapQ, ntrapped_threads, 4); -+ ELAN3_QUEUE_INIT (ctxt->DmaTrapQ, ntrapped_dmas, 4); -+ ELAN3_QUEUE_INIT (ctxt->EventCookieQ, ntrapped_events, MIN(E3_NonSysCntxQueueSize + 6, ntrapped_events - 6)); -+ ELAN3_QUEUE_INIT (ctxt->CommandQ, noverflow_commands, 0); -+ ELAN3_QUEUE_INIT (ctxt->SwapThreadQ, nswapped_threads, 0); -+ ELAN3_QUEUE_INIT (ctxt->SwapDmaQ, nswapped_dmas, 0); -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ -+#if defined(DIGITAL_UNIX) -+ /* Allocate the segelan for the command port */ -+ if (! kernel && elan3_segelan3_create (ctxt) == NULL) -+ { -+ elan3_detach(ctxt); -+ elan3_free (ctxt); -+ return ((ELAN3_CTXT *) NULL); -+ } -+#endif -+ -+ /* -+ * Initialise the Input Fault list -+ */ -+ spin_lock (&ctxt->InputFaultLock); -+ for (i = 0; i < NUM_INPUT_FAULT_SAVE; i++) -+ ctxt->InputFaults[i].Next = (i == (NUM_INPUT_FAULT_SAVE-1)) ? 
NULL : &ctxt->InputFaults[i+1]; -+ ctxt->InputFaultList = &ctxt->InputFaults[0]; -+ spin_unlock (&ctxt->InputFaultLock); -+ -+ ReserveHaltOperations (dev, NUM_HALTOPS, TRUE); -+ -+ if ((ctxt->RouteTable = AllocateRouteTable (ctxt->Device, ELAN3_MAX_VPS)) == NULL) -+ { -+ PRINTF0 (DBG_DEVICE, DBG_FN, "elan3_alloc: cannot map route table\n"); -+ elan3_detach(ctxt); -+ elan3_free (ctxt); -+ return ((ELAN3_CTXT *) NULL); -+ } -+ -+ return (ctxt); -+ -+ -+ error: -+ -+ elan3_detach(ctxt); -+ elan3_free (ctxt); -+ if (ctxt->FlagPage) -+ KMEM_FREEPAGES ((void *) ctxt->FlagPage, 1); -+ if (ctxt->CommandTraps) -+ KMEM_FREE ((void *) ctxt->CommandTraps, sizeof (COMMAND_TRAP) * ntrapped_commands); -+ if (ctxt->ThreadTraps) -+ KMEM_FREE ((void *) ctxt->ThreadTraps, sizeof (THREAD_TRAP) * ntrapped_threads); -+ if (ctxt->DmaTraps) -+ KMEM_FREE ((void *) ctxt->DmaTraps, sizeof (DMA_TRAP) * ntrapped_dmas); -+ if (ctxt->EventCookies) -+ KMEM_FREE ((void *) ctxt->EventCookies, sizeof (EVENT_COOKIE) * ntrapped_events); -+ if (ctxt->Commands) -+ KMEM_FREE ((void *) ctxt->Commands, sizeof (CProcTrapBuf_BE) * noverflow_commands); -+ if (ctxt->SwapThreads) -+ KMEM_FREE ((void *) ctxt->SwapThreads, sizeof (E3_Addr) * nswapped_threads); -+ if (ctxt->SwapDmas) -+ KMEM_FREE ((void *) ctxt->SwapDmas, sizeof (E3_DMA_BE) * nswapped_dmas); -+ -+ kcondvar_destroy (&ctxt->Wait); -+ kcondvar_destroy (&ctxt->CommandPortWait); -+ kcondvar_destroy (&ctxt->LwpWait); -+ kcondvar_destroy (&ctxt->HaltWait); -+ -+ kmutex_destroy (&ctxt->SwapListsLock); -+ kmutex_destroy (&ctxt->CmdLock); -+ kmutex_destroy (&ctxt->NetworkErrorLock); -+ spin_lock_destroy (&ctxt->InputFaultLock); -+ -+ krwlock_destroy (&ctxt->VpLock); -+ -+ KMEM_FREE (ctxt, sizeof (ELAN3_CTXT)); -+ -+ return (NULL); -+} -+ -+void -+elan3_free (ELAN3_CTXT *ctxt) -+{ -+ ELAN3_DEV *dev = ctxt->Device; -+ NETERR_FIXUP *nef; -+ -+ PRINTF1 (ctxt, DBG_FN, "elan3_free: %p \n", ctxt); -+ -+ elan3_removevp (ctxt, ELAN3_INVALID_PROCESS); /* Remove any virtual process mappings */ -+ -+#if defined(DIGITAL_UNIX) -+ WaitForContext (ctxt); /* wait for all references to this context to go away */ -+#endif -+ -+ if (ctxt->RouteTable) -+ FreeRouteTable (dev, ctxt->RouteTable); -+ ctxt->RouteTable = NULL; -+ -+ elan3mmu_free (ctxt->Elan3mmu); /* free of our Elan3mmu */ -+ -+ if (ctxt->Private) /* Call back to "user" to free off */ -+ ELAN3_OP_FREE_PRIVATE (ctxt); /* private data */ -+ -+#if defined(DIGITAL_UNIX) -+ if (! CTXT_IS_KERNEL(ctxt)) -+ elan3_segelan3_destroy (ctxt); /* Unmap the command port from the users address space. 
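-+ * (tears down the segelan created via elan3_segelan3_create() in elan3_alloc)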
*/ -+#endif -+ -+ ReleaseHaltOperations (dev, NUM_HALTOPS); -+ -+ if (ctxt->Input0Resolver) -+ CancelNetworkErrorResolver (ctxt->Input0Resolver); -+ -+ if (ctxt->Input1Resolver) -+ CancelNetworkErrorResolver (ctxt->Input1Resolver); -+ -+ while ((nef = ctxt->NetworkErrorFixups) != NULL) -+ { -+ ctxt->NetworkErrorFixups = nef->Next; -+ -+ CompleteNetworkErrorFixup (ctxt, nef, ESRCH); -+ } -+ -+ KMEM_FREEPAGES ((void *) ctxt->FlagPage, 1); -+ -+ KMEM_FREE ((void *) ctxt->CommandTraps, sizeof (COMMAND_TRAP) * ntrapped_commands); -+ KMEM_FREE ((void *) ctxt->ThreadTraps, sizeof (THREAD_TRAP) * ntrapped_threads); -+ KMEM_FREE ((void *) ctxt->DmaTraps, sizeof (DMA_TRAP) * ntrapped_dmas); -+ KMEM_FREE ((void *) ctxt->EventCookies, sizeof (EVENT_COOKIE) * ntrapped_events); -+ KMEM_FREE ((void *) ctxt->Commands, sizeof (CProcTrapBuf_BE) * noverflow_commands); -+ KMEM_FREE ((void *) ctxt->SwapThreads, sizeof (E3_Addr) * nswapped_threads); -+ KMEM_FREE ((void *) ctxt->SwapDmas, sizeof (E3_DMA_BE) * nswapped_dmas); -+ -+ kcondvar_destroy (&ctxt->Wait); -+ kcondvar_destroy (&ctxt->CommandPortWait); -+ kcondvar_destroy (&ctxt->LwpWait); -+ kcondvar_destroy (&ctxt->HaltWait); -+ -+ kmutex_destroy (&ctxt->SwapListsLock); -+ kmutex_destroy (&ctxt->CmdLock); -+ kmutex_destroy (&ctxt->NetworkErrorLock); -+ spin_lock_destroy (&ctxt->InputFaultLock); -+ -+ krwlock_destroy (&ctxt->VpLock); -+ -+ KMEM_FREE (ctxt, sizeof (ELAN3_CTXT)); -+} -+ -+int -+elan3_doattach(ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap) -+{ -+ unsigned long pgnum = ((cap->cap_mycontext & MAX_ROOT_CONTEXT_MASK) * sizeof (E3_CommandPort)) / PAGE_SIZE; -+ unsigned long pgoff = ((cap->cap_mycontext & MAX_ROOT_CONTEXT_MASK) * sizeof (E3_CommandPort)) & (PAGE_SIZE-1); -+ ELAN3_DEV *dev = ctxt->Device; -+ int res = ESUCCESS; -+ unsigned long flags; -+ -+ /* Map in the command port for this context */ -+ if (MapDeviceRegister (dev, ELAN3_BAR_COMMAND_PORT, &ctxt->CommandPage, pgnum * PAGE_SIZE, PAGE_SIZE, &ctxt->CommandPageHandle) != ESUCCESS) -+ { -+ PRINTF0 (ctxt, DBG_FN, "elan3_doattach: MapDeviceRegister failed"); -+ return (EINVAL); -+ } -+ -+ ctxt->CommandPort = ctxt->CommandPage + pgoff; -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ -+ res = 0; -+ if (ELAN3_DEV_CTX_TABLE(dev,cap->cap_mycontext) != NULL) -+ res = EBUSY; -+ else -+ { -+ if ((res = elan3mmu_attach (ctxt->Device, cap->cap_mycontext, ctxt->Elan3mmu, -+ ctxt->RouteTable->Table, ctxt->RouteTable->Size-1)) == 0) -+ { -+ ELAN3_DEV_CTX_TABLE(dev,cap->cap_mycontext) = ctxt; -+ ctxt->Capability = *cap; -+ } -+ } -+ -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ -+ if (res == ESUCCESS) -+ elan3_swapin (ctxt, CTXT_DETACHED); -+ else -+ { -+ UnmapDeviceRegister (dev, &ctxt->CommandPageHandle); -+ ctxt->CommandPage = (ioaddr_t) 0; -+ ctxt->CommandPort = (ioaddr_t) 0; -+ } -+ -+ return (res); -+} -+ -+void -+elan3_destroy_callback( void * args, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map) -+{ -+ if (map == NULL) -+ { -+ /* the cap is being destroyed */ -+ PRINTF0 (NULL, DBG_VP, "elan3_destroy_callback: the cap is being destroyed \n"); -+ } -+ else -+ { -+ /* the map is being destroyed */ -+ PRINTF0 (NULL, DBG_VP, "elan3_destroy_callback: the map is being destroyed \n"); -+ } -+} -+ -+int -+elan3_attach (ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap) -+{ -+ ELAN3_DEV *dev = ctxt->Device; -+ int type; -+ int res; -+ -+ switch (type = elan3_validate_cap (dev, cap, ELAN_USER_ATTACH)) -+ { -+ case ELAN_CAP_OK: -+ /* nothing */ -+ break; -+ -+ case ELAN_CAP_RMS: -+ if ((res = elan_attach_cap(cap, 
dev->Devinfo.dev_rail, ctxt, elan3_destroy_callback)) != 0) -+ return res; -+ break; -+ -+ default: -+ return (EINVAL); -+ } -+ -+ if (((res = elan3_doattach(ctxt,cap)) != ESUCCESS) && (type == ELAN_CAP_RMS)) -+ elan_detach_cap(cap, dev->Devinfo.dev_rail); -+ -+ return res; -+} -+ -+void -+elan3_detach ( ELAN3_CTXT *ctxt ) -+{ -+ ELAN3_DEV *dev = ctxt->Device; -+ int need_to_call_elanmod_detach = 0; -+ unsigned long flags; -+ -+ PRINTF1 (ctxt, DBG_FN, "elan3_detach: %p \n", ctxt ); -+ -+ if (ctxt->Capability.cap_mycontext == ELAN_CAP_UNINITIALISED) -+ { -+ PRINTF0 (ctxt, DBG_FN, "elan3_detach: context not attached \n"); -+ return ; -+ } -+ -+ /* must you be in the ctx_table ?? */ -+ -+ switch (ctxt->Capability.cap_type & ELAN_CAP_TYPE_MASK) -+ { -+ case ELAN_CAP_TYPE_BLOCK: -+ case ELAN_CAP_TYPE_CYCLIC: -+ { -+ if (ELAN3_SYSTEM_CONTEXT (ctxt->Capability.cap_mycontext)) -+ return ; -+ -+ if (! (ctxt->Capability.cap_type & ELAN_CAP_TYPE_HWTEST)) -+ need_to_call_elanmod_detach = 1; -+ -+ break; -+ } -+ default: -+ return ; -+ } -+ -+ elan3_swapout (ctxt, CTXT_DETACHED); -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ -+ elan3mmu_detach (dev, ctxt->Capability.cap_mycontext); -+ ELAN3_DEV_CTX_TABLE(dev,ctxt->Capability.cap_mycontext) = NULL; -+ -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ -+ if (ctxt->CommandPage) -+ { -+ UnmapDeviceRegister (dev, &ctxt->CommandPageHandle); -+ ctxt->CommandPage = (ioaddr_t) 0; -+ } -+ -+ if (need_to_call_elanmod_detach) -+ elan_detach_cap(&ctxt->Capability, dev->Devinfo.dev_rail); -+ -+ elan_nullcap (&ctxt->Capability); -+ -+} -+ -+void -+elan3_dodetach ( ELAN3_CTXT *ctxt ) -+{ -+ ELAN3_DEV *dev = ctxt->Device; -+ unsigned long flags; -+ -+ PRINTF1 (ctxt, DBG_FN, "elan3_dodetach: %p \n", ctxt ); -+ -+ if (ctxt->Capability.cap_mycontext == ELAN_CAP_UNINITIALISED) -+ { -+ PRINTF0 (ctxt, DBG_FN, "elan3_dodetach: context not attached \n"); -+ return ; -+ } -+ -+ elan3_swapout (ctxt, CTXT_DETACHED); -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ -+ elan3mmu_detach (dev, ctxt->Capability.cap_mycontext); -+ ELAN3_DEV_CTX_TABLE(dev,ctxt->Capability.cap_mycontext) = NULL; -+ -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ -+ if (ctxt->CommandPage) -+ { -+ UnmapDeviceRegister (dev, &ctxt->CommandPageHandle); -+ ctxt->CommandPage = (ioaddr_t) 0; -+ } -+ -+ elan_nullcap (&ctxt->Capability); -+} -+ -+void -+elan3_swapin (ELAN3_CTXT *ctxt, int reason) -+{ -+ ELAN3_DEV *dev = ctxt->Device; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ -+ ASSERT (ctxt->Status & CTXT_SWAPPED_REASONS); -+ -+ PRINTF3 (ctxt, DBG_SWAP, "elan3_swapin: status %x State %s reason %x\n", -+ ctxt->Status, OthersStateStrings[ctxt->OthersState], reason); -+ -+ while (ctxt->Status & CTXT_SWAPPING_OUT) /* In transition */ -+ kcondvar_wait (&ctxt->LwpWait, &dev->IntrLock, &flags); -+ -+ if (reason == CTXT_NO_LWPS && ctxt->LwpCount++ != 0) /* Added another LWP */ -+ { -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ return; -+ } -+ -+ if ((ctxt->Status & ~reason) & CTXT_SWAPPED_REASONS) -+ ctxt->Status &= ~reason; -+ else -+ { -+ ASSERT (ctxt->Status & CTXT_SWAPPED_OUT); -+ ASSERT (ctxt->OthersState == CTXT_OTHERS_SWAPPED); -+ -+ /* -+ * Will not be swapped out anymore, so ask the "user" to perform -+ * any swapping in he needs before letting the context run again. 
-+ */ -+ -+ ctxt->Status &= ~(CTXT_SWAPPED_OUT | CTXT_QUEUES_EMPTY | reason); -+ ctxt->OthersState = CTXT_OTHERS_RUNNING; -+ -+ if (ctxt->Input0Trap.State == CTXT_STATE_OK && ctxt->Input1Trap.State == CTXT_STATE_OK) -+ SetInputterStateForContext (ctxt, 0, NULL); -+ -+ kcondvar_wakeupall (&ctxt->Wait, &dev->IntrLock); -+ } -+ -+ PRINTF2 (ctxt, DBG_SWAP, "elan3_swapin: all done - status %x state %s\n", -+ ctxt->Status, OthersStateStrings[ctxt->OthersState]); -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+} -+ -+ -+void -+elan3_swapout (ELAN3_CTXT *ctxt, int reason) -+{ -+ ELAN3_DEV *dev = ctxt->Device; -+ int cansleep; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ -+ PRINTF3 (ctxt, DBG_SWAP, "elan3_swapout: status %x state %s reason %x\n", -+ ctxt->Status, OthersStateStrings[ctxt->OthersState], reason); -+ -+ if (reason == CTXT_NO_LWPS) -+ { -+ if (--ctxt->LwpCount != 0) /* Still other LWPs running */ -+ { -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ return; -+ } -+ -+ kcondvar_wakeupall (&ctxt->LwpWait, &dev->IntrLock); /* Wakeup anyone waiting on LwpCount */ -+ } -+ -+ ctxt->Status |= reason; -+ -+ while (ctxt->Status & CTXT_SWAPPING_OUT) /* wait for someone else to finish swapping */ -+ kcondvar_wait (&ctxt->LwpWait, &dev->IntrLock, &flags); /* out */ -+ -+ if (ctxt->Status & CTXT_SWAPPED_OUT) -+ { -+ if (reason == CTXT_NO_LWPS) /* Wakeup other thread waiting on LWP exit */ -+ kcondvar_wakeupall (&ctxt->LwpWait, &dev->IntrLock); -+ -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ return; -+ } -+ -+ /* -+ * mark the context as swapping out. -+ */ -+ ctxt->Status |= CTXT_SWAPPING_OUT; -+ -+ if (reason != CTXT_FIXUP_NETERR) -+ { -+ /* -+ * Stop all of the lwps. -+ */ -+ while (ctxt->LwpCount) -+ { -+ kcondvar_wakeupall (&ctxt->Wait, &dev->IntrLock); /* Wake up any lwps */ -+ kcondvar_wait (&ctxt->LwpWait, &dev->IntrLock, &flags); /* then wait for them to enter elan3_swapout */ -+ } -+ } -+ -+ StartSwapoutContext (ctxt, 0, NULL); -+ for (;;) -+ { -+ PRINTF0 (ctxt, DBG_SWAP, "elan3_swapout: HandleExceptions\n"); -+ -+ cansleep = (HandleExceptions(ctxt, &flags) == ESUCCESS); -+ -+ PRINTF2 (ctxt, DBG_SWAP, "elan3_swapout: OthersState=%d cansleep=%d\n", ctxt->OthersState, cansleep); -+ -+ if (ctxt->OthersState == CTXT_OTHERS_SWAPPED) -+ break; -+ -+ if (cansleep) -+ kcondvar_wait (&ctxt->Wait, &dev->IntrLock, &flags); -+ } -+ PRINTF0 (ctxt, DBG_SWAP, "elan3_swapout: swapped out\n"); -+ -+ ASSERT (ELAN3_QUEUE_EMPTY (ctxt->DmaTrapQ)); -+ ASSERT (ELAN3_QUEUE_EMPTY (ctxt->ThreadTrapQ)); -+ -+ ctxt->Status |= CTXT_SWAPPED_OUT; -+ ctxt->Status &= ~CTXT_SWAPPING_OUT; -+ -+ kcondvar_wakeupall (&ctxt->LwpWait, &dev->IntrLock); -+ -+ PRINTF2 (ctxt, DBG_SWAP, "elan3_swapout: all done - status %x state %s\n", -+ ctxt->Status, OthersStateStrings[ctxt->OthersState]); -+ -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+} -+ -+int -+elan3_pagefault (ELAN3_CTXT *ctxt, E3_FaultSave_BE *FaultSave, int npages) -+{ -+ E3_Addr elanAddr = FaultSave->s.FaultAddress; -+ int writeable; -+ int res; -+ -+ PRINTF3 (ctxt, DBG_FAULT, "elan3_pagefault: elanAddr %08x FSR %08x : %s\n", elanAddr, FaultSave->s.FSR.Status, -+ FaultSave->s.FSR.s.ProtFault ? 
"protection fault" : "pte invalid"); -+ -+ /* Look at the FSR to determine the fault type etc */ -+ -+ if (FaultSave->s.FSR.Status == 0) /* this is a target abort/parity error, so look */ -+ { /* at the PCI config space registers to determine */ -+ ElanBusError (ctxt->Device); -+ return (EFAULT); -+ } -+ -+ if (FaultSave->s.FSR.s.AlignmentErr) /* Alignment errors are always fatal. */ -+ { -+ PRINTF0 (ctxt, DBG_FAULT, "elan3_pagefault: Alignment error\n"); -+ return (EFAULT); -+ } -+ -+ if (FaultSave->s.FSR.s.WalkBadData) /* Memory ECC error during a walk */ -+ { -+ PRINTF0 (ctxt, DBG_FAULT, "elan3_pagefault: Memory ECC error during walk\n"); -+ return (EFAULT); -+ } -+ -+ if (!FaultSave->s.FSR.s.ProtFault && /* DMA memory type changed */ -+ !FaultSave->s.FSR.s.Walking) -+ { -+ PRINTF0 (ctxt, DBG_FAULT, "elan3_pagefault: DMA memory type changed\n"); -+ return (EFAULT); -+ } -+ -+ ASSERT (FaultSave->s.FSR.s.ProtFault ? /* protection errors, should always have a valid pte */ -+ (!FaultSave->s.FSR.s.Walking || !(FaultSave->s.FSR.s.Level==3) || FaultSave->s.FSR.s.FaultPte == ELAN3_ET_PTE) : -+ FaultSave->s.FSR.s.FaultPte == ELAN3_ET_INVALID); /* otherwise it must be an invalid pte */ -+ -+ /* -+ * Determine whether to fault for a 'write' from the access permissions we need, and not -+ * from the access type (WrAcc). -+ */ -+ writeable = (FaultSave->s.FSR.s.AccTypePerm & (1 << FSR_WritePermBit)); -+ -+ /* Check that we have the right permissions for this access type. */ -+ if ((res = elan3mmu_checkperm (ctxt->Elan3mmu, (elanAddr&PAGEMASK), npages*PAGESIZE, FaultSave->s.FSR.s.AccTypePerm)) != 0) -+ { -+ PRINTF1 (ctxt, DBG_FAULT, "elan3_pagefault: %s\n", (res == ENOMEM) ? "no protection mapping" : "protection error"); -+ -+ return (res); -+ } -+ -+ res = LoadElanTranslation (ctxt, (elanAddr&PAGEMASK), npages*PAGESIZE, FaultSave->s.FSR.s.ProtFault, writeable); -+ -+ if (res == ESUCCESS) -+ { -+ BumpStat (ctxt->Device, PageFaults); -+ BumpUserStat (ctxt, PageFaults); -+ } -+ -+ PRINTF1 (ctxt, DBG_FAULT, "elan3_pagefault: -> %d\n", res); -+ -+ return (res); -+} -+ -+void -+elan3_block_inputter (ELAN3_CTXT *ctxt, int block) -+{ -+ ELAN3_DEV *dev = ctxt->Device; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ -+ if (block) -+ ctxt->Status |= CTXT_USER_FILTERING; -+ else -+ ctxt->Status &= ~CTXT_USER_FILTERING; -+ -+ if (ctxt->Capability.cap_mycontext != ELAN_CAP_UNINITIALISED) -+ SetInputterStateForContext (ctxt, 0, NULL); -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+} -+ -+int -+FixupNetworkErrors (ELAN3_CTXT *ctxt, unsigned long *flags) -+{ -+ ELAN3_DEV *dev = ctxt->Device; -+ NETERR_FIXUP *nef; -+ -+ ASSERT (SPINLOCK_HELD (&dev->IntrLock)); -+ -+ if (ctxt->NetworkErrorFixups == NULL) -+ return (ESUCCESS); -+ -+ spin_unlock_irqrestore (&dev->IntrLock, *flags); -+ -+ kmutex_lock (&ctxt->NetworkErrorLock); /* single thread while fixing up errors */ -+ elan3_swapout (ctxt, CTXT_FIXUP_NETERR); -+ -+ spin_lock_irqsave (&dev->IntrLock, *flags); -+ while ((nef = ctxt->NetworkErrorFixups) != NULL) -+ { -+ ctxt->NetworkErrorFixups = nef->Next; -+ spin_unlock_irqrestore (&dev->IntrLock, *flags); -+ -+ if (ELAN3_OP_FIXUP_NETWORK_ERROR (ctxt, nef) == OP_FAILED) -+ CompleteNetworkErrorFixup (ctxt, nef, EINVAL); -+ -+ spin_lock_irqsave (&dev->IntrLock, *flags); -+ } -+ spin_unlock_irqrestore (&dev->IntrLock, *flags); -+ -+ elan3_swapin (ctxt, CTXT_FIXUP_NETERR); -+ -+ kmutex_unlock (&ctxt->NetworkErrorLock); -+ spin_lock_irqsave (&dev->IntrLock, *flags); -+ return (EAGAIN); -+} 
-+ -+int -+CompleteNetworkErrorResolver (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, NETERR_RESOLVER *rvp) -+{ -+ int state; -+ -+ switch (rvp->Status) -+ { -+ case ESUCCESS: -+ /* -+ * the item still existed at the source - if it's a wait for EOP transaction -+ * then the source will retry - otherwise the remote event will have been -+ * cleared and we should execute it -+ */ -+ PRINTF1 (ctxt, DBG_NETERR, "CompleteNetworkErrorResolver: ESUCCESS zero WaitForEopTransaction %p\n", trap->WaitForEopTransaction); -+ -+ state = trap->WaitForEopTransaction ? CTXT_STATE_OK : CTXT_STATE_NEEDS_RESTART; -+ -+ break; -+ -+ case ESRCH: -+ /* -+ * the item was not found at the source - we should always execute the transaction -+ * since it will never be resent -+ */ -+ PRINTF1 (ctxt, DBG_NETERR, "CompleteNetworkErrorResolver: ESRCH execute WaitForEopTransaction %p\n", trap->WaitForEopTransaction); -+ state = CTXT_STATE_NEEDS_RESTART; -+ break; -+ -+ default: /* other errors */ -+ PRINTF1 (ctxt, DBG_NETERR, "CompleteNetworkErrorResolver: %d\n", rvp->Status); -+ if (ElanException (ctxt, EXCEPTION_NETWORK_ERROR, INPUT_PROC, trap, &rvp) == OP_HANDLED) -+ state = CTXT_STATE_NEEDS_RESTART; -+ else -+ state = CTXT_STATE_OK; -+ break; -+ } -+ -+ FreeNetworkErrorResolver (rvp); -+ -+ return (state); -+} -+ -+int -+HandleExceptions (ELAN3_CTXT *ctxt, unsigned long *flags) -+{ -+ ELAN3_DEV *dev = ctxt->Device; -+ THREAD_TRAP tproc; -+ DMA_TRAP dproc; -+ NETERR_RESOLVER *rvp; -+ int state; -+ -+ if (ctxt->Status & CTXT_COMMAND_OVERFLOW_ERROR) -+ { -+ ctxt->Status &= ~CTXT_COMMAND_OVERFLOW_ERROR; -+ spin_unlock_irqrestore (&dev->IntrLock, *flags); -+ ElanException (ctxt, EXCEPTION_COMMAND_OVERFLOW, COMMAND_PROC, NULL); -+ spin_lock_irqsave (&dev->IntrLock, *flags); -+ return (EAGAIN); -+ } -+ -+ if (! 
ELAN3_QUEUE_BACK_EMPTY (ctxt->CommandTrapQ)) -+ { -+ /* XXXX: unmap translations to the command port */ -+ -+ spin_unlock_irqrestore (&dev->IntrLock, *flags); -+ ResolveCProcTrap (ctxt); -+ spin_lock_irqsave (&dev->IntrLock, *flags); -+ return (EAGAIN); -+ } -+ -+ if (ctxt->Input0Trap.State == CTXT_STATE_TRAPPED) -+ { -+ ctxt->Input0Trap.State = CTXT_STATE_RESOLVING; -+ -+ spin_unlock_irqrestore (&dev->IntrLock, *flags); -+ ResolveIProcTrap (ctxt, &ctxt->Input0Trap, &ctxt->Input0Resolver); -+ spin_lock_irqsave (&dev->IntrLock, *flags); -+ return (EAGAIN); -+ } -+ -+ if (ctxt->Input1Trap.State == CTXT_STATE_TRAPPED) -+ { -+ ctxt->Input1Trap.State = CTXT_STATE_RESOLVING; -+ -+ spin_unlock_irqrestore (&dev->IntrLock, *flags); -+ ResolveIProcTrap (ctxt, &ctxt->Input1Trap, &ctxt->Input1Resolver); -+ spin_lock_irqsave (&dev->IntrLock, *flags); -+ return (EAGAIN); -+ } -+ -+ if ((rvp = ctxt->Input0Resolver) != NULL && rvp->Completed) -+ { -+ ASSERT (ctxt->Input0Trap.State == CTXT_STATE_NETWORK_ERROR); -+ -+ ctxt->Input0Resolver = NULL; -+ -+ spin_unlock_irqrestore (&dev->IntrLock, *flags); -+ state = CompleteNetworkErrorResolver (ctxt, &ctxt->Input0Trap, rvp); -+ spin_lock_irqsave (&dev->IntrLock, *flags); -+ ctxt->Input0Trap.State = state; -+ return (EAGAIN); -+ } -+ -+ if ((rvp = ctxt->Input1Resolver) != NULL && rvp->Completed) -+ { -+ ASSERT (ctxt->Input1Trap.State == CTXT_STATE_NETWORK_ERROR); -+ -+ ctxt->Input1Resolver = NULL; -+ -+ spin_unlock_irqrestore (&dev->IntrLock, *flags); -+ state = CompleteNetworkErrorResolver (ctxt,&ctxt->Input1Trap, rvp); -+ spin_lock_irqsave (&dev->IntrLock, *flags); -+ ctxt->Input1Trap.State = state; -+ return (EAGAIN); -+ } -+ -+ if (NextTProcTrap (ctxt, &tproc)) -+ { -+ spin_unlock_irqrestore (&dev->IntrLock, *flags); -+ ResolveTProcTrap (ctxt, &tproc); -+ spin_lock_irqsave (&dev->IntrLock, *flags); -+ return (EAGAIN); -+ } -+ ctxt->Status &= ~CTXT_THREAD_QUEUE_FULL; -+ -+ if (NextDProcTrap (ctxt, &dproc)) -+ { -+ spin_unlock_irqrestore (&dev->IntrLock, *flags); -+ ResolveDProcTrap (ctxt, &dproc); -+ spin_lock_irqsave (&dev->IntrLock, *flags); -+ return (EAGAIN); -+ } -+ ctxt->Status &= ~CTXT_DMA_QUEUE_FULL; -+ -+ /* Handle all event interrupts. */ -+ if (! ELAN3_QUEUE_EMPTY (ctxt->EventCookieQ)) -+ { -+ while (! ELAN3_QUEUE_EMPTY (ctxt->EventCookieQ)) -+ { -+ E3_uint32 cookie = *ELAN3_QUEUE_FRONT (ctxt->EventCookieQ, ctxt->EventCookies); -+ -+ ELAN3_QUEUE_REMOVE (ctxt->EventCookieQ); -+ -+ spin_unlock_irqrestore (&dev->IntrLock, *flags); -+ if (ELAN3_OP_EVENT (ctxt, cookie, OP_LWP) != OP_DEFER) -+ spin_lock_irqsave (&dev->IntrLock, *flags); -+ else -+ { -+ spin_lock_irqsave (&dev->IntrLock, *flags); /* place the cookie back on the queue. */ -+ /* note we place it on the front to ensure */ -+ ELAN3_QUEUE_ADD_FRONT (ctxt->EventCookieQ); /* event ordering. */ -+ *ELAN3_QUEUE_FRONT (ctxt->EventCookieQ, ctxt->EventCookies) = cookie; -+ } -+ } -+ return (EAGAIN); -+ } -+ ctxt->Status &= ~CTXT_EVENT_QUEUE_FULL; -+ -+ if (! ELAN3_QUEUE_EMPTY (ctxt->SwapDmaQ)) -+ { -+ while (! ELAN3_QUEUE_EMPTY (ctxt->SwapDmaQ)) -+ { -+ E3_DMA_BE DmaDesc = *ELAN3_QUEUE_FRONT (ctxt->SwapDmaQ, ctxt->SwapDmas); -+ -+ ELAN3_QUEUE_REMOVE (ctxt->SwapDmaQ); -+ -+ spin_unlock_irqrestore (&dev->IntrLock, *flags); -+ RestartDmaDesc (ctxt, &DmaDesc); -+ spin_lock_irqsave (&dev->IntrLock, *flags); -+ } -+ return (EAGAIN); -+ } -+ -+ if (! ELAN3_QUEUE_EMPTY (ctxt->SwapThreadQ)) -+ { -+ while (! 
ELAN3_QUEUE_EMPTY (ctxt->SwapThreadQ)) -+ { -+ E3_Addr StackPointer = *ELAN3_QUEUE_FRONT (ctxt->SwapThreadQ, ctxt->SwapThreads); -+ -+ ELAN3_QUEUE_REMOVE (ctxt->SwapThreadQ); -+ -+ spin_unlock_irqrestore (&dev->IntrLock, *flags); -+ ReissueStackPointer (ctxt, StackPointer); -+ spin_lock_irqsave (&dev->IntrLock, *flags); -+ } -+ return (EAGAIN); -+ } -+ -+ switch (ctxt->OthersState) -+ { -+ case CTXT_OTHERS_SWAPPING: -+ if (! (ctxt->Status & CTXT_OTHERS_REASONS)) -+ ctxt->OthersState = CTXT_OTHERS_RUNNING; -+ else -+ ctxt->OthersState = CTXT_OTHERS_SWAPPED; -+ -+ PRINTF1 (ctxt, DBG_LWP, "HandleExceptions: OthersState : swapping -> %s\n", OthersStateStrings[ctxt->OthersState]); -+ -+ break; -+ -+ case CTXT_OTHERS_SWAPPING_MORE: -+ ctxt->OthersState = CTXT_OTHERS_HALTING_MORE; -+ QueueHaltOperation (dev, 0, NULL, INT_DProcHalted | INT_TProcHalted, HaltSwapContext, ctxt); -+ -+ PRINTF1 (ctxt, DBG_LWP, "HandleExceptions: OthersState : swapping_more -> %s\n", OthersStateStrings[ctxt->OthersState]); -+ break; -+ } -+ return (ESUCCESS); -+} -+ -+int -+RestartContext (ELAN3_CTXT *ctxt, unsigned long *flags) -+{ -+ ELAN3_DEV *dev = ctxt->Device; -+ int res; -+ -+ ASSERT (SPINLOCK_HELD (&dev->IntrLock)); -+ -+ PRINTF1 (ctxt, DBG_LWP, "RestartContext: status %x\n", ctxt->Status); -+ -+ if (! (ctxt->Status & CTXT_OTHERS_REASONS)) -+ { -+ if (! ELAN3_QUEUE_FRONT_EMPTY (ctxt->CommandTrapQ) || ! ELAN3_QUEUE_EMPTY(ctxt->CommandQ)) -+ { -+ spin_unlock_irqrestore (&dev->IntrLock, *flags); -+ RestartCProcTrap (ctxt); -+ spin_lock_irqsave (&dev->IntrLock, *flags); -+ return (EAGAIN); -+ } -+ -+ if (ctxt->Input0Trap.State == CTXT_STATE_NEEDS_RESTART) -+ { -+ ctxt->Input0Trap.State = CTXT_STATE_EXECUTING; -+ -+ spin_unlock_irqrestore (&dev->IntrLock, *flags); -+ res = RestartIProcTrap (ctxt, &ctxt->Input0Trap); -+ spin_lock_irqsave (&dev->IntrLock, *flags); -+ -+ if (res == ESUCCESS) -+ ctxt->Input0Trap.State = CTXT_STATE_OK; -+ else -+ ctxt->Input0Trap.State = CTXT_STATE_NEEDS_RESTART; -+ return (EAGAIN); -+ } -+ -+ if (ctxt->Input1Trap.State == CTXT_STATE_NEEDS_RESTART) -+ { -+ ctxt->Input1Trap.State = CTXT_STATE_EXECUTING; -+ -+ spin_unlock_irqrestore (&dev->IntrLock, *flags); -+ res = RestartIProcTrap (ctxt, &ctxt->Input1Trap); -+ spin_lock_irqsave (&dev->IntrLock, *flags); -+ -+ if (res == ESUCCESS) -+ ctxt->Input1Trap.State = CTXT_STATE_OK; -+ else -+ ctxt->Input1Trap.State = CTXT_STATE_NEEDS_RESTART; -+ return (EAGAIN); -+ } -+ -+ if (SetEventsNeedRestart (ctxt)) -+ { -+ spin_unlock_irqrestore (&dev->IntrLock, *flags); -+ RestartSetEvents (ctxt); -+ spin_lock_irqsave (&dev->IntrLock, *flags); -+ return (EAGAIN); -+ } -+ -+ SetInputterStateForContext (ctxt, 0, NULL); -+ -+ if (TProcNeedsRestart (ctxt)) -+ { -+ spin_unlock_irqrestore (&dev->IntrLock, *flags); -+ -+ LoadCommandPortTranslation (ctxt); -+ RestartTProcItems (ctxt); -+ spin_lock_irqsave (&dev->IntrLock, *flags); -+ return (EAGAIN); -+ } -+ -+ if (DProcNeedsRestart (ctxt)) -+ { -+ spin_unlock_irqrestore (&dev->IntrLock, *flags); -+ RestartDProcItems (ctxt); -+ spin_lock_irqsave (&dev->IntrLock, *flags); -+ return (EAGAIN); -+ } -+ -+ if (ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ)) -+ { -+ PRINTF1 (ctxt, DBG_LWP, "RestartContext: setting Command Flag at %p to 0\n", &ctxt->FlagPage->CommandFlag); -+ -+ ctxt->FlagPage->CommandFlag = 0; -+ -+ if (ctxt->Status & CTXT_WAITING_COMMAND) -+ { -+ PRINTF0 (ctxt, DBG_LWP, "RestartContext: waking up threads waiting for commandport\n"); -+ -+ ctxt->Status &= ~CTXT_WAITING_COMMAND; -+ -+ kcondvar_wakeupall 
(&ctxt->CommandPortWait, &dev->IntrLock);
-+ }
-+ }
-+ }
-+
-+ return (ESUCCESS);
-+}
-+
-+static void
-+HaltSwapContext (ELAN3_DEV *dev, void *arg)
-+{
-+ ELAN3_CTXT *ctxt = (ELAN3_CTXT *) arg;
-+ int SysCntx = (ctxt->Capability.cap_mycontext & SYS_CONTEXT_BIT);
-+ E3_ThreadQueue_BE thread;
-+ E3_DMA_BE dma;
-+ sdramaddr_t FPtr, BPtr;
-+ sdramaddr_t Base, Top;
-+ u_int *runCount;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave (&dev->IntrLock, flags);
-+
-+ ASSERT (ctxt->OthersState == CTXT_OTHERS_HALTING || ctxt->OthersState == CTXT_OTHERS_HALTING_MORE);
-+
-+ PRINTF2 (ctxt, DBG_SWAP, "HaltSwapContext: status %x state %s\n", ctxt->Status, OthersStateStrings[ctxt->OthersState]);
-+
-+ if (! (ctxt->Status & CTXT_OTHERS_REASONS))
-+ {
-+ if (ctxt->OthersState == CTXT_OTHERS_HALTING_MORE)
-+ {
-+ runCount = SysCntx ? &dev->HaltAllCount : &dev->HaltNonContext0Count;
-+
-+ if (--(*runCount) == 0)
-+ SetSchedStatusRegister (dev, 0, NULL);
-+ }
-+ ctxt->OthersState = CTXT_OTHERS_RUNNING;
-+
-+ PRINTF0 (ctxt, DBG_SWAP, "HaltSwapContext: no more reason to swap -> others_running\n");
-+
-+ kcondvar_wakeupall (&ctxt->Wait, &dev->IntrLock);
-+ spin_unlock_irqrestore (&dev->IntrLock, flags);
-+ return;
-+ }
-+
-+ /*
-+ * Capture all other processors since we're not being responsive to
-+ * the command processor interrupt.
-+ */
-+ CAPTURE_CPUS();
-+
-+ if (SysCntx)
-+ {
-+ FPtr = read_reg32 (dev, TProc_SysCntx_FPtr);
-+ BPtr = read_reg32 (dev, TProc_SysCntx_BPtr);
-+ Base = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxThreadQueue[0]);
-+ Top = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxThreadQueue[E3_SysCntxQueueSize-1]);
-+ }
-+ else
-+ {
-+ FPtr = read_reg32 (dev, TProc_NonSysCntx_FPtr);
-+ BPtr = read_reg32 (dev, TProc_NonSysCntx_BPtr);
-+ Base = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxThreadQueue[0]);
-+ Top = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxThreadQueue[E3_NonSysCntxQueueSize-1]);
-+ }
-+
-+ while (FPtr != BPtr)
-+ {
-+ elan3_sdram_copyq_from_sdram (dev, FPtr, (void *) &thread, sizeof (E3_ThreadQueue_BE));
-+
-+ if (thread.s.Context == ctxt->Capability.cap_mycontext)
-+ {
-+ if (ELAN3_QUEUE_FULL (ctxt->SwapThreadQ))
-+ break;
-+
-+ *ELAN3_QUEUE_BACK(ctxt->SwapThreadQ, ctxt->SwapThreads) = thread.s.Thread;
-+ ELAN3_QUEUE_ADD (ctxt->SwapThreadQ);
-+
-+ /*
-+ * Remove this entry from the queue by replacing it with
-+ * the "magic" thread value.
-+ *
-+ * NOTE: we must preserve the SYS_CONTEXT_BIT since the Elan uses this
-+ * to mark the appropriate run queue as empty.
-+ */
-+ thread.s.Context = SysCntx ? SYS_CONTEXT_BIT : 0;
-+ thread.s.Thread = VanishingStackPointer;
-+
-+ elan3_sdram_copyq_to_sdram (dev, (void *) &thread, FPtr, sizeof (E3_ThreadQueue_BE));
-+ }
-+
-+ FPtr = (FPtr == Top) ? Base : FPtr + sizeof (E3_ThreadQueue);
-+ }
-+
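-+ /*
-+ * Now flush this context's DMAs from the dma run queue in the same way:
-+ * capture them into SwapDmaQ and overwrite the slots in place with empty
-+ * descriptors.
-+ */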
-+ ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc.s.FSR)) == 0);
-+ ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0.s.FSR.Status)) == 0);
-+ ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData1.s.FSR.Status)) == 0);
-+ ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData2.s.FSR.Status)) == 0);
-+ ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData3.s.FSR.Status)) == 0);
-+
-+ if (SysCntx)
-+ {
-+ FPtr = read_reg32 (dev, DProc_SysCntx_FPtr);
-+ BPtr = read_reg32 (dev, DProc_SysCntx_BPtr);
-+ Base = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0]);
-+ Top = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[E3_SysCntxQueueSize-1]);
-+ }
-+ else
-+ {
-+ FPtr = read_reg32 (dev, DProc_NonSysCntx_FPtr);
-+ BPtr = read_reg32 (dev, DProc_NonSysCntx_BPtr);
-+ Base = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[0]);
-+ Top = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[E3_NonSysCntxQueueSize-1]);
-+ }
-+
-+ while (FPtr != BPtr)
-+ {
-+ elan3_sdram_copyq_from_sdram (dev, FPtr, &dma, sizeof (E3_DMA_BE));
-+
-+ if (dma.s.dma_u.s.Context == ctxt->Capability.cap_mycontext)
-+ {
-+ if (ELAN3_QUEUE_FULL (ctxt->SwapDmaQ))
-+ break;
-+
-+ *ELAN3_QUEUE_BACK (ctxt->SwapDmaQ, ctxt->SwapDmas) = dma;
-+ ELAN3_QUEUE_ADD (ctxt->SwapDmaQ);
-+
-+ /*
-+ * Remove the DMA from the queue by replacing it with one with
-+ * zero size and no events.
-+ *
-+ * NOTE: we must preserve the SYS_CONTEXT_BIT since the Elan uses this
-+ * to mark the appropriate run queue as empty.
-+ */
-+ dma.s.dma_type = ((SysCntx ? SYS_CONTEXT_BIT : 0) << 16);
-+ dma.s.dma_size = 0;
-+ dma.s.dma_source = (E3_Addr) 0;
-+ dma.s.dma_dest = (E3_Addr) 0;
-+ dma.s.dma_destCookieVProc = (E3_Addr) 0;
-+ dma.s.dma_srcEvent = (E3_Addr) 0;
-+ dma.s.dma_srcCookieVProc = (E3_Addr) 0;
-+
-+ elan3_sdram_copyq_to_sdram (dev, &dma, FPtr, sizeof (E3_DMA_BE));
-+ }
-+
-+ FPtr = (FPtr == Top) ? Base : FPtr + sizeof (E3_DMA);
-+ }
-+
-+ /*
-+ * Release the other processors now before signalling the LWP.
-+ */
-+ RELEASE_CPUS();
-+
-+ if (! ELAN3_QUEUE_FULL (ctxt->SwapDmaQ) && !ELAN3_QUEUE_FULL (ctxt->SwapThreadQ))
-+ {
-+ /*
-+ * We've completely emptied the elan queues of items in this
-+ * context, so we now mark it as fully swapped out.
-+ */
-+ if (ctxt->OthersState == CTXT_OTHERS_HALTING_MORE)
-+ {
-+ runCount = SysCntx ? &dev->HaltAllCount : &dev->HaltNonContext0Count;
-+
-+ if (--(*runCount) == 0)
-+ SetSchedStatusRegister (dev, 0, NULL);
-+
-+ }
-+ PRINTF0 (ctxt, DBG_SWAP, "HaltSwapContext: queues emptied -> others_swapping\n");
-+
-+ ctxt->OthersState = CTXT_OTHERS_SWAPPING;
-+ kcondvar_wakeupall (&ctxt->Wait, &dev->IntrLock);
-+ }
-+ else
-+ {
-+ if (ctxt->OthersState == CTXT_OTHERS_HALTING)
-+ {
-+ runCount = SysCntx ? &dev->HaltAllCount : &dev->HaltNonContext0Count;
-+
-+ if ((*runCount)++ == 0)
-+ SetSchedStatusRegister (dev, 0, NULL);
-+ }
-+ PRINTF0 (ctxt, DBG_SWAP, "HaltSwapContext: queues not emptied -> others_swapping_more\n");
-+
-+ ctxt->OthersState = CTXT_OTHERS_SWAPPING_MORE;
-+ kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock);
-+ }
-+ spin_unlock_irqrestore (&dev->IntrLock, flags);
-+}
-+
-+void
-+UnloadCommandPageMapping (ELAN3_CTXT *ctxt)
-+{
-+ /*
-+ * Unload the Elan translations, and flag the main processor to stall after
-+ * issuing its next command.
-+ */
-+ if (ctxt->CommandPageMapping != NULL && (ctxt->Status & CTXT_COMMAND_MAPPED_ELAN))
-+ {
-+ ELAN3MMU_RGN *rgn = elan3mmu_rgnat_main (ctxt->Elan3mmu, ctxt->CommandPageMapping);
-+
-+ if (rgn != NULL)
-+ {
-+ E3_Addr eaddr = rgn->rgn_ebase + (ctxt->CommandPageMapping - rgn->rgn_mbase);
-+
-+ PRINTF1 (ctxt, DBG_INTR, "UnloadCommandPageMapping: unmapping command port at addr %08x\n", eaddr);
-+
-+ elan3mmu_unload (ctxt->Elan3mmu, eaddr, PAGESIZE, PTE_UNLOAD);
-+ }
-+
-+ ctxt->Status &= ~CTXT_COMMAND_MAPPED_ELAN;
-+ }
-+}
-+
-+void
-+StartSwapoutContext (ELAN3_CTXT *ctxt, E3_uint32 Pend, E3_uint32 *Maskp)
-+{
-+ ELAN3_DEV *dev = ctxt->Device;
-+ int SysCntx = (ctxt->Capability.cap_mycontext & SYS_CONTEXT_BIT);
-+ u_int *runCount;
-+
-+ ASSERT (SPINLOCK_HELD (&dev->IntrLock));
-+
-+ PRINTF2 (ctxt, DBG_SWAP, "StartSwapoutContext: Status %x OthersState %s\n",
-+ ctxt->Status, OthersStateStrings [ctxt->OthersState]);
-+ /*
-+ * Disable the inputters, we should already have a reason for it.
-+ */
-+ SetInputterStateForContext (ctxt, Pend, Maskp);
-+
-+ UnloadCommandPageMapping (ctxt);
-+
-+ /*
-+ * Flag main processor to stall after issuing next command
-+ */
-+ PRINTF1 (ctxt, DBG_SWAP, "StartSwapoutContext: setting Command Flag at %p to 1\n", &ctxt->FlagPage->CommandFlag);
-+
-+ ctxt->FlagPage->CommandFlag = 1;
-+
-+ PRINTF1 (ctxt, DBG_SWAP, "StartSwapoutContext: OthersState=%d\n", ctxt->OthersState);
-+
-+ /*
-+ * And queue a haltop to stop the queues and clear it out.
-+ */
-+ switch (ctxt->OthersState)
-+ {
-+ case CTXT_OTHERS_RUNNING:
-+ PRINTF0 (ctxt, DBG_SWAP, "StartSwapoutContext: -> others_halting\n");
-+
-+ ctxt->OthersState = CTXT_OTHERS_HALTING;
-+
-+ QueueHaltOperation (dev, Pend, Maskp, INT_DProcHalted | INT_TProcHalted, HaltSwapContext, ctxt);
-+ break;
-+
-+ case CTXT_OTHERS_SWAPPING:
-+ PRINTF0 (ctxt, DBG_SWAP, "StartSwapoutContext: -> others_swapping_more\n");
-+ ctxt->OthersState = CTXT_OTHERS_SWAPPING_MORE;
-+
-+ runCount = SysCntx ?
&dev->HaltAllCount : &dev->HaltNonContext0Count; -+ -+ if ((*runCount)++ == 0) -+ SetSchedStatusRegister (dev, Pend, Maskp); -+ break; -+ default: -+ PRINTF1 (ctxt, DBG_SWAP, "StartSwapoutContext: OthersState=%d\n", ctxt->OthersState); -+ break; -+ } -+} -+ -+#if defined(DIGITAL_UNIX) -+/* temporary tweaks to priority bump */ -+int lwp_do_prio = 1; -+int lwp_do_nxm = 1; -+int lwp_prio = BASEPRI_USER-1; -+#elif defined(LINUX) -+/* This is the default nice level for the helper LWP */ -+int LwpNice = -1; -+#endif -+ -+int -+elan3_lwp (ELAN3_CTXT *ctxt) -+{ -+ ELAN3_DEV *dev = ctxt->Device; -+ int res; -+ unsigned long flags; -+ -+ PRINTF1 (ctxt, DBG_LWP, "elan3_lwp: started, context 0x%x\n", ctxt->Capability.cap_mycontext); -+ -+#if defined(DIGITAL_UNIX) -+ { -+ thread_t mythread = current_thread(); -+ if (lwp_do_prio && (lwp_do_nxm || !IS_NXM_TASK(mythread->task))) -+ { -+ mythread->priority = mythread->sched_pri = lwp_prio; -+ mythread->max_priority = BASEPRI_HIGHEST; -+ (void) thread_priority(mythread, lwp_prio, 0, 1); -+ } -+ } -+#elif defined(LINUX) -+ { -+ /* Do the priority trick for the helper LWP so that it -+ * runs in preferance to the user threads which may be -+ * burning CPU waiting for a trap to be fixed up -+ */ -+#ifdef NO_O1_SCHED -+ if (LwpNice >= -20 && LwpNice < 20) -+ current->nice = LwpNice; -+#else -+ set_user_nice(current, LwpNice); -+#endif -+ } -+#endif -+ -+ elan3_swapin (ctxt, CTXT_NO_LWPS); -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ -+ /* If we're swapped out, and not detached (or exiting) then wait until we're swapped back in */ -+ /* since otherwise we could "spin" forever continually calling elan3_lwp() */ -+ if ((ctxt->Status & CTXT_SWAPPED_REASONS) && ! (ctxt->Status & (CTXT_DETACHED|CTXT_EXITING))) -+ kcondvar_waitsig (&ctxt->Wait, &dev->IntrLock, &flags); -+ -+ for (;;) -+ { -+#if defined(DIGITAL_UNIX) -+ if (thread_should_halt(current_thread()) || -+ CURSIG_CHECK(task_to_proc(current_thread()->task), u.np_uthread)) -+ { -+ PRINTF1 (ctxt, DBG_LWP, "elan3_lwp: exiting on %s\n", -+ thread_should_halt(current_thread()) ? "halt" : "signal"); -+ break; -+ } -+#endif -+ -+ if (ctxt->Status & CTXT_SWAPPED_REASONS) -+ { -+ PRINTF0 (ctxt, DBG_LWP, "elan3_lwp: exiting on swapped reasons\n"); -+ break; -+ } -+ -+ if (! (ctxt->inhibit)) -+ { -+ if (FixupNetworkErrors (ctxt, &flags) == ESUCCESS && -+ HandleExceptions (ctxt, &flags) == ESUCCESS && -+ RestartContext (ctxt, &flags) == ESUCCESS) -+ { -+ if (kcondvar_waitsig (&ctxt->Wait, &dev->IntrLock, &flags) == 0) -+ { -+ PRINTF0 (ctxt, DBG_LWP, "elan3_lwp: exiting by kcondvar_wait_sig()\n"); -+ break; -+ } -+ } -+ } -+ else -+ { -+ printk("elan3_lwp :: skipping as inhibited\n"); -+ if (kcondvar_waitsig (&ctxt->Wait, &dev->IntrLock, &flags) == 0) -+ { -+ PRINTF0 (ctxt, DBG_LWP, "elan3_lwp: exiting by kcondvar_wait_sig()\n"); -+ break; -+ } -+ } -+ -+ } -+ -+ /* Return EINVAL to elan3_syscall_lwp() when we want it to exit */ -+ res = (ctxt->Status & (CTXT_DETACHED|CTXT_EXITING)) ? 
EINVAL : 0;
-+
-+ spin_unlock_irqrestore (&dev->IntrLock, flags);
-+
-+ elan3_swapout (ctxt, CTXT_NO_LWPS);
-+
-+ spin_lock_irqsave (&dev->IntrLock, flags);
-+ FixupNetworkErrors (ctxt, &flags);
-+ spin_unlock_irqrestore (&dev->IntrLock, flags);
-+
-+ return (res);
-+}
-+
-+void
-+SetInputterStateForContext (ELAN3_CTXT *ctxt, E3_uint32 Pend, E3_uint32 *Maskp)
-+{
-+ ELAN3_DEV *dev = NULL;
-+ int new_disabled = 0;
-+ int ctxnum;
-+
-+ ASSERT (ctxt != NULL);
-+ dev = ctxt->Device;
-+ ASSERT (SPINLOCK_HELD (&dev->IntrLock));
-+
-+ new_disabled = (ctxt->Input0Trap.State != CTXT_STATE_OK ||
-+ ctxt->Input1Trap.State != CTXT_STATE_OK ||
-+ (ctxt->Status & CTXT_INPUTTER_REASONS) != 0);
-+
-+
-+ ctxnum = ctxt->Capability.cap_mycontext;
-+
-+#ifndef __lock_lint
-+ PRINTF2 (ctxt , DBG_IPROC, "SetInputterState: ctxnum %x %s attached\n", ctxnum, ctxt->Disabled ? "disabled " : "");
-+#endif /* __lock_lint */
-+
-+ if (ctxt->Disabled != new_disabled)
-+ {
-+ PRINTF2 (ctxt, DBG_IPROC, "SetInputterState: ctxnum %x change %s\n", ctxnum, new_disabled ? "enabled to disabled" : "disabled to enabled");
-+
-+ ctxt->Disabled = new_disabled;
-+
-+ /* synchronize the context filter for this context */
-+ elan3mmu_set_context_filter (dev, ctxnum, new_disabled, Pend, Maskp);
-+ }
-+}
-+
-+int
-+CheckCommandQueueFlushed (ELAN3_CTXT *ctxt, E3_uint32 cflags, int how, unsigned long *flags)
-+{
-+ ELAN3_DEV *dev = ctxt->Device;
-+ int delay = 1;
-+ int i, SeenComQueueEmpty;
-+
-+ ASSERT (SPINLOCK_HELD (&dev->IntrLock));
-+ ASSERT (cflags != DmaComQueueNotEmpty || dev->HaltDmaDequeueCount != 0);
-+
-+ /*
-+ * Flush the command processor queues and poll the queue to see if it empties.
-+ */
-+ if (dev->FlushCommandCount++ == 0)
-+ SetSchedStatusRegister (dev, 0, NULL);
-+
-+ /*
-+ * Ensure previous writes have been flushed through the write buffers
-+ */
-+ wmb(); mmiob();
-+
-+ /*
-+ * If the command processor traps, or it's taking too long to observe
-+ * the queue as empty, then we need to force the interrupt handler to
-+ * run for us. So queue a halt operation for the dma processor.
-+ */
-+ SeenComQueueEmpty = !(read_reg32 (dev, ComQueueStatus) & cflags);
-+ for (i = 20; i > 0 || (how & ISSUE_COMMAND_CANT_WAIT); i--)
-+ {
-+ if (SeenComQueueEmpty || (read_reg32 (dev, Exts.InterruptReg) & (INT_CProc | INT_ComQueue)))
-+ break;
-+
-+ mb();
-+ DELAY (delay);
-+
-+ if ((delay <<= 1) == 0) delay = 1;
-+
-+ SeenComQueueEmpty = !(read_reg32 (dev, ComQueueStatus) & cflags);
-+ }
-+
-+ if (--dev->FlushCommandCount == 0)
-+ SetSchedStatusRegister (dev, 0, NULL);
-+
-+ /*
-+ * If we've seen the command queue that we're interested in with nothing in it
-+ * and the command processor has not trapped then the commands we've
-+ * issued have been successfully processed.
-+ */
-+ if (SeenComQueueEmpty && ! (read_reg32 (dev, Exts.InterruptReg) & (INT_CProc | INT_ComQueue)))
-+ {
-+ PRINTF0 (ctxt, DBG_CMD, "CheckCommandQueueFlushed: observed dma queue empty and command proc not trapped\n");
-+
-+ if (cflags == DmaComQueueNotEmpty && --dev->HaltDmaDequeueCount == 0)
-+ SetSchedStatusRegister (dev, 0, NULL);
-+
-+ return (ISSUE_COMMAND_OK);
-+ }
-+
-+ if ((how & ISSUE_COMMAND_CANT_WAIT) != 0)
-+ return (ISSUE_COMMAND_WAIT);
-+
-+ /*
-+ * Halt the dma processor and wait for it to halt, if the command we've issued has
-+ * trapped then the interrupt handler will have moved it to the context structure.
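-+ * The halt operation queued here fires WakeupLwp() once the dma processor
-+ * has halted; that callback sets ctxt->Halted and signals HaltWait below.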
-+ */ -+ PRINTF0 (ctxt, DBG_CMD, "CheckCommandQueueFlushed: waiting for dproc to halt\n"); -+ QueueHaltOperation (dev, 0, NULL, INT_DProcHalted, WakeupLwp, ctxt); -+ while (! ctxt->Halted) -+ { -+ PRINTF1 (ctxt, DBG_CMD, "CheckCommandQueueFlushed: waiting for Halted - %d\n", ctxt->Halted); -+ -+ kcondvar_wait (&ctxt->HaltWait, &dev->IntrLock, flags); -+ -+ PRINTF1 (ctxt, DBG_CMD, "CheckCommandQueueFlushed: woken for Halted - %d\n", ctxt->Halted); -+ } -+ ctxt->Halted = 0; -+ -+ PRINTF0 (ctxt, DBG_CMD, "CheckCommandQueueFlushed: dproc halted, checking for trap\n"); -+ -+ if (cflags == DmaComQueueNotEmpty && --dev->HaltDmaDequeueCount == 0) -+ SetSchedStatusRegister (dev, 0, NULL); -+ -+ return (ELAN3_QUEUE_BACK_EMPTY (ctxt->CommandTrapQ) ? ISSUE_COMMAND_OK : ISSUE_COMMAND_TRAPPED); -+} -+ -+int -+WaitForCommandPort (ELAN3_CTXT *ctxt) -+{ -+ ELAN3_DEV *dev = ctxt->Device; -+ int res; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ -+ if (ctxt->Status & CTXT_DETACHED) -+ res = EINVAL; -+ else -+ { -+ if (! ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ) || (ctxt->Status & CTXT_OTHERS_REASONS)) -+ { -+ ctxt->Status |= CTXT_WAITING_COMMAND; -+ if (CTXT_IS_KERNEL(ctxt)) -+ kcondvar_wait (&ctxt->CommandPortWait, &dev->IntrLock, &flags); -+ else -+ kcondvar_waitsig (&ctxt->CommandPortWait, &dev->IntrLock, &flags); -+ } -+ -+ res = (!ELAN3_QUEUE_EMPTY(ctxt->CommandTrapQ) || (ctxt->Status & CTXT_OTHERS_REASONS)) ? EAGAIN : 0; -+ } -+ -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ -+ return (res); -+} -+ -+static char * -+CommandName (int offset) -+{ -+ switch (offset) -+ { -+ case offsetof (E3_CommandPort, PutDma): return ("PutDma"); -+ case offsetof (E3_CommandPort, GetDma): return ("GetDma"); -+ case offsetof (E3_CommandPort, RunThread): return ("RunThread"); -+ case offsetof (E3_CommandPort, WaitEvent0): return ("WaitEvent0"); -+ case offsetof (E3_CommandPort, WaitEvent1): return ("WaitEvent1"); -+ case offsetof (E3_CommandPort, SetEvent): return ("SetEvent"); -+ default: return ("Bad Command"); -+ } -+} -+ -+int -+IssueCommand (ELAN3_CTXT *ctxt, unsigned cmdoff, E3_Addr value, int cflags) -+{ -+ ELAN3_DEV *dev = ctxt->Device; -+ int res; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ -+ if ((! (cflags & ISSUE_COMMAND_FOR_CPROC) && !ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ)) || (ctxt->Status & CTXT_OTHERS_REASONS)) -+ { -+ /* -+ * Cannot issue commands for non-cproc traps if command port is trapped, -+ * nor if the dma/thread trap queues are full, or we're swapping out -+ */ -+ PRINTF2 (ctxt, DBG_CMD, "IssueCommand: %s %08x -> ISSUE_COMMAND_RETRY\n", -+ CommandName (cmdoff), value); -+ -+ res = ISSUE_COMMAND_RETRY; -+ } -+ else -+ { -+ PRINTF2 (ctxt, DBG_CMD, "IssueCommand: %s %08x -> ISSUE_COMMAND_OK\n", -+ CommandName (cmdoff), value); -+ -+ mb(); /* ensure writes to main memory completed */ -+ writel (value, (void *)(ctxt->CommandPort + cmdoff)); /* issue command */ -+ mmiob(); /* and flush through IO writes */ -+ -+ res = ISSUE_COMMAND_OK; -+ } -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ -+ return (res); -+} -+ -+int -+IssueDmaCommand (ELAN3_CTXT *ctxt, E3_Addr value, void *item, int how) -+{ -+ ELAN3_DEV *dev = ctxt->Device; -+ int res; -+ unsigned long flags; -+ -+ /* -+ * Since we may be issuing a command that could trap, and we're interested in -+ * the outcome, the command port trap resolving code must be locked out. -+ */ -+ kmutex_lock (&ctxt->CmdLock); -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ -+ if ((! 
(how & ISSUE_COMMAND_FOR_CPROC) && !ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ)) || (ctxt->Status & CTXT_OTHERS_REASONS))
-+ {
-+ PRINTF2 (ctxt, DBG_CMD, "IssueDmaCommand: PutDma %08x [%p] -> ISSUE_COMMAND_RETRY\n", value, item);
-+
-+ /*
-+ * Cannot issue commands for non-cproc traps if command port is trapped,
-+ * nor if the dma/thread trap queues are full, or we're swapping out
-+ */
-+ spin_unlock_irqrestore (&dev->IntrLock, flags);
-+ kmutex_unlock (&ctxt->CmdLock);
-+ return (ISSUE_COMMAND_RETRY);
-+ }
-+
-+ ASSERT (item == NULL || ctxt->CommandPortItem == NULL);
-+
-+ /*
-+ * Stop the DMA processor from removing entries from the
-+ * command port, and force the command processor to do this.
-+ * This means that if a trap occurs then it will be the command
-+ * processor that traps.
-+ */
-+ if (dev->HaltDmaDequeueCount++ == 0)
-+ SetSchedStatusRegister (dev, 0, NULL);
-+
-+ PRINTF2 (ctxt, DBG_CMD, "IssueDmaCommand: PutDma %08x [%p]\n", value, item);
-+
-+ /*
-+ * Always issue the DMA to the 'write' command, since we've asserted HaltDmaDequeue
-+ * the command processor will read the descriptor and transfer it to the run queue.
-+ * The command processor looks at the dma_direction field to determine whether it is
-+ * a read or a write and whether to alter the dma_source of the descriptor on the run
-+ * queue
-+ */
-+ mb(); /* ensure writes to main memory completed */
-+ writel (value, (void *) (ctxt->CommandPort + offsetof (E3_CommandPort, PutDma)));
-+ mmiob(); /* and flush through IO writes */
-+
-+ res = CheckCommandQueueFlushed (ctxt, DmaComQueueNotEmpty, how, &flags);
-+
-+ if (res == ISSUE_COMMAND_TRAPPED)
-+ {
-+ PRINTF2 (ctxt, DBG_CMD, "IssueDmaCommand: PutDma %08x [%p] -> ISSUE_COMMAND_TRAPPED\n", value, item);
-+ /*
-+ * Remember the item we're issuing so that if the command port traps the item will not
-+ * get freed off until the descriptor has been read after the command trap has been fixed
-+ * up.
-+ */
-+ if (item != NULL)
-+ ctxt->CommandPortItem = item;
-+ }
-+
-+ spin_unlock_irqrestore (&dev->IntrLock, flags);
-+ kmutex_unlock (&ctxt->CmdLock);
-+
-+ return (res);
-+}
-+
-+int
-+WaitForDmaCommand (ELAN3_CTXT *ctxt, void *item, int how)
-+{
-+ ELAN3_DEV *dev = ctxt->Device;
-+ int res;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave (&dev->IntrLock, flags);
-+
-+ res = CheckCommandQueueFlushed (ctxt, DmaComQueueNotEmpty, how, &flags);
-+
-+ if (res == ISSUE_COMMAND_TRAPPED && item != NULL)
-+ ctxt->CommandPortItem = item;
-+
-+ spin_unlock_irqrestore (&dev->IntrLock, flags);
-+
-+ return (res);
-+}
-+
-+void
-+FixupEventTrap (ELAN3_CTXT *ctxt, int proc, void *trap, E3_uint32 TrapType, E3_FaultSave_BE *FaultSaveArea, int flags)
-+{
-+ ASSERT (! CTXT_IS_KERNEL (ctxt));
-+
-+ /*
-+ * This code re-issues the part of the set event that trapped.
-+ */
-+ switch (TrapType)
-+ {
-+ case MI_ChainedEventError:
-+ ElanException (ctxt, EXCEPTION_CHAINED_EVENT, proc, trap, FaultSaveArea->s.EventAddress);
-+ break;
-+
-+
-+ case MI_SetEventReadWait:
-+ /*
-+ * Fault occurred on the read for the event location. Just re-issue
-+ * setevent using EventAddress in E3_FaultSave
-+ */
-+ PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_SetEventReadWait: re-issuing setevent %08x\n",
-+ FaultSaveArea->s.EventAddress);
-+
-+ ReissueEvent (ctxt, (E3_Addr) FaultSaveArea->s.EventAddress, flags);
-+ break;
-+
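-+ /*
-+ * The following cases share one pattern: the trap hit the block-write phase
-+ * of a block copy event, so sample ev_Type with fuword(), check it carries
-+ * the expected EV_TYPE_BCOPY variant, then let RunEventType() redo the copy
-+ * and complete the pending set/thread/interrupt/dma action.
-+ */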
-+	 * Must grab the event type, source and dest then simulate the block copy and then
-+	 * perform the set. Once the block copy is started the event location cannot be read
-+	 * again.
-+	 */
-+	E3_Event *EventPtr  = (E3_Event *) elan3mmu_mainaddr (ctxt->Elan3mmu, FaultSaveArea->s.EventAddress);
-+	E3_uint32 EventType = fuword (&EventPtr->ev_Type);
-+
-+	/*
-+	 * Check that the event has the block copy bit
-+	 * set in it, since we couldn't trap here if it
-+	 * didn't
-+	 */
-+	if ((EventType & EV_TYPE_BCOPY) != EV_TYPE_BCOPY)
-+	{
-+	    PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_DoSetEvent: Unexpected type=%x\n", EventType);
-+	    ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
-+	    break;
-+	}
-+
-+	PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_DoSetEvent: RunEventType %x\n", EventType);
-+
-+	if (RunEventType (ctxt, FaultSaveArea, EventType))
-+	    ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
-+
-+	break;
-+    }
-+
-+    case MI_ThreadUpdateNonSysCntxBack:
-+    case MI_ThreadUpdateSysCntxBack:
-+    {
-+	/*
-+	 * Fault occurred because the block write of a block copy event trapped.
-+	 * Must grab the event type, source and dest then simulate the block copy and then
-+	 * run the thread. Once the block copy is started the event location cannot be read
-+	 * again.
-+	 */
-+	E3_Event *EventPtr  = (E3_Event *) elan3mmu_mainaddr (ctxt->Elan3mmu, FaultSaveArea->s.EventAddress);
-+	E3_uint32 EventType = fuword (&EventPtr->ev_Type);
-+
-+	/*
-+	 * Check for the correct EventPtr type
-+	 */
-+	if ((EventType & (EV_TYPE_MASK_THREAD|EV_TYPE_MASK_BCOPY)) != (EV_TYPE_BCOPY | EV_TYPE_THREAD))
-+	{
-+	    PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_ThreadUpdateCntx0Back: Unexpected type=%x for setevent trap. Should be thread\n", EventType);
-+	    ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
-+	    break;
-+	}
-+
-+	PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_ThreadUpdateCntx0Back: RunEventType %x\n", EventType);
-+	if (RunEventType (ctxt, FaultSaveArea, EventType))
-+	    ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
-+	break;
-+    }
-+
-+    case MI_EventIntUpdateBPtr:
-+    {
-+	/*
-+	 * Fault occurred because the block write of a block copy event trapped.
-+	 * Must grab the event type, source and dest then simulate the block copy and then
-+	 * run the dma. Once the block copy is started the event location cannot be read
-+	 * again.
-+	 */
-+	E3_Event *EventPtr  = (E3_Event *) elan3mmu_mainaddr (ctxt->Elan3mmu, FaultSaveArea->s.EventAddress);
-+	E3_uint32 EventType = fuword (&EventPtr->ev_Type);
-+
-+	/*
-+	 * Check for the correct EventPtr type
-+	 */
-+	if ((EventType & (EV_TYPE_MASK_EVIRQ|EV_TYPE_MASK_BCOPY)) != (EV_TYPE_BCOPY | EV_TYPE_EVIRQ))
-+	{
-+	    PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_EventIntUpdateBPtr: Unexpected type=%x\n", EventType);
-+	    ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
-+	    break;
-+	}
-+
-+	PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_EventIntUpdateBPtr: RunEventType %x\n", EventType);
-+	if (RunEventType(ctxt, FaultSaveArea, EventType))
-+	    ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
-+	break;
-+    }
-+
-+    case MI_RunDmaDesc:
-+    {
-+	/*
-+	 * Fault occurred because the block write of a block copy event trapped.
-+	 * Must grab the event type, source and dest then simulate the block copy and then
-+	 * run the dma. Once the block copy is started the event location cannot be read
-+	 * again.
-+	 */
-+	E3_Event *EventPtr  = (E3_Event *) elan3mmu_mainaddr (ctxt->Elan3mmu, FaultSaveArea->s.EventAddress);
-+	E3_uint32 EventType = fuword (&EventPtr->ev_Type);
-+
-+	/*
-+	 * Check for the correct EventPtr type
-+	 */
-+	if ((EventType & (EV_TYPE_MASK_DMA|EV_TYPE_MASK_BCOPY)) != (EV_TYPE_BCOPY | EV_TYPE_DMA))
-+	{
-+	    PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_RunDmaDesc: Unexpected type=%x\n", EventType);
-+	    ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
-+	    break;
-+	}
-+
-+	PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_RunDmaDesc: RunEventType %x\n", EventType);
-+	if (RunEventType(ctxt, FaultSaveArea, EventType))
-+	    ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
-+	break;
-+    }
-+
-+    case MI_WaitForCntxDmaDescRead:
-+    case MI_WaitForNonCntxDmaDescRead:
-+	/*
-+	 * Fault occurred on the read of the dma descriptor. Run dma using the
-+	 * Fault Address in FaultSave.
-+	 */
-+	PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_WaitForCntxDmaDescRead: re-issue dma at %08x\n", FaultSaveArea->s.FaultAddress);
-+
-+	RestartDmaPtr (ctxt, FaultSaveArea->s.FaultAddress);
-+	break;
-+
-+    case MI_FinishedSetEvent:
-+	/*
-+	 * Fault occurred because the block write of a block copy event trapped.
-+	 * Simulate the block copy.
-+	 */
-+	if (SimulateBlockCopy (ctxt, FaultSaveArea->s.EventAddress))
-+	    ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
-+	break;
-+
-+    case MI_BlockCopyEvent:
-+    case MI_BlockCopyWaitForReadData:
-+    {
-+	/*
-+	 * Fault occurred on the read or write of the data for a block copy
-+	 * event. Simulate the block copy using EventAddress in E3_FaultSave. Must also sample
-+	 * the event type and then perform a run.
-+	 */
-+	E3_Event *EventPtr  = (E3_Event *) elan3mmu_mainaddr (ctxt->Elan3mmu, FaultSaveArea->s.EventAddress);
-+	E3_uint32 EventType = fuword (&EventPtr->ev_Type);
-+
-+	PRINTF0 (ctxt, DBG_EVENT, "FixupEventTrap: MI_BlockCopyWaitForReadData: BCopy read fault in BCopy event. 
Simulating BCopy.\n"); -+ -+ if (RunEventType(ctxt, FaultSaveArea, EventType)) -+ ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType); -+ break; -+ } -+ -+ case MI_EventQueueOverflow: -+ case MI_ThreadQueueOverflow: -+ case MI_DmaQueueOverflow: -+ /* XXXX: should handle queue overflow */ -+ PRINTF0 (ctxt, DBG_EVENT, "FixupEventTrap: Queue overflow\n"); -+ -+ ElanException (ctxt, EXCEPTION_QUEUE_OVERFLOW, proc, trap, FaultSaveArea, TrapType); -+ break; -+ -+ default: -+ ElanException (ctxt, EXCEPTION_BUS_ERROR, proc, trap, FaultSaveArea, TrapType); -+ break; -+ } -+} -+ -+int -+SimulateBlockCopy (ELAN3_CTXT *ctxt, E3_Addr EventAddress) -+{ -+ E3_Addr SourcePtrElan; -+ E3_Addr DestPtrElan; -+ unsigned DataType; -+ int i; -+ -+ if (ELAN3_OP_START_FAULT_CHECK (ctxt)) -+ { -+ ELAN3_OP_END_FAULT_CHECK (ctxt); -+ -+ ElanException (ctxt, EXCEPTION_FAULTED, EVENT_PROC, NULL, EventAddress); -+ return (TRUE); -+ } -+ -+ SourcePtrElan = ELAN3_OP_LOAD32 (ctxt, EventAddress + offsetof (E3_BlockCopyEvent, ev_Source)); -+ DestPtrElan = ELAN3_OP_LOAD32 (ctxt, EventAddress + offsetof (E3_BlockCopyEvent, ev_Dest)); -+ DataType = DestPtrElan & EV_BCOPY_DTYPE_MASK; -+ DestPtrElan &= ~EV_BCOPY_DTYPE_MASK; -+ -+ -+ PRINTF3 (ctxt, DBG_EVENT, "SimulateBlockCopy: Event %08x SourcePtr %08x DestPtr %08x\n", -+ EventAddress, SourcePtrElan, DestPtrElan); -+ -+ if (SourcePtrElan & EV_WCOPY) -+ ELAN3_OP_STORE32 (ctxt, DestPtrElan, SourcePtrElan); -+ else -+ { -+ /* -+ * NOTE: since the block copy could be to sdram, we issue the writes backwards, -+ * except we MUST ensure that the last item in the block is written last. -+ */ -+#if defined(__LITTLE_ENDIAN__) -+ /* -+ * For little endian cpu's we don't need to worry about the data type. -+ */ -+ for (i = E3_BLK_SIZE-(2*sizeof (E3_uint64)); i >= 0; i -= sizeof (E3_uint64)) -+ ELAN3_OP_STORE64 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD64 (ctxt, SourcePtrElan + i)); -+ -+ i = E3_BLK_SIZE - sizeof (E3_uint64); -+ ELAN3_OP_STORE64 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD64 (ctxt, SourcePtrElan + i)); -+#else -+ switch (DataType) -+ { -+ case EV_TYPE_BCOPY_BYTE: -+ for (i = E3_BLK_SIZE-(2*sizeof (E3_uint8)); i >= 0; i -= sizeof (E3_uint8)) -+ ELAN3_OP_STORE8 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD8 (ctxt, SourcePtrElan + i)); -+ -+ i = E3_BLK_SIZE - sizeof (E3_uint8); -+ ELAN3_OP_STORE8 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD8 (ctxt, SourcePtrElan + i)); -+ break; -+ -+ case EV_TYPE_BCOPY_HWORD: -+ for (i = E3_BLK_SIZE-(2*sizeof (E3_uint16)); i >= 0; i -= sizeof (E3_uint16)) -+ ELAN3_OP_STORE16 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD16 (ctxt, SourcePtrElan + i)); -+ -+ i = E3_BLK_SIZE - sizeof (E3_uint16); -+ ELAN3_OP_STORE16 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD16 (ctxt, SourcePtrElan + i)); -+ break; -+ -+ case EV_TYPE_BCOPY_WORD: -+ for (i = E3_BLK_SIZE-(2*sizeof (E3_uint32)); i >= 0; i -= sizeof (E3_uint32)) -+ ELAN3_OP_STORE32 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD32 (ctxt, SourcePtrElan + i)); -+ -+ i = E3_BLK_SIZE - sizeof (E3_uint32); -+ ELAN3_OP_STORE32 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD32 (ctxt, SourcePtrElan + i)); -+ break; -+ -+ case EV_TYPE_BCOPY_DWORD: -+ for (i = E3_BLK_SIZE-(2*sizeof (E3_uint64)); i >= 0; i -= sizeof (E3_uint64)) -+ ELAN3_OP_STORE64 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD64 (ctxt, SourcePtrElan + i)); -+ -+ i = E3_BLK_SIZE - sizeof (E3_uint64); -+ ELAN3_OP_STORE64 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD64 (ctxt, SourcePtrElan + i)); -+ break; -+ } -+#endif -+ } -+ ELAN3_OP_END_FAULT_CHECK (ctxt); -+ -+ return (FALSE); -+} -+ -+void 
-+ReissueEvent (ELAN3_CTXT *ctxt, E3_Addr addr, int flags) -+{ -+ PRINTF1 (ctxt, DBG_CMD, "ReissueEvent : Event=%08x\n", addr); -+ -+ if (IssueCommand (ctxt, offsetof (E3_CommandPort, SetEvent), addr, flags) == ISSUE_COMMAND_RETRY) -+ { -+ PRINTF1 (ctxt, DBG_CMD, "ReissueEvent: queue event %08x\n", addr); -+ -+ kmutex_lock (&ctxt->SwapListsLock); -+ ctxt->ItemCount[LIST_SETEVENT]++; -+ ELAN3_OP_PUT_WORD_ITEM (ctxt, LIST_SETEVENT, addr); -+ kmutex_unlock (&ctxt->SwapListsLock); -+ } -+} -+ -+int -+SetEventsNeedRestart (ELAN3_CTXT *ctxt) -+{ -+ return (ctxt->ItemCount[LIST_SETEVENT] != 0); -+} -+ -+void -+RestartSetEvents (ELAN3_CTXT *ctxt) -+{ -+ void *item; -+ E3_uint32 EventPointer; -+ -+ kmutex_lock (&ctxt->SwapListsLock); -+ -+ while (ctxt->ItemCount[LIST_SETEVENT]) -+ { -+ if (! ELAN3_OP_GET_WORD_ITEM (ctxt, LIST_SETEVENT, &item, &EventPointer)) -+ ctxt->ItemCount[LIST_SETEVENT] = 0; -+ else -+ { -+ if (IssueCommand (ctxt, offsetof (E3_CommandPort, SetEvent), EventPointer, FALSE) == ISSUE_COMMAND_RETRY) -+ { -+ ELAN3_OP_PUTBACK_ITEM (ctxt, LIST_SETEVENT, item); -+ kmutex_unlock (&ctxt->SwapListsLock); -+ return; -+ } -+ -+ ctxt->ItemCount[LIST_SETEVENT]--; -+ ELAN3_OP_FREE_WORD_ITEM (ctxt, item); -+ } -+ } -+ kmutex_unlock (&ctxt->SwapListsLock); -+} -+ -+int -+RunEventType(ELAN3_CTXT *ctxt, E3_FaultSave_BE *FaultSaveArea, E3_uint32 EventType) -+{ -+ int failed = FALSE; -+ -+ if ((EventType & EV_TYPE_BCOPY) != 0) -+ failed = SimulateBlockCopy(ctxt, FaultSaveArea->s.EventAddress); -+ -+ if ((EventType & EV_TYPE_MASK) == EV_TYPE_THREAD) -+ ReissueStackPointer (ctxt, EventType & ~(EV_TYPE_MASK_THREAD|EV_TYPE_MASK_BCOPY)); -+ else if ((EventType & EV_TYPE_MASK) == EV_TYPE_DMA) -+ RestartDmaPtr (ctxt, EventType & ~(EV_TYPE_MASK_DMA|EV_TYPE_MASK_BCOPY)); -+ else if ((EventType & EV_TYPE_EVIRQ) != 0) -+ QueueEventInterrupt (ctxt, EventType & ~(EV_TYPE_MASK_EVIRQ|EV_TYPE_MASK_BCOPY)); -+ else /* Chained event */ -+ { -+ if ((EventType & ~EV_TYPE_BCOPY) != 0) /* not null setevent */ -+ ReissueEvent (ctxt, EventType & ~(EV_TYPE_MASK_CHAIN|EV_TYPE_MASK_BCOPY), FALSE); -+ } -+ -+ return (failed); -+} -+ -+void -+WakeupLwp (ELAN3_DEV *dev, void *arg) -+{ -+ ELAN3_CTXT *ctxt = (ELAN3_CTXT *) arg; -+ unsigned long flags; -+ -+ PRINTF1 (ctxt, DBG_INTR, "WakeupLwp: %d\n", SPINLOCK_HELD (&dev->IntrLock)); -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ ctxt->Halted = 1; -+ kcondvar_wakeupone (&ctxt->HaltWait, &dev->IntrLock); -+ -+ PRINTF0 (ctxt, DBG_INTR, "WakeupLwp: woken up context\n"); -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+} -+ -+void -+QueueEventInterrupt (ELAN3_CTXT *ctxt, E3_uint32 cookie) -+{ -+ ELAN3_DEV *dev = ctxt->Device; -+ unsigned long flags; -+ -+ PRINTF1 (ctxt, DBG_EVENT, "QueueEventInterrupt: cookie %08x\n", cookie); -+ -+ if (ELAN3_OP_EVENT (ctxt, cookie, OP_INTR) == OP_DEFER) -+ { -+ spin_lock_irqsave (&ctxt->Device->IntrLock, flags); -+ -+ if (ELAN3_QUEUE_REALLY_FULL (ctxt->EventCookieQ)) -+ { -+ ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR; -+ StartSwapoutContext (ctxt, 0, NULL); -+ } -+ else -+ { -+ *(ELAN3_QUEUE_BACK (ctxt->EventCookieQ, ctxt->EventCookies)) = cookie; -+ -+ ELAN3_QUEUE_ADD (ctxt->EventCookieQ); -+ kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock); -+ if (ELAN3_QUEUE_FULL (ctxt->EventCookieQ)) -+ { -+ ctxt->Status |= CTXT_EVENT_QUEUE_FULL; -+ StartSwapoutContext (ctxt, 0, NULL); -+ } -+ } -+ spin_unlock_irqrestore (&ctxt->Device->IntrLock, flags); -+ } -+} -+ -+int -+ElanException (ELAN3_CTXT *ctxt, int type, int proc, void *trap, ...) 
-+{ -+ int res; -+ va_list ap; -+ -+ va_start (ap, trap); -+ -+ PRINTF2 (ctxt, DBG_FN, "ElanException: proc %d type %d\n", proc, type); -+ -+ res = ELAN3_OP_EXCEPTION (ctxt, type, proc, trap, ap); -+ -+ va_end (ap); -+ -+ return (res); -+} -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan3/context_linux.c linux-2.6.9/drivers/net/qsnet/elan3/context_linux.c ---- clean/drivers/net/qsnet/elan3/context_linux.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan3/context_linux.c 2004-10-28 07:51:00.000000000 -0400 -@@ -0,0 +1,229 @@ -+/* -+ * Copyright (c) 2003 by Quadrics Limited. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: context_linux.c,v 1.32 2004/10/28 11:51:00 david Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/os/context_linux.c,v $*/ -+ -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+int -+LoadElanTranslation (ELAN3_CTXT *ctxt, E3_Addr addr, int len, int protFault, int writeable) -+{ -+ ELAN3MMU *elan3mmu = ctxt->Elan3mmu; -+ ELAN3MMU_RGN *rgn; -+ caddr_t mainAddr; -+ int perm; -+ unsigned int off; -+ unsigned long flags; -+ -+ ASSERT (PAGE_ALIGNED (addr) && PAGE_ALIGNED (len)); -+ -+ PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: addr %08x len %08x%s%s\n", -+ addr, len, protFault ? " prot fault" : "", writeable ? " writeable" : ""); -+ -+ /* Ensure there's enough elan mmu tables for us to use */ -+ elan3mmu_expand (elan3mmu, addr, len, PTBL_LEVEL_3, 0); -+ -+ while (len > 0) -+ { -+ /* -+ * Retrieve permission region and calculate main address -+ */ -+ spin_lock (&elan3mmu->elan3mmu_lock); -+ -+ rgn = elan3mmu_rgnat_elan (elan3mmu, addr); -+ if (rgn == NULL) { -+ PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: no permission region at %lx %p\n", -+ (u_long) addr, rgn); -+ spin_unlock (&elan3mmu->elan3mmu_lock); -+ return (EFAULT); -+ } -+ mainAddr = rgn->rgn_mbase + (addr - rgn->rgn_ebase); -+ -+ ASSERT (PAGE_ALIGNED ((unsigned long)mainAddr)); -+ -+ spin_unlock (&elan3mmu->elan3mmu_lock); -+ -+ /* -+ * If we're tying to load a translation to the elan command port, -+ * then don't do it now, but mark the context to have it reloaded -+ * just before we restart any threads. We do this because we don't -+ * want to call into the segment driver since we could then block -+ * waiting for the command port to become available. 
-+	 */
-+	if (mainAddr == ctxt->CommandPageMapping)
-+	{
-+	    PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: addr=%08x maps command port\n", addr);
-+
-+	    spin_lock_irqsave (&ctxt->Device->IntrLock, flags);
-+	    UnloadCommandPageMapping (ctxt);
-+	    spin_unlock_irqrestore (&ctxt->Device->IntrLock, flags);
-+	}
-+	else
-+	{
-+	    struct vm_area_struct *area;
-+	    struct mm_struct      *mm = current->mm;
-+	    pte_t                 *ptep_ptr;
-+	    pte_t                  ptep_value;
-+
-+	    down_read (&current->mm->mmap_sem);
-+
-+	    if ((area = find_vma_intersection(mm, (unsigned long)mainAddr, (unsigned long)mainAddr + PAGESIZE)) == NULL)
-+	    {
-+		PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: %p no vma\n", mainAddr);
-+		up_read (&current->mm->mmap_sem);
-+		return EFAULT;
-+	    }
-+
-+	    if (writeable && !(area->vm_flags & VM_WRITE))
-+	    {
-+		PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: %p not writeable\n", mainAddr);
-+		up_read (&current->mm->mmap_sem);
-+		return EFAULT;
-+	    }
-+
-+	    spin_lock (&mm->page_table_lock);
-+
-+	    /* don't dereference the pointer after the unmap */
-+	    ptep_ptr = find_pte_map (mm, (unsigned long)mainAddr);
-+	    if (ptep_ptr) {
-+		ptep_value = *ptep_ptr;
-+		pte_unmap(ptep_ptr);
-+	    }
-+
-+	    PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: %p %s %s\n",
-+		    mainAddr, writeable ? "writeable" : "readonly",
-+		    !ptep_ptr ? "invalid" : pte_none(ptep_value) ? "none " : !pte_present(ptep_value) ? "swapped " :
-+		    writeable && !pte_write(ptep_value) ? "COW" : "OK");
-+
-+	    if (!ptep_ptr || pte_none(ptep_value) || !pte_present(ptep_value) || (writeable && !pte_write(ptep_value)))
-+	    {
-+		spin_unlock (&mm->page_table_lock);
-+
-+		get_user_pages (current, current->mm, (unsigned long) mainAddr, PAGE_SIZE,
-+				(area->vm_flags & VM_WRITE), 0, NULL, NULL);
-+
-+		spin_lock (&mm->page_table_lock);
-+
-+		/* don't dereference the pointer after the unmap */
-+		ptep_ptr = find_pte_map (mm, (unsigned long)mainAddr);
-+		if (ptep_ptr) {
-+		    ptep_value = *ptep_ptr;
-+		    pte_unmap(ptep_ptr);
-+		}
-+
-+		if (!ptep_ptr || pte_none(ptep_value) || !pte_present(ptep_value) || (writeable && !pte_write(ptep_value)))
-+		{
-+		    spin_unlock (&mm->page_table_lock);
-+		    up_read (&current->mm->mmap_sem);
-+		    return EFAULT;
-+		}
-+	    }
-+
-+	    /* don't allow user write access to kernel pages if not kernel */
-+	    if (!pte_read(ptep_value))
-+	    {
-+		spin_unlock (&mm->page_table_lock);
-+		up_read (&current->mm->mmap_sem);
-+		return EFAULT;
-+	    }
-+
-+	    if (writeable)
-+		pte_mkdirty(ptep_value);
-+	    pte_mkyoung (ptep_value);
-+
-+	    /* now load the elan pte */
-+	    if (writeable)
-+		perm = rgn->rgn_perm;
-+	    else
-+		perm = ELAN3_PERM_READONLY(rgn->rgn_perm & ELAN3_PTE_PERM_MASK) | (rgn->rgn_perm & ~ELAN3_PTE_PERM_MASK);
-+
-+	    for (off = 0; off < PAGE_SIZE; off += ELAN3_PAGE_SIZE)
-+		elan3mmu_pteload (elan3mmu, PTBL_LEVEL_3, addr + off, pte_phys(ptep_value) + off, perm, PTE_LOAD | PTE_NO_SLEEP);
-+
-+	    spin_unlock (&mm->page_table_lock);
-+	    up_read (&current->mm->mmap_sem);
-+	}
-+
-+	len  -= PAGESIZE;
-+	addr += PAGESIZE;
-+    }
-+    return (ESUCCESS);
-+}
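-+
-+/*
-+ * Editor's note: a minimal sketch, not in the original source, showing
-+ * the page alignment that LoadElanTranslation() asserts; callers round
-+ * the requested range out to whole pages first.  ExampleLoadRange is a
-+ * hypothetical name, and PAGEOFFSET/PAGEMASK are assumed to have the
-+ * usual PAGESIZE-1 / ~(PAGESIZE-1) definitions used elsewhere here.
-+ */
-+static int
-+ExampleLoadRange (ELAN3_CTXT *ctxt, E3_Addr addr, int len, int writeable)
-+{
-+    E3_Addr base = addr & PAGEMASK;				  /* round the start down to a page boundary */
-+    int     size = ((addr + len + PAGEOFFSET) & PAGEMASK) - base; /* and round the end up to the next one */
-+
-+    return (LoadElanTranslation (ctxt, base, size, 0, writeable));
-+}
-+
-+/*
-+ * LoadCommandPortTranslation:
-+ *    explicitly load an elan translation to the command port.
-+ *    but only do it if the command port is accessible.
-+ *
-+ *    we call this function just after we have restarted
-+ *    and trapped commands, since when a command traps
-+ *    the elan translation to the command port is unloaded.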
-+ */ -+void -+LoadCommandPortTranslation (ELAN3_CTXT *ctxt) -+{ -+ ELAN3MMU *elan3mmu = ctxt->Elan3mmu; -+ ELAN3MMU_RGN *rgn; -+ E3_Addr addr; -+ int perm; -+ physaddr_t phys; -+ unsigned int off; -+ unsigned long flags; -+ -+ PRINTF (ctxt, DBG_FAULT, "LoadCommandPortTranslation: SegAddr=%p Status=%x\n", ctxt->CommandPageMapping, ctxt->Status); -+ -+ if (ctxt->CommandPageMapping != NULL && !(ctxt->Status & CTXT_COMMAND_MAPPED_ELAN)) -+ { -+ spin_lock (&elan3mmu->elan3mmu_lock); -+ -+ rgn = elan3mmu_rgnat_main (elan3mmu, ctxt->CommandPageMapping); -+ if (rgn == (ELAN3MMU_RGN *) NULL) -+ { -+ PRINTF(ctxt, DBG_FAULT, "LoadCommandPortTranslation: no permission for command port\n"); -+ spin_unlock (&elan3mmu->elan3mmu_lock); -+ return; -+ } -+ -+ addr = rgn->rgn_ebase + (ctxt->CommandPageMapping - rgn->rgn_mbase); -+ perm = rgn->rgn_perm; -+ phys = kmem_to_phys((caddr_t) ctxt->CommandPage); -+ -+ spin_lock_irqsave (&ctxt->Device->IntrLock, flags); -+ if (ELAN3_QUEUE_EMPTY(ctxt->CommandTrapQ) && !(ctxt->Status & CTXT_OTHERS_REASONS)) -+ { -+ PRINTF(ctxt, DBG_FAULT, "LoadCommandPortTranslation: load xlation addr=%08x phys=%llx perm=%d\n", -+ addr, (unsigned long long)phys, perm); -+ -+ ctxt->Status |= CTXT_COMMAND_MAPPED_ELAN; -+ -+ for (off = 0; off < PAGESIZE; off += ELAN3_PAGE_SIZE) -+ elan3mmu_pteload (elan3mmu, PTBL_LEVEL_3, addr + off, phys + off, perm, PTE_LOAD | PTE_NO_SLEEP); -+ } -+ spin_unlock_irqrestore (&ctxt->Device->IntrLock, flags); -+ -+ spin_unlock (&elan3mmu->elan3mmu_lock); -+ } -+} -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan3/cproc.c linux-2.6.9/drivers/net/qsnet/elan3/cproc.c ---- clean/drivers/net/qsnet/elan3/cproc.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan3/cproc.c 2004-02-10 10:05:10.000000000 -0500 -@@ -0,0 +1,539 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: cproc.c,v 1.46 2004/02/10 15:05:10 david Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/os/cproc.c,v $ */ -+ -+ -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+void -+HandleCProcTrap (ELAN3_DEV *dev, E3_uint32 Pend, E3_uint32 *Maskp) -+{ -+ E3_FaultSave_BE FaultSave; -+ CProcTrapBuf_BE TrapBuf; -+ COMMAND_TRAP *trap; -+ ELAN3_CTXT *ctxt; -+ sdramaddr_t CurrTrap; -+ sdramaddr_t LastTrapAddr; -+ int NTrapEntries; -+ int NewPend; -+ unsigned long flags; -+ -+ /* -+ * Temporarily mask out the command processor interrupt, since -+ * we may cause it be re-asserted when we re-issue the commands -+ * from the overflow queue area. -+ */ -+ DISABLE_INT_MASK (dev, INT_CProc | INT_ComQueue); -+ -+ NewPend = read_reg32 (dev, Exts.InterruptReg); -+ -+ do { -+ if (NewPend & INT_ComQueue) -+ { -+ if ((read_reg32 (dev, ComQueueStatus) & ComQueueError) != 0) -+ { -+ printk ("elan%d: InterruptReg=%x ComQueueStatus=%x\n", dev->Instance, -+ read_reg32 (dev, Exts.InterruptReg), read_reg32 (dev, ComQueueStatus)); -+ panic ("elan: command queue has overflowed !!"); -+ /* NOTREACHED */ -+ } -+ -+ BumpStat (dev, ComQueueHalfFull); -+ -+ /* -+ * Capture the other cpus and stop the threads processor then -+ * allow the command processor to eagerly flush the command queue. 
-+ */ -+ dev->FlushCommandCount++; dev->HaltThreadCount++; -+ SetSchedStatusRegister (dev, Pend, Maskp); -+ -+ CAPTURE_CPUS(); -+ -+ while ((read_reg32 (dev, ComQueueStatus) & ComQueueNotEmpty) != 0) -+ mb(); -+ -+ /* -+ * Let the threads processor run again, and release the cross call. -+ */ -+ RELEASE_CPUS(); -+ -+ dev->FlushCommandCount--; dev->HaltThreadCount--; -+ SetSchedStatusRegister (dev, Pend, Maskp); -+ -+ /* -+ * Re-sample the interrupt register to see if the command processor -+ * has trapped while flushing the queue. Preserve the INT_ComQueue -+ * bit, so we can clear the ComQueueStatus register later. -+ */ -+ NewPend = (read_reg32 (dev, Exts.InterruptReg) | INT_ComQueue); -+ } -+ -+ CurrTrap = dev->CommandPortTraps[dev->CurrentCommandPortTrap]; -+ -+ if (NewPend & INT_CProc) -+ { -+ BumpStat (dev, CProcTraps); -+ -+ /* -+ * Copy the MMU Fault Save area and zero it out for future traps. -+ */ -+ elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, CProc), &FaultSave, sizeof (E3_FaultSave)); -+ elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, CProc), sizeof (E3_FaultSave)); -+ -+ /* -+ * First entry in the cproc trap save area is the value of Areg and Breg for the -+ * uWord before the address fault. -+ */ -+ TrapBuf.Align64 = elan3_sdram_readq (dev, CurrTrap); CurrTrap += sizeof (TrapBuf.Align64); -+ -+ ctxt = ELAN3_DEV_CTX_TABLE(dev, (TrapBuf.r.Breg >> 16)); -+ if (ctxt == NULL) -+ { -+ PRINTF2 (DBG_DEVICE, DBG_INTR, "HandleCProcTrap: context invalid [%08x.%08x]\n", TrapBuf.r.Areg, TrapBuf.r.Breg); -+ BumpStat (dev, InvalidContext); -+ } -+ else -+ { -+ if (ELAN3_QUEUE_REALLY_FULL (ctxt->CommandTrapQ)) -+ { -+ if ((ctxt->Status & CTXT_COMMAND_OVERFLOW_ERROR) == 0) -+ { -+ ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR; -+ StartSwapoutContext (ctxt, Pend, Maskp); -+ } -+ } -+ else -+ { -+ trap = ELAN3_QUEUE_BACK (ctxt->CommandTrapQ, ctxt->CommandTraps); -+ -+ trap->FaultSave = FaultSave; -+ trap->Status.Status = read_reg32 (dev, Exts.CProcStatus.Status); -+ trap->TrapBuf = TrapBuf; -+ -+ /* -+ * The command processor does not stop after it has trapped. It will continue -+ * to save commands for other contexts into the commands port save area. -+ * The valid context for the trap is held in FaultSave. As some of this -+ * trap code uses the context in the status register the local copy must be -+ * updated with the trap context. -+ */ -+ trap->Status.s.Context = (TrapBuf.r.Breg >> 16); -+ -+ PRINTF4 (ctxt, DBG_INTR, "HandleCProcTrap: WakeupFnt=%x Cntx=%x SuspAddr=%x TrapType=%s\n", -+ trap->Status.s.WakeupFunction, trap->Status.s.Context, -+ trap->Status.s.SuspendAddr, MiToName(trap->Status.s.TrapType)); -+ PRINTF2 (ctxt, DBG_INTR, "HandleCProcTrap: Areg=%08x Breg=%08x\n", -+ trap->TrapBuf.r.Areg, trap->TrapBuf.r.Breg); -+ -+ if (ELAN3_OP_CPROC_TRAP (ctxt, trap) == OP_DEFER) -+ { -+ ELAN3_QUEUE_ADD (ctxt->CommandTrapQ); -+ -+ PRINTF1 (ctxt, DBG_INTR, "HandleCProcTrap: setting Command Flag at %p to 1\n", &ctxt->FlagPage->CommandFlag); -+ -+ ctxt->FlagPage->CommandFlag = 1; -+ -+ kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock); -+ } -+ } -+ -+ UnloadCommandPageMapping (ctxt); -+ } -+ } -+ -+ /* -+ * Now change the CommandPortTrap queue. -+ * Must stop the command processor, wait for it to stop, find the final -+ * entry in the current cproc trap save area, reset the comm port -+ * trap save address to the other queue, clear the command port interrupt and -+ * set it running normally again, and then let it go again. 
This is not very
-+     * time critical but it would be a good idea to prevent a higher priority
-+     * interrupt from slowing down the process, to prevent the fifos filling.
-+     */
-+    spin_lock_irqsave (&dev->CProcLock, flags);
-+
-+    SET_SCHED_STATUS (dev, CProcStop);
-+
-+    while ((read_reg32 (dev, Exts.SchCntReg) & CProcStopped) == 0)
-+    {
-+	PRINTF0 (DBG_DEVICE, DBG_INTR, "HandleCProcTrap: waiting for command processor to stop\n");
-+	mb();
-+    }
-+
-+    /*
-+     * Remember how many entries are in the saved command queue, and
-+     * re-initialise it, before restarting the command processor.
-+     */
-+    NTrapEntries = (read_reg32 (dev, CProc_TrapSave_Addr) - dev->CommandPortTraps[dev->CurrentCommandPortTrap])/sizeof (E3_uint64);
-+    LastTrapAddr = dev->CommandPortTraps[dev->CurrentCommandPortTrap] + NTrapEntries*sizeof (TrapBuf);
-+
-+    dev->CurrentCommandPortTrap ^= 1;
-+    write_reg32 (dev, CProc_TrapSave_Addr, dev->CommandPortTraps[dev->CurrentCommandPortTrap]);
-+
-+    PRINTF1 (DBG_DEVICE, DBG_INTR, "HandleCProcTrap: command trap queue has %d entries\n", NTrapEntries);
-+
-+    if (NTrapEntries > ELAN3_COMMAND_TRAP_SIZE/sizeof (E3_uint64))
-+	panic ("HandleCProcTrap: command trap queue has overflowed\n");
-+
-+    if (NewPend & INT_CProc)
-+    {
-+	/*
-+	 * Clear the CProc interrupt and set it running normally again. Nothing should
-+	 * be running now that could issue commands apart from this trap handler.
-+	 */
-+	PULSE_SCHED_STATUS (dev, RestartCProc);
-+    }
-+
-+    if (NewPend & INT_ComQueue)
-+    {
-+	/*
-+	 * Write any value here to clear out the half full and error bits of the command
-+	 * overflow queues. This will also remove the overflow interrupt.
-+	 */
-+	write_reg32 (dev, ComQueueStatus, 0);
-+    }
-+
-+    /*
-+     * And let the command processor start again
-+     */
-+    CLEAR_SCHED_STATUS (dev, CProcStop);
-+
-+    /*
-+     * Now re-issue all the commands that were issued after the command port trapped.
-+     * Should halt the dma processor and force commands to be put onto the run queues
-+     * to ensure that a remote re-issued command is handled correctly. NOTE it is
-+     * not necessary to wait for the dma processor to stop and this will reduce the
-+     * performance impact. As CProcHalt is asserted all commands will be flushed
-+     * to the queues.
-+     */
-+    dev->HaltDmaDequeueCount++; dev->FlushCommandCount++;
-+    SetSchedStatusRegister (dev, Pend, Maskp);
-+
-+    /*
-+     * XXXX: should we do a capture/release if the trap overflow
-+     *       area has a "large" number of commands in it, since
-+     *       we will just stuff them all back in, together with
-+     *       all those issued by the other cpus/thread processors.
-+     */
-+    while (CurrTrap != LastTrapAddr)
-+    {
-+	/* Read the next saved (but not trapped) command */
-+	TrapBuf.Align64 = elan3_sdram_readq (dev, CurrTrap); CurrTrap += sizeof (TrapBuf);
-+
-+
-+	ctxt = ELAN3_DEV_CTX_TABLE(dev, (TrapBuf.s.ContextType >> 16));
-+
-+	if (ctxt == NULL)
-+	{
-+	    PRINTF1 (DBG_DEVICE, DBG_INTR, "HandleCProcTrap: context %x invalid\n", TrapBuf.s.ContextType >> 16);
-+	    BumpStat (dev, InvalidContext);
-+	}
-+	else
-+	{
-+	    if (!ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ) || (ctxt->Status & CTXT_OTHERS_REASONS))
-+	    {
-+		PRINTF3 (ctxt, DBG_INTR, "HandleCProcTrap: save command %x context %x - %08x\n",
-+			 (TrapBuf.s.ContextType>>3) & 0x3ff, TrapBuf.s.ContextType >> 17, TrapBuf.s.Addr);
-+
-+		if (ELAN3_QUEUE_REALLY_FULL (ctxt->CommandQ))
-+		{
-+		    ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR;
-+		    StartSwapoutContext (ctxt, Pend, Maskp);
-+		}
-+		else
-+		{
-+		    *ELAN3_QUEUE_BACK(ctxt->CommandQ, ctxt->Commands) = TrapBuf;
-+
-+		    ELAN3_QUEUE_ADD (ctxt->CommandQ);
-+		}
-+		continue;
-+	    }
-+
-+	    /* Reissue the command to the command port for this context */
-+	    PRINTF2 (ctxt, DBG_INTR, "HandleCProcTrap: re-issue command %x - %08x\n",
-+		     (TrapBuf.s.ContextType>>5) & 0xff, TrapBuf.s.Addr);
-+
-+	    mb();
-+	    if (ELAN3_OP_CPROC_REISSUE(ctxt, &TrapBuf) != OP_HANDLED)
-+		((E3_uint32 *) ctxt->CommandPort)[(TrapBuf.s.ContextType>>5) & 0xff] = TrapBuf.s.Addr;
-+	    mmiob();
-+	}
-+    }
-+
-+    while ((read_reg32 (dev, ComQueueStatus) & ComQueueNotEmpty) != 0)
-+    {
-+	PRINTF0 (DBG_DEVICE, DBG_INTR, "HandleCProcTrap: waiting for queues to empty after reissuing commands\n");
-+	mb();
-+    }
-+
-+    dev->HaltDmaDequeueCount--; dev->FlushCommandCount--;
-+    SetSchedStatusRegister (dev, Pend, Maskp);
-+
-+    spin_unlock_irqrestore (&dev->CProcLock, flags);
-+
-+    /*
-+     * Re-read the interrupt register and see if we've got another command
-+     * port interrupt
-+     */
-+    NewPend = read_reg32 (dev, Exts.InterruptReg);
-+    } while ((NewPend & (INT_CProc | INT_ComQueue)) != 0);
-+
-+
-+    /*
-+     * Re-enable the command processor interrupt as we've finished
-+     * polling it.
-+     */
-+    ENABLE_INT_MASK (dev, INT_CProc | INT_ComQueue);
-+}
-+
-+void
-+ResolveCProcTrap (ELAN3_CTXT *ctxt)
-+{
-+    ELAN3_DEV     *dev = ctxt->Device;
-+    COMMAND_TRAP  *trap;
-+    int            res;
-+    unsigned long  flags;
-+
-+    kmutex_lock (&ctxt->CmdLock);
-+    spin_lock_irqsave (&dev->IntrLock, flags);
-+
-+    while (! 
ELAN3_QUEUE_BACK_EMPTY (ctxt->CommandTrapQ)) -+ { -+ trap = ELAN3_QUEUE_MIDDLE(ctxt->CommandTrapQ, ctxt->CommandTraps); -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ -+ switch (trap->Status.s.TrapType) -+ { -+ case MI_EventIntUpdateBPtr: -+ case MI_ChainedEventError: -+ case MI_EventQueueOverflow: -+ case MI_ThreadQueueOverflow: -+ case MI_DmaQueueOverflow: -+ PRINTF1 (ctxt, DBG_CPROC, "ResolveCProcTrap: %s\n", MiToName (trap->Status.s.TrapType)); -+ break; -+ -+ default: -+ /* All other traps are MMU related, we should have a fault address and FSR */ -+ if ((res = elan3_pagefault (ctxt, &trap->FaultSave, 1)) != ESUCCESS) -+ { -+ PRINTF1 (ctxt, DBG_CPROC, "ResolveCProcTrap: elan3_pagefault failed for address %08x\n", -+ trap->FaultSave.s.FaultAddress); -+ ElanException (ctxt, EXCEPTION_INVALID_ADDR, COMMAND_PROC, trap, &trap->FaultSave, res); -+ -+ /* Set the trap type to 0 so the command does not get re-issued */ -+ trap->Status.s.TrapType = 0; -+ } -+ break; -+ } -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ -+ ELAN3_QUEUE_CONSUME (ctxt->CommandTrapQ); -+ } -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ kmutex_unlock (&ctxt->CmdLock); -+} -+ -+int -+RestartCProcTrap (ELAN3_CTXT *ctxt) -+{ -+ ELAN3_DEV *dev = ctxt->Device; -+ COMMAND_TRAP trap; -+ void *item; -+ int res; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ -+ while (! ELAN3_QUEUE_FRONT_EMPTY (ctxt->CommandTrapQ)) -+ { -+ trap = (*ELAN3_QUEUE_FRONT (ctxt->CommandTrapQ, ctxt->CommandTraps)); -+ ELAN3_QUEUE_REMOVE (ctxt->CommandTrapQ); -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ -+ BumpUserStat (ctxt, CProcTraps); -+ -+ switch (trap.Status.s.TrapType) -+ { -+ case 0: -+ res = ISSUE_COMMAND_OK; -+ break; -+ -+ case MI_WaitForWaitEventDesc: -+ /* -+ * Fault occured on the read of wait event descriptor for wait event type 0. -+ * Fault already fixed. Just re-issue the wait command. Wait event descriptor addr -+ * is in the Areg save value. -+ */ -+ PRINTF1 (ctxt, DBG_CPROC, "RestartCProcTrap: WaitEvent type0 desc read fault %08x\n", -+ trap.TrapBuf.r.Areg); -+ -+ res = IssueCommand (ctxt, offsetof (E3_CommandPort, WaitEvent0), trap.TrapBuf.r.Areg, ISSUE_COMMAND_FOR_CPROC); -+ break; -+ -+ case MI_WaitForEventReadTy0: -+ /* -+ * Fault occured on the read of event location for wait event type 0. -+ * Fault already fixed. Just re-issue the wait command. Wait event descriptor addr -+ * is in the Areg save value. -+ */ -+ PRINTF1 (ctxt, DBG_CPROC, "RestartCProcTrap: WaitEvent type0 event loc fault %08x\n", -+ trap.TrapBuf.r.Areg); -+ -+ res = IssueCommand (ctxt, offsetof (E3_CommandPort, WaitEvent0), trap.TrapBuf.r.Areg, ISSUE_COMMAND_FOR_CPROC); -+ break; -+ -+ case MI_WaitForEventReadTy1: -+ /* -+ * Fault occured on the read of the event location for wait event type 1. -+ * Areg has the original ptr and count. -+ * Fault already fixed. Just re-issue the wait command using Areg and context. -+ */ -+ PRINTF1 (ctxt, DBG_CPROC, "RestartCProcTrap: WaitEvent type1 event location read fault %08x\n", -+ trap.TrapBuf.r.Areg); -+ res = IssueCommand (ctxt, offsetof (E3_CommandPort, WaitEvent1), trap.TrapBuf.r.Areg, ISSUE_COMMAND_FOR_CPROC); -+ break; -+ -+ case MI_WaitForCntxDmaDescRead: -+ case MI_WaitForNonCntxDmaDescRead: -+ /* -+ * Fault occured on the read of the dma descriptor. Run dma using the -+ * Fault Address in FaultSave. 
-+ */ -+ PRINTF1 (ctxt, DBG_CPROC, "RestartCProcTrap: MI_WaitForCntxDmaDescRead: re-issue dma at %08x\n", -+ trap.FaultSave.s.FaultAddress); -+ -+ res = IssueDmaCommand (ctxt, trap.FaultSave.s.FaultAddress, NULL, ISSUE_COMMAND_FOR_CPROC); -+ break; -+ -+ default: -+ /* -+ * Assume the fault will be fixed by FixupEventTrap. -+ */ -+ FixupEventTrap (ctxt, COMMAND_PROC, &trap, trap.Status.s.TrapType, &trap.FaultSave, ISSUE_COMMAND_FOR_CPROC); -+ -+ res = ISSUE_COMMAND_OK; -+ break; -+ } -+ -+ switch (res) -+ { -+ case ISSUE_COMMAND_OK: /* command re-issued ok*/ -+ break; -+ -+ case ISSUE_COMMAND_TRAPPED: /* command trapped, it will have been copied */ -+ return (EAGAIN); /* to the back of the trap queue */ -+ -+ case ISSUE_COMMAND_RETRY: /* didn't issue command, so place back at front for */ -+ spin_lock_irqsave (&dev->IntrLock, flags); /* later (after resolving other traps */ -+ -+ if (ELAN3_QUEUE_REALLY_FULL (ctxt->CommandTrapQ)) -+ ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR; -+ else -+ { -+ ELAN3_QUEUE_ADD_FRONT(ctxt->CommandTrapQ); -+ (*ELAN3_QUEUE_FRONT (ctxt->CommandTrapQ, ctxt->CommandTraps)) = trap; -+ } -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ return (EAGAIN); -+ -+ default: -+ return (EINVAL); -+ } -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ } -+ -+ /* -+ * GNAT 5409 - if CommandPortItem was not NULL, but other reasons were set, -+ * then we'd not free the CommandPortItem even though we'd re- -+ * issued all trapped and overflowed commands. Hence only return -+ * without clearing CommandPortItem if we will be called again as -+ * either CommandTrapQ or CommandQ is not empty. -+ */ -+ -+ /* Now run the overflowed commands for this context */ -+ if (! ELAN3_QUEUE_EMPTY (ctxt->CommandQ)) -+ { -+ if (! ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ) || (ctxt->Status & CTXT_OTHERS_REASONS)) -+ { -+ PRINTF0 (ctxt, DBG_CPROC, "RestartCProcTrap: cannot issue overflowed commands\n"); -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ return (EAGAIN); -+ } -+ -+ /* -+ * Just re-issue the commands, if one traps then the remainder will -+ * just get placed in the overflow queue again and the interrupt handler -+ * will copy them back in here. -+ * -+ * Stop the dma processor from taking commands, since one of the commands -+ * could be a re-issued remote dma, which must be processed by the command -+ * processor. -+ */ -+ -+ if (dev->HaltDmaDequeueCount++ == 0) -+ SetSchedStatusRegister (dev, 0, NULL); -+ -+ while (! 
ELAN3_QUEUE_EMPTY (ctxt->CommandQ))
-+	{
-+	    CProcTrapBuf_BE *TrapBuf = ELAN3_QUEUE_FRONT (ctxt->CommandQ, ctxt->Commands);
-+
-+	    PRINTF2 (ctxt, DBG_CPROC, "RestartCProcTrap: re-issue command %x - %08x\n",
-+		     (TrapBuf->s.ContextType>>5) & 0xff, TrapBuf->s.Addr);
-+	    mb();					/* ensure writes to main memory completed */
-+	    ((E3_uint32 *) ctxt->CommandPort)[(TrapBuf->s.ContextType>>5) & 0xff] = TrapBuf->s.Addr;
-+	    mmiob();					/* and flush through IO writes */
-+
-+	    ELAN3_QUEUE_REMOVE (ctxt->CommandQ);
-+	}
-+
-+	/* observe the command processor having halted */
-+	res = CheckCommandQueueFlushed (ctxt, DmaComQueueNotEmpty, 0, &flags);
-+
-+	if (res != ISSUE_COMMAND_OK)
-+	{
-+	    PRINTF0 (ctxt, DBG_CPROC, "RestartCProcTrap: trapped after issuing overflowed commands\n");
-+	    spin_unlock_irqrestore (&dev->IntrLock, flags);
-+	    return (EAGAIN);
-+	}
-+    }
-+
-+    /* remove the command port item, while holding the lock */
-+    item = ctxt->CommandPortItem;
-+    ctxt->CommandPortItem = NULL;
-+
-+    spin_unlock_irqrestore (&dev->IntrLock, flags);
-+
-+    if (item != NULL)					/* Free off any item that may have been stored */
-+    {							/* because of the command port trap */
-+	PRINTF1 (ctxt, DBG_CPROC, "RestartCProcTrap: commandPortItem %p\n", item);
-+
-+	kmutex_lock (&ctxt->SwapListsLock);
-+	ELAN3_OP_FREE_BLOCK_ITEM (ctxt, item);
-+	kmutex_unlock (&ctxt->SwapListsLock);
-+    }
-+
-+    return (ESUCCESS);
-+}
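-+
-+/*
-+ * Editor's note: a minimal sketch, not in the original source, of the
-+ * expected resolve-then-restart calling sequence for command processor
-+ * traps; RestartCProcTrap() returns EAGAIN while trapped or overflowed
-+ * commands remain to be reissued.  ExampleServiceCProc is a
-+ * hypothetical name used only for illustration.
-+ */
-+static void
-+ExampleServiceCProc (ELAN3_CTXT *ctxt)
-+{
-+    do {
-+	ResolveCProcTrap (ctxt);			/* fix up faults for all queued traps */
-+    } while (RestartCProcTrap (ctxt) == EAGAIN);	/* then reissue until the queues drain */
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "stroustrup"
-+ * End:
-+ */
-diff -urN clean/drivers/net/qsnet/elan3/dproc.c linux-2.6.9/drivers/net/qsnet/elan3/dproc.c
---- clean/drivers/net/qsnet/elan3/dproc.c	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/elan3/dproc.c	2003-09-24 09:57:25.000000000 -0400
-@@ -0,0 +1,553 @@
-+/*
-+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. 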
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: dproc.c,v 1.52 2003/09/24 13:57:25 david Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/os/dproc.c,v $ */ -+ -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define DMA_RETRY_FAIL_COUNT 8 -+ -+static void PrintUserDma (ELAN3_CTXT *ctxt, E3_Addr addr); -+ -+int -+HandleDProcTrap (ELAN3_DEV *dev, E3_uint32 *RestartBits) -+{ -+ DMA_TRAP *trap = dev->DmaTrap; -+ -+ ASSERT(SPINLOCK_HELD (&dev->IntrLock)); -+ -+ /* Scoop out the trap information, before restarting the Elan */ -+ trap->Status.Status = read_reg32 (dev, Exts.DProcStatus.Status); -+ -+ ASSERT(trap->Status.s.WakeupFunction == WakeupNever); -+ -+ /* copy the normal dma access fault type */ -+ elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc), &trap->FaultSave, sizeof (E3_FaultSave_BE)); -+ -+ /* copy all 4 of the dma data fault type */ -+ elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0), &trap->Data0, 4*sizeof (E3_FaultSave_BE)); -+ -+ /* Copy the DMA descriptor */ -+ copy_dma_regs (dev, &trap->Desc); -+ -+ /* Copy the packet info */ -+ trap->PacketInfo.Value = read_reg32 (dev, Exts.Dmas.DmaRds.DMA_PacketInfo.Value); -+ -+ /* update device statistics */ -+ BumpStat (dev, DProcTraps); -+ switch (trap->Status.s.TrapType) -+ { -+ case MI_DmaPacketTimedOutOrPacketError: -+ if (trap->PacketInfo.s.PacketTimeout) -+ BumpStat (dev, DmaOutputTimeouts); -+ else if (trap->PacketInfo.s.PacketAckValue == C_ACK_ERROR) -+ BumpStat (dev, DmaPacketAckErrors); -+ break; -+ -+ case MI_DmaFailCountError: -+ BumpStat (dev, DmaRetries); -+ break; -+ } -+ -+ /* Must now zero all the FSRs so that a subsequent fault can be seen */ -+ elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc), sizeof (E3_FaultSave)); -+ elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0), 4*sizeof (E3_FaultSave)); -+ -+ *RestartBits |= RestartDProc; -+ return (TRUE); -+} -+ -+void -+DeliverDProcTrap (ELAN3_DEV *dev, DMA_TRAP *dmaTrap, E3_uint32 Pend) -+{ -+ ELAN3_CTXT *ctxt; -+ E3_FaultSave_BE *FaultArea; -+ DMA_TRAP *trap; -+ register int i; -+ -+ ASSERT(SPINLOCK_HELD (&dev->IntrLock)); -+ -+ ctxt = ELAN3_DEV_CTX_TABLE(dev, dmaTrap->Status.s.Context); -+ -+ if (ctxt == NULL) -+ { -+ PRINTF1 (DBG_DEVICE, DBG_INTR, "DeliverDProcTrap: context %x invalid\n", dmaTrap->Status.s.Context); -+ BumpStat (dev, InvalidContext); -+ } -+ else -+ { -+ if (ELAN3_OP_DPROC_TRAP (ctxt, dmaTrap) == OP_DEFER) -+ { -+ if (ELAN3_QUEUE_REALLY_FULL (ctxt->DmaTrapQ)) -+ { -+ ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR; -+ StartSwapoutContext (ctxt, Pend, NULL); -+ } -+ else -+ { -+ trap = ELAN3_QUEUE_BACK (ctxt->DmaTrapQ, ctxt->DmaTraps); -+ -+ bcopy (dmaTrap, trap, sizeof (DMA_TRAP)); -+ -+ PRINTF5 (ctxt, DBG_INTR, "DeliverDProcTrap: WakeupFnt=%x Cntx=%x SuspAddr=%x PacketInfo=%x TrapType=%s\n", -+ trap->Status.s.WakeupFunction, trap->Status.s.Context, -+ trap->Status.s.SuspendAddr, trap->PacketInfo.Value, MiToName (trap->Status.s.TrapType)); -+ PRINTF3 (ctxt, DBG_INTR, " FaultAddr=%x EventAddr=%x FSR=%x\n", -+ trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress, -+ trap->FaultSave.s.FSR.Status); -+ for (i = 0, FaultArea = &trap->Data0; i < 4; i++, FaultArea++) -+ PRINTF4 (ctxt, DBG_INTR, " %d FaultAddr=%x EventAddr=%x FSR=%x\n", i, -+ FaultArea->s.FaultAddress, 
FaultArea->s.EventAddress, FaultArea->s.FSR.Status); -+ -+ PRINTF4 (ctxt, DBG_INTR, " type %08x size %08x source %08x dest %08x\n", -+ trap->Desc.s.dma_type, trap->Desc.s.dma_size, trap->Desc.s.dma_source, trap->Desc.s.dma_dest); -+ PRINTF2 (ctxt, DBG_INTR, " Dest event %08x cookie/proc %08x\n", -+ trap->Desc.s.dma_destEvent, trap->Desc.s.dma_destCookieVProc); -+ PRINTF2 (ctxt, DBG_INTR, " Source event %08x cookie/proc %08x\n", -+ trap->Desc.s.dma_srcEvent, trap->Desc.s.dma_srcCookieVProc); -+ ELAN3_QUEUE_ADD (ctxt->DmaTrapQ); -+ kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock); -+ -+ if (ELAN3_QUEUE_FULL (ctxt->DmaTrapQ)) -+ { -+ PRINTF0 (ctxt, DBG_INTR, "DeliverDProcTrap: dma queue full, must swap out\n"); -+ ctxt->Status |= CTXT_DMA_QUEUE_FULL; -+ -+ StartSwapoutContext (ctxt, Pend, NULL); -+ } -+ } -+ } -+ } -+} -+ -+int -+NextDProcTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap) -+{ -+ ELAN3_DEV *dev = ctxt->Device; -+ -+ ASSERT (SPINLOCK_HELD (&dev->IntrLock)); -+ -+ if (ELAN3_QUEUE_EMPTY (ctxt->DmaTrapQ)) -+ return (0); -+ -+ *trap = *ELAN3_QUEUE_FRONT (ctxt->DmaTrapQ, ctxt->DmaTraps); -+ ELAN3_QUEUE_REMOVE (ctxt->DmaTrapQ); -+ -+ return (1); -+} -+ -+void -+ResolveDProcTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap) -+{ -+ E3_FaultSave_BE *FaultArea; -+ int FaultHandled = 0; -+ int res; -+ register int i; -+ -+ PRINTF4 (ctxt, DBG_DPROC, "ResolveDProcTrap: WakeupFnt=%x Cntx=%x SuspAddr=%x TrapType=%s\n", -+ trap->Status.s.WakeupFunction, trap->Status.s.Context, -+ trap->Status.s.SuspendAddr, MiToName (trap->Status.s.TrapType)); -+ PRINTF3 (ctxt, DBG_DPROC, " FaultAddr=%x EventAddr=%x FSR=%x\n", -+ trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress, -+ trap->FaultSave.s.FSR.Status); -+ for (i = 0, FaultArea = &trap->Data0; i < 4; i++, FaultArea++) -+ PRINTF4 (ctxt, DBG_DPROC, " %d FaultAddr=%x EventAddr=%x FSR=%x\n", i, -+ FaultArea->s.FaultAddress, FaultArea->s.EventAddress, FaultArea->s.FSR.Status); -+ -+ PRINTF4 (ctxt, DBG_DPROC, " type %08x size %08x source %08x dest %08x\n", -+ trap->Desc.s.dma_type, trap->Desc.s.dma_size, trap->Desc.s.dma_source, trap->Desc.s.dma_dest); -+ PRINTF2 (ctxt, DBG_DPROC, " Dest event %08x cookie/proc %08x\n", -+ trap->Desc.s.dma_destEvent, trap->Desc.s.dma_destCookieVProc); -+ PRINTF2 (ctxt, DBG_DPROC, " Source event %08x cookie/proc %08x\n", -+ trap->Desc.s.dma_srcEvent, trap->Desc.s.dma_srcCookieVProc); -+ -+ BumpUserStat (ctxt, DProcTraps); -+ -+ switch (trap->Status.s.TrapType) -+ { -+ case MI_DmaPacketTimedOutOrPacketError: -+ /* -+ * Faulted due to packet timeout or a PAckError. -+ * Reset fail count and reissue the same desc. -+ */ -+ PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: got a PAckError or the output timed out. Rescheduling dma.\n"); -+ if (ElanException (ctxt, EXCEPTION_PACKET_TIMEOUT, DMA_PROC, trap) == OP_IGNORE) -+ { -+ BumpUserStat (ctxt, DmaRetries); -+ -+ trap->Desc.s.dma_failCount = DMA_RETRY_FAIL_COUNT; -+ -+ RestartDmaTrap (ctxt, trap); -+ } -+ return; -+ -+ case MI_DmaFailCountError: -+ /* -+ * Faulted due to dma fail count. -+ * Reset fail count and reissue the same desc. 
-+ */ -+ PRINTF1 (ctxt, DBG_DPROC, "ResolveDProcTrap: Reset dma fail count to %d\n", DMA_RETRY_FAIL_COUNT); -+ -+ if (ElanException (ctxt, EXCEPTION_DMA_RETRY_FAIL, DMA_PROC, trap) == OP_IGNORE) -+ { -+ BumpUserStat (ctxt, DmaRetries); -+ -+ trap->Desc.s.dma_failCount = DMA_RETRY_FAIL_COUNT; -+ -+ RestartDmaTrap (ctxt, trap); -+ } -+ return; -+ -+ case MI_TimesliceDmaQueueOverflow: -+ PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: dma timeslice queue overflow\n"); -+ RestartDmaTrap (ctxt, trap); -+ return; -+ -+ case MI_UnimplementedError: -+ PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: unimplemented dma trap\n"); -+ if (ElanException (ctxt, EXCEPTION_UNIMPLEMENTED, DMA_PROC, trap) == OP_IGNORE) -+ RestartDmaTrap (ctxt, trap); -+ return; -+ -+ case MI_EventQueueOverflow: -+ case MI_ThreadQueueOverflow: -+ case MI_DmaQueueOverflow: -+ PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: trapped on a write set event.\n"); -+ FixupEventTrap (ctxt, DMA_PROC, trap, trap->Status.s.TrapType, &trap->FaultSave, 0); -+ return; -+ -+ case MI_RemoteDmaCommand: -+ case MI_RunDmaCommand: -+ case MI_DequeueNonSysCntxDma: -+ case MI_DequeueSysCntxDma: -+ /* -+ * The DMA processor has trapped due to outstanding prefetches from the previous -+ * dma. The "current" dma has not been consumed, so we just ignore the trap -+ */ -+ return; -+ -+ case MI_WaitForRemoteDescRead2: -+ case MI_ExecuteDmaDescriptorForRun: -+ /* -+ * The DMA processor has trapped while fetching the dma descriptor, so -+ * zero it out to not confuse the user on an error -+ */ -+ bzero (&trap->Desc, sizeof (trap->Desc)); -+ break; -+ } -+ -+ /* -+ * All other uWords will have updated one of the fault areas, so fix -+ * any faults found in them. If there were no faults found then it -+ * must have been a bus error -+ */ -+ for (i = 0, FaultArea = &trap->Data0; i < 4; i++, FaultArea++) -+ { -+ if (FaultArea->s.FSR.Status != 0) -+ { -+ FaultHandled++; -+ -+ ASSERT ((FaultArea->s.FSR.Status & FSR_SizeMask) == FSR_Block64 || -+ (FaultArea->s.FSR.Status & FSR_SizeMask) == FSR_Block32); -+ -+ ASSERT (FaultArea->s.FaultContext == trap->Status.s.Context); -+ -+ if (((trap->Desc.s.dma_source & PAGEOFFSET) >= (PAGESIZE-E3_BLK_SIZE)) && -+ ((trap->Desc.s.dma_source & PAGEMASK) != ((trap->Desc.s.dma_source + trap->Desc.s.dma_size-1) & PAGEMASK))) -+ { -+ /* XXXX: dma started within last 64 bytes of the page -+ * terminate the process if it has pagefaulted */ -+ if (FaultArea->s.FaultAddress == (trap->Desc.s.dma_source & ~(E3_BLK_SIZE-1))) -+ { -+ printk ("elan%d: invalid dma - context=%x source=%x\n", ctxt->Device->Instance, -+ ctxt->Capability.cap_mycontext, trap->Desc.s.dma_source); -+ -+ if (ElanException (ctxt, EXCEPTION_BAD_DMA, DMA_PROC, trap, NULL, 0) != OP_IGNORE) -+ return; -+ } -+ } -+ -+ if (trap->Desc.s.dma_size != 0 && (res = elan3_pagefault (ctxt, FaultArea, 1)) != ESUCCESS) -+ { -+ /* XXXX: Rev B Elans can prefetch data passed the end of the dma descriptor */ -+ /* if the fault relates to this, then just ignore it */ -+ if (FaultArea->s.FaultAddress < (trap->Desc.s.dma_source+trap->Desc.s.dma_size) || -+ FaultArea->s.FaultAddress > (trap->Desc.s.dma_source+trap->Desc.s.dma_size+E3_BLK_SIZE*2)) -+ { -+ PRINTF1 (ctxt, DBG_DPROC, "ResolveDProcTrap: elan3_pagefault failed for address %x\n", -+ FaultArea->s.FaultAddress); -+ -+ if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, DMA_PROC, trap, FaultArea, res) != OP_IGNORE) -+ return; -+ } -+ } -+ } -+ } -+ -+ if (trap->FaultSave.s.FSR.Status != 0) -+ { -+ FaultHandled++; -+ -+ ASSERT 
(trap->FaultSave.s.FaultContext == trap->Status.s.Context); -+ -+ if ((trap->FaultSave.s.FSR.Status & FSR_SizeMask) == FSR_RouteFetch) -+ { -+ res = ResolveVirtualProcess (ctxt, trap->FaultSave.s.FaultAddress & 0xffff); /* mask out cookie */ -+ -+ switch (res) -+ { -+ default: -+ if (ElanException (ctxt, EXCEPTION_INVALID_PROCESS, DMA_PROC, trap, trap->FaultSave.s.FaultAddress, res) != OP_IGNORE) -+ return; -+ -+ case EAGAIN: -+ /* XXXX; wait on trail blazing code */ -+ -+ case 0: -+ break; -+ } -+ } -+ else -+ { -+ if ((res = elan3_pagefault (ctxt, &trap->FaultSave, 1)) != ESUCCESS) -+ { -+ PRINTF1 (ctxt, DBG_DPROC, "ResolveDProcTrap: elan3_pagefault failed for address %x\n", -+ trap->FaultSave.s.FaultAddress); -+ -+ if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, DMA_PROC, trap, &trap->FaultSave, res) != OP_IGNORE) -+ return; -+ } -+ } -+ } -+ -+ if (! FaultHandled) -+ { -+ ElanBusError (ctxt->Device); -+ -+ if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, DMA_PROC, trap, &trap->FaultSave, EFAULT) != OP_IGNORE) -+ return; -+ } -+ -+ switch (trap->Status.s.TrapType) -+ { -+ case MI_WaitForRemoteDescRead2: -+ /* -+ * Faulted while trying to read the dma descriptor for a read dma. -+ * Fix fault and re-issue using FaultAddress. -+ */ -+ PRINTF1 (ctxt, DBG_DPROC, "ResolveDProcTrap: trapped reading a remote dma descriptor at %x.\n", -+ trap->FaultSave.s.FaultAddress); -+ -+ RestartDmaPtr (ctxt, trap->FaultSave.s.FaultAddress); -+ break; -+ -+ case MI_ExecuteDmaDescriptorForRun: -+ /* -+ * Faulted while trying to read the dma descriptor for a write dma. -+ * Fix fault and re-issue using FaultAddress. -+ */ -+ PRINTF1 (ctxt, DBG_DPROC, "ResolveDProcTrap: trapped reading a write dma descriptor at %x.\n", -+ trap->FaultSave.s.FaultAddress); -+ -+ RestartDmaPtr (ctxt, trap->FaultSave.s.FaultAddress); -+ break; -+ -+ case MI_WaitForRemoteRoutes1: -+ case MI_WaitForRemoteRoutes2: -+ case MI_SendRemoteDmaDesc: -+ case MI_SendDmaIdentify: -+ case MI_SendRemoteDmaRoutes2: -+ case MI_WaitForDmaRoutes1: -+ case MI_DmaLoop: -+ case MI_ExitDmaLoop: -+ case MI_GetDestEventValue: -+ case MI_SendFinalUnlockTrans: -+ case MI_SendNullSetEvent: -+ case MI_SendFinalSetEvent: -+ case MI_SendDmaEOP: -+ /* -+ * Faulted either fetching routes or fetching dma data. -+ * Fix fault and re-issue using FaultAddress. -+ */ -+ -+ case MI_SendEOPforRemoteDma: -+ case MI_LookAtRemoteAck: -+ case MI_FailedAckIfCCis0: -+ /* -+ * Possible fault when reading the remote desc into the dma data buffers -+ */ -+ PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: trapped reading a dma data or fetching a route\n"); -+ RestartDmaTrap (ctxt, trap); -+ break; -+ -+ case MI_DequeueSysCntxDma: -+ case MI_DequeueNonSysCntxDma: -+ case MI_RemoteDmaCommand: -+ case MI_RunDmaCommand: -+ /* -+ * It is possible that a dma can get back onto the queue while outstanding dma -+ * have not finished trapping. In this case the trap can be ignored as the dma -+ * state has been saved. It might trap again the next time it comes to the front -+ * of the queue and be fixed then. -+ */ -+ PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: trap after dma has finished. 
ignored\n"); -+ break; -+ -+ default: -+ PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: trapped on a write set event.\n"); -+ FixupEventTrap (ctxt, DMA_PROC, trap, trap->Status.s.TrapType, &trap->FaultSave, 0); -+ break; -+ } -+} -+ -+int -+DProcNeedsRestart (ELAN3_CTXT *ctxt) -+{ -+ return (ctxt->ItemCount[LIST_DMA_PTR] != 0 || -+ ctxt->ItemCount[LIST_DMA_DESC] != 0); -+} -+ -+void -+RestartDProcItems (ELAN3_CTXT *ctxt) -+{ -+ void *item; -+ E3_Addr value; -+ int res; -+ -+ kmutex_lock (&ctxt->SwapListsLock); -+ while (ctxt->ItemCount[LIST_DMA_PTR]) -+ { -+ if (! ELAN3_OP_GET_WORD_ITEM (ctxt, LIST_DMA_PTR, &item, &value)) -+ ctxt->ItemCount[LIST_DMA_PTR] = 0; -+ else -+ { -+ PRINTF1 (ctxt, DBG_DPROC, "RestartDProc: issue write dma at %x\n", value); -+ PrintUserDma (ctxt, value); -+ -+ res = IssueDmaCommand (ctxt, value, NULL, 0); -+ -+ if (res == ISSUE_COMMAND_RETRY) -+ { -+ ELAN3_OP_PUTBACK_ITEM (ctxt, LIST_DMA_PTR, item); -+ kmutex_unlock (&ctxt->SwapListsLock); -+ return; -+ } -+ -+ ctxt->ItemCount[LIST_DMA_PTR]--; -+ ELAN3_OP_FREE_WORD_ITEM (ctxt, item); -+ } -+ } -+ -+ while (ctxt->ItemCount[LIST_DMA_DESC]) -+ { -+ if (! ELAN3_OP_GET_BLOCK_ITEM (ctxt, LIST_DMA_DESC, &item, &value)) -+ ctxt->ItemCount[LIST_DMA_DESC] = 0; -+ else -+ { -+ PRINTF1 (ctxt, DBG_DPROC, "RestartDProc: issue dma desc at %x\n", value); -+ PrintUserDma (ctxt, value); -+ -+ res = IssueDmaCommand (ctxt, value, item, 0); -+ -+ switch (res) -+ { -+ case ISSUE_COMMAND_OK: -+ ctxt->ItemCount[LIST_DMA_DESC]--; -+ ELAN3_OP_FREE_BLOCK_ITEM (ctxt, item); -+ break; -+ -+ case ISSUE_COMMAND_RETRY: -+ ELAN3_OP_PUTBACK_ITEM (ctxt, LIST_DMA_DESC, item); -+ kmutex_unlock (&ctxt->SwapListsLock); -+ return; -+ -+ case ISSUE_COMMAND_TRAPPED: -+ ctxt->ItemCount[LIST_DMA_DESC]--; -+ /* The item will be freed off when the command port trap */ -+ /* fixed up and the command successfully re-issued */ -+ break; -+ } -+ } -+ } -+ -+ kmutex_unlock (&ctxt->SwapListsLock); -+} -+ -+void -+RestartDmaDesc(ELAN3_CTXT *ctxt, E3_DMA_BE *desc) -+{ -+ kmutex_lock (&ctxt->SwapListsLock); -+ if (desc->s.dma_direction != DMA_WRITE) -+ desc->s.dma_direction = (desc->s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE; -+ -+ ELAN3_OP_PUT_BLOCK_ITEM (ctxt, LIST_DMA_DESC, (E3_uint32 *) desc); -+ ctxt->ItemCount[LIST_DMA_DESC]++; -+ -+ kmutex_unlock (&ctxt->SwapListsLock); -+} -+ -+void -+RestartDmaTrap(ELAN3_CTXT *ctxt, DMA_TRAP *trap) -+{ -+ /* Negative length DMAs are illegal, since they hangup the dma processor, -+ * if they got generated then they will have been spotted by PollForDmahungup, -+ * and delivered to us with a Dequeue suspend address, -+ * -+ * GNAT sw-elan3/3908: Moved this check into this new function to avoid -+ * it sampling old or invalid register state -+ */ -+ if (trap->Desc.s.dma_size > E3_MAX_DMA_SIZE) -+ ElanException (ctxt, EXCEPTION_BAD_DMA, DMA_PROC, trap, NULL, 0); -+ else -+ RestartDmaDesc (ctxt, &trap->Desc); -+} -+ -+void -+RestartDmaPtr (ELAN3_CTXT *ctxt, E3_Addr ptr) -+{ -+ kmutex_lock (&ctxt->SwapListsLock); -+ ELAN3_OP_PUT_WORD_ITEM (ctxt, LIST_DMA_PTR, ptr); -+ ctxt->ItemCount[LIST_DMA_PTR]++; -+ kmutex_unlock (&ctxt->SwapListsLock); -+} -+ -+static void -+PrintUserDma (ELAN3_CTXT *ctxt, E3_Addr addr) -+{ -+ E3_DMA *dma; -+ -+ /* Dont call a function which takes locks unless we need to */ -+ if (!(elan3_debug & DBG_DPROC)) -+ return; -+ -+ dma = (E3_DMA *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr); -+ -+ PRINTF4 (ctxt, DBG_DPROC, "DMA: type %08x size %08x source %08x dest %08x\n", -+ fuword ((int *) &dma->dma_type), fuword 
((int *) &dma->dma_size), -+ fuword ((int *) &dma->dma_source), fuword ((int *) &dma->dma_dest)); -+ PRINTF4 (ctxt, DBG_DPROC, "DMA: Dest %08x %08x Local %08x %08x\n", -+ fuword ((int *) &dma->dma_destEvent), fuword ((int *) &dma->dma_destCookieProc), -+ fuword ((int *) &dma->dma_srcEvent), fuword ((int *) &dma->dma_srcCookieProc)); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan3/elan3mmu_generic.c linux-2.6.9/drivers/net/qsnet/elan3/elan3mmu_generic.c ---- clean/drivers/net/qsnet/elan3/elan3mmu_generic.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan3/elan3mmu_generic.c 2004-12-14 05:19:38.000000000 -0500 -@@ -0,0 +1,3255 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: elan3mmu_generic.c,v 1.76 2004/12/14 10:19:38 mike Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/vm/elan3mmu_generic.c,v $*/ -+ -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#ifdef CONFIG_MPSAS -+# define zero_all_ptbls -+#endif -+ -+/* -+ * Debugging -+ */ -+int elan3mmu_debug = 0; -+ -+#define N_L3PTBL_MTX (0x20) -+#define N_L2PTBL_MTX (0x40) -+#define N_L1PTBL_MTX (0x20) -+ -+#define L3PTBL_MTX_HASH(p) \ -+ ((((uintptr_t)(p) >> 12) ^ ((uintptr_t)(p) >> 2)) & (N_L3PTBL_MTX - 1)) -+static spinlock_t l3ptbl_lock[N_L3PTBL_MTX]; -+ -+#define L2PTBL_MTX_HASH(p) \ -+ ((((uintptr_t)(p) >> 12) ^ ((uintptr_t)(p) >> 2)) & (N_L2PTBL_MTX - 1)) -+static spinlock_t l2ptbl_lock[N_L2PTBL_MTX]; -+ -+#define L1PTBL_MTX_HASH(p) \ -+ ((((uintptr_t)(p) >> 12) ^ ((uintptr_t)(p) >> 2)) & (N_L1PTBL_MTX - 1)) -+static spinlock_t l1ptbl_lock[N_L1PTBL_MTX]; -+ -+ -+#define BASE2VA(p) ((E3_Addr)((p)->ptbl_base << 16)) -+#define VA2BASE(v) ((u_short)(((uintptr_t)(v)) >> 16)) -+ -+ELAN3MMU_GLOBAL_STATS elan3mmu_global_stats; -+ -+static void elan3mmu_flush_context_filter (ELAN3_DEV *dev, void *); -+static void elan3mmu_unload_loop (ELAN3MMU *elan3mmu, ELAN3_PTBL *ptbl, int first_valid, int nptes, int flags); -+ -+static ELAN3_PTBL *elan3mmu_create_ptbls (ELAN3_DEV *dev, int level, int attr, int keep); -+static ELAN3_PTBL *elan3mmu_ta_to_ptbl (ELAN3MMU *elan3mmu, ELAN3_PTP *ptp); -+ -+static ELAN3_PTBL *elan3mmu_alloc_pte (ELAN3_DEV *dev, ELAN3MMU *elan3mmu, int *idx); -+void elan3mmu_free_lXptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl); -+ -+void elan3mmu_free_pte (ELAN3_DEV *dev, ELAN3MMU *elan3mmu, ELAN3_PTBL *ptbl_ptr, int idx); -+ -+static ELAN3_PTBL *elan3mmu_alloc_l1ptbl (ELAN3_DEV *dev, int attr, ELAN3MMU *elan3mmu); -+static ELAN3_PTBL *elan3mmu_alloc_l2ptbl (ELAN3_DEV *dev, int attr, ELAN3_PTBL *parent, ELAN3MMU *elan3mmu, -+ E3_Addr base, spinlock_t **plock, unsigned long *flags); -+static ELAN3_PTBL *elan3mmu_alloc_l3ptbl (ELAN3_DEV *dev, int attr, ELAN3_PTBL *parent, ELAN3MMU *elan3mmu, -+ E3_Addr base, spinlock_t **plock, unsigned long *flags); -+ -+static int elan3mmu_steal_this_ptbl (ELAN3_DEV *dev, ELAN3_PTBL *l3ptbl); -+static ELAN3_PTBL *elan3mmu_steal_l3ptbl (ELAN3_DEV *dev, int attr); -+ -+static spinlock_t *elan3mmu_ptbl_to_lock (int level, ELAN3_PTBL *ptbl); -+ -+/* -+ * Encoding of MMU permissions against access type, -+ * to allow quick permission checking against access -+ * type. 
-+ */ -+u_char elan3mmu_permissionTable[] = -+{ -+ 0xcc, /* 11001100 ELAN3_PERM_NULL */ -+ 0x01, /* 00000001 ELAN3_PERM_LOCALREAD */ -+ 0x05, /* 00000101 ELAN3_PERM_READ */ -+ 0x33, /* 00110011 ELAN3_PERM_NOREMOTE */ -+ 0x37, /* 00110111 ELAN3_PERM_REMOTEREAD */ -+ 0x3f, /* 00111111 ELAN3_PERM_REMOTEWRITE */ -+ 0xf7, /* 11110111 ELAN3_PERM_REMOTEEVENT */ -+ 0xff, /* 11111111 ELAN3_PERM_REMOTEALL */ -+} ; -+ -+void -+elan3mmu_init() -+{ -+ register int i; -+ -+ HAT_PRINTF0 (1, "elan3mmu_init: initialising elan mmu\n"); -+ -+ for (i = 0; i < N_L1PTBL_MTX; i++) -+ spin_lock_init (&l1ptbl_lock[i]); -+ -+ for (i = 0; i < N_L2PTBL_MTX; i++) -+ spin_lock_init (&l2ptbl_lock[i]); -+ -+ for (i = 0; i < N_L3PTBL_MTX; i++) -+ spin_lock_init (&l3ptbl_lock[i]); -+ -+ elan3mmu_global_stats.version = ELAN3MMU_STATS_VERSION; -+ -+ elan3mmu_init_osdep(); -+} -+ -+void -+elan3mmu_fini() -+{ -+ register int i; -+ -+ HAT_PRINTF0 (1, "elan3mmu_fini: finalising elan mmu\n"); -+ -+ for (i = 0; i < N_L1PTBL_MTX; i++) -+ spin_lock_destroy (&l1ptbl_lock[i]); -+ -+ for (i = 0; i < N_L2PTBL_MTX; i++) -+ spin_lock_destroy (&l2ptbl_lock[i]); -+ -+ for (i = 0; i < N_L3PTBL_MTX; i++) -+ spin_lock_destroy (&l3ptbl_lock[i]); -+ -+ elan3mmu_fini_osdep(); -+} -+ -+ELAN3MMU * -+elan3mmu_alloc (ELAN3_CTXT *ctxt) -+{ -+ ELAN3MMU *elan3mmu; -+ ELAN3_PTBL *l1ptbl; -+ -+ ALLOC_ELAN3MMU (elan3mmu, TRUE); -+ -+ spin_lock_init (&elan3mmu->elan3mmu_lock); -+ -+ spin_lock (&elan3mmu->elan3mmu_lock); /* lock_lint */ -+ -+ elan3mmu->elan3mmu_ergns = NULL; -+ elan3mmu->elan3mmu_etail = NULL; -+ elan3mmu->elan3mmu_ergnlast = NULL; -+ elan3mmu->elan3mmu_mrgns = NULL; -+ elan3mmu->elan3mmu_mtail = NULL; -+ elan3mmu->elan3mmu_mrgnlast = NULL; -+ elan3mmu->elan3mmu_ctxt = ctxt; -+ -+ spin_lock_init (&elan3mmu->elan3mmu_lXptbl_lock); -+ elan3mmu->elan3mmu_lXptbl = NULL; -+ -+ spin_unlock (&elan3mmu->elan3mmu_lock); /* lock_lint */ -+ -+ l1ptbl = elan3mmu_alloc_l1ptbl(ctxt->Device, 0, elan3mmu); -+ -+ elan3mmu->elan3mmu_ctp = (sdramaddr_t) 0; -+ elan3mmu->elan3mmu_dev = ctxt->Device; -+ elan3mmu->elan3mmu_l1ptbl = l1ptbl; -+ -+ /* Ensure that there are at least some level 3 page tables, since if a level 2 and */ -+ /* a level 3 table are allocated together, then the level 3 is allocated with the NO_ALLOC */ -+ /* flag, thus there MUST be at least one that can be stolen or on the free list */ -+ if (elan3mmu->elan3mmu_dev->Level[PTBL_LEVEL_3].PtblFreeList == NULL) -+ elan3mmu_create_ptbls (elan3mmu->elan3mmu_dev, PTBL_LEVEL_3, 0, 0); -+ -+ HAT_PRINTF1 (1, "elan3mmu_alloc: elan3mmu %p\n", elan3mmu); -+ -+ elan3mmu_alloc_osdep (elan3mmu); -+ -+ return (elan3mmu); -+} -+ -+void -+elan3mmu_free (ELAN3MMU *elan3mmu) -+{ -+ ELAN3MMU_RGN *rgn; -+ ELAN3_PTBL *l1ptbl; -+ spinlock_t *l1lock; -+ unsigned long l1flags; -+ unsigned long flags; -+ -+ HAT_PRINTF1 (1, "elan3mmu_free : elan3mmu %p\n", elan3mmu); -+ -+ /* -+ * Invalidate the level1 page table; since it's already removed -+ * from the context table, there is no need to flush the tlb. -+ */ -+ l1ptbl = elan3mmu->elan3mmu_l1ptbl; -+ elan3mmu->elan3mmu_l1ptbl = NULL; -+ -+ if (elan3mmu_lock_ptbl (l1ptbl, LK_PTBL_FAILOK, elan3mmu, (E3_Addr) 0, PTBL_LEVEL_1, &l1lock, &l1flags) == LK_PTBL_OK) -+ { -+ elan3mmu_l1inval (elan3mmu, l1ptbl, PTE_UNLOAD_NOFLUSH); -+ elan3mmu_free_l1ptbl (elan3mmu->elan3mmu_dev, l1ptbl, l1lock, l1flags); -+ } -+ -+ /* -+ * Free off any permission regions. 
-+ */ -+ spin_lock (&elan3mmu->elan3mmu_lock); /* lock_lint */ -+ while ((rgn = elan3mmu->elan3mmu_mrgns) != NULL) -+ { -+ spin_lock_irqsave (&elan3mmu->elan3mmu_dev->IntrLock, flags); /* lock_lint */ -+ elan3mmu_removergn_elan (elan3mmu, rgn->rgn_ebase); -+ elan3mmu_removergn_main (elan3mmu, rgn->rgn_mbase); -+ spin_unlock_irqrestore (&elan3mmu->elan3mmu_dev->IntrLock, flags); /* lock_lint */ -+ -+ FREE_ELAN3MMU_RGN (rgn); -+ } -+ elan3mmu->elan3mmu_mrgnlast = NULL; -+ elan3mmu->elan3mmu_ergnlast = NULL; -+ -+ /* -+ * Free the lXptbl list -+ */ -+ ASSERT (elan3mmu->elan3mmu_lXptbl == NULL); /* XXXX MRH need to add list removal */ -+ -+ elan3mmu->elan3mmu_lXptbl = NULL; -+ spin_lock_destroy (&elan3mmu->elan3mmu_lXptbl_lock); -+ -+ -+ spin_unlock (&elan3mmu->elan3mmu_lock); /* lock_lint */ -+ -+ spin_lock_destroy (&elan3mmu->elan3mmu_lock); -+ -+ FREE_ELAN3MMU (elan3mmu); -+} -+ -+/*================================================================================*/ -+/* Interface routines to device driver */ -+static void -+elan3mmu_flush_context_filter (ELAN3_DEV *dev, void *arg) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ ASSERT ((read_reg32 (dev, Exts.InterruptReg) & (INT_DiscardingSysCntx | INT_DiscardingNonSysCntx)) == -+ (INT_DiscardingSysCntx | INT_DiscardingNonSysCntx)); -+ -+ dev->FilterHaltQueued = 0; -+ -+ write_reg32 (dev, Input_Context_Fil_Flush, 0); -+ -+ HAT_PRINTF0 (1, "elan3mmu_flush_context_filter completed\n"); -+ -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+} -+ -+void -+elan3mmu_set_context_filter (ELAN3_DEV *dev, int ctx, int disabled, E3_uint32 Pend, E3_uint32 *Maskp) -+{ -+ int mctx = ctx & MAX_ROOT_CONTEXT_MASK; -+ sdramaddr_t ctp = dev->ContextTable + mctx * sizeof (E3_ContextControlBlock); -+ -+ ASSERT (SPINLOCK_HELD (&dev->IntrLock)); -+ -+ ASSERT ((mctx < 32 || mctx >= ELAN3_KCOMM_BASE_CONTEXT_NUM) ? (ctx & SYS_CONTEXT_BIT) : ! (ctx & SYS_CONTEXT_BIT)); -+ -+ elan3_sdram_writel (dev, ctp + offsetof (E3_ContextControlBlock, filter), -+ ((ctx & SYS_CONTEXT_BIT) ? E3_CCB_CNTX0 : 0) | (disabled ? E3_CCB_DISCARD_ALL : 0)); -+ -+ HAT_PRINTF4 (1, "elan3mmu_set_context_filter: ctx %x [%lx] -> %s (%x)\n", ctx, ctp, -+ disabled ? 
"up" : "down", elan3_sdram_readl (dev, ctp + offsetof (E3_ContextControlBlock, filter))); -+ -+ /* queue a halt operation to flush the context filter while the inputter is halted */ -+ if (dev->FilterHaltQueued == 0) -+ { -+ dev->FilterHaltQueued = 1; -+ QueueHaltOperation (dev, Pend, Maskp, INT_DiscardingSysCntx | INT_DiscardingNonSysCntx, -+ elan3mmu_flush_context_filter, NULL); -+ } -+} -+ -+int -+elan3mmu_attach (ELAN3_DEV *dev, int ctx, ELAN3MMU *elan3mmu, sdramaddr_t routeTable, E3_uint32 routeMask) -+{ -+ sdramaddr_t ctp; -+ ELAN3_PTP trootptp; -+ -+ ASSERT (SPINLOCK_HELD (&dev->IntrLock)); -+ -+ ctx &= MAX_ROOT_CONTEXT_MASK; /* Mask out all high bits in context */ -+ -+ if (ctx < 0 || ctx >= dev->ContextTableSize) -+ return (EINVAL); -+ -+ ctp = dev->ContextTable + ctx * sizeof (E3_ContextControlBlock); -+ -+ trootptp = elan3_readptp (dev, ctp + offsetof (E3_ContextControlBlock, rootPTP)); -+ -+ if (ELAN3_PTP_TYPE(trootptp) != ELAN3_ET_INVALID) -+ return (EBUSY); -+ -+ elan3mmu->elan3mmu_ctp = ctp; -+ -+ trootptp = PTBL_TO_PTADDR (elan3mmu->elan3mmu_l1ptbl) | ELAN3_ET_PTP; -+ -+ HAT_PRINTF4 (1, "elan3mmu_attach: ctp at %08lx : trootptp=%08x VPT_ptr=%08lx VPT_mask=%08x\n", -+ ctp, trootptp, routeTable, routeMask); -+ -+ elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, rootPTP), trootptp); -+ elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, VPT_ptr), routeTable); -+ elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, VPT_mask), routeMask); -+ -+ return (ESUCCESS); -+} -+ -+void -+elan3mmu_detach (ELAN3_DEV *dev, int ctx) -+{ -+ ELAN3_PTP invalidptp = ELAN3_INVALID_PTP; -+ sdramaddr_t ctp; -+ -+ ctx &= MAX_ROOT_CONTEXT_MASK; /* Mask out all high bits in context */ -+ -+ if (ctx < 0 || ctx >= dev->ContextTableSize) -+ return; -+ -+ ctp = dev->ContextTable + ctx * sizeof (E3_ContextControlBlock); -+ -+ HAT_PRINTF1 (1, "elan3mmu_detach: clearing ptp at %lx\n", ctp); -+ -+ elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, rootPTP), invalidptp); -+ elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, VPT_mask), 0); -+ elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, VPT_ptr), 0); -+ -+ ElanFlushTlb (dev); -+} -+ -+int -+elan3mmu_reference (ELAN3MMU *elan3mmu, int ctx) -+{ -+ ELAN3_DEV *dev = elan3mmu->elan3mmu_dev; -+ sdramaddr_t ctp; -+ E3_ContextControlBlock ccb; -+ ELAN3_PTP trootptp; -+ -+ ctx &= MAX_ROOT_CONTEXT_MASK; /* Mask out all high bits in context */ -+ -+ if (ctx < 0 || ctx >= dev->ContextTableSize) -+ return (EINVAL); -+ -+ ctp = dev->ContextTable + ctx * sizeof (E3_ContextControlBlock); -+ -+ trootptp = elan3_readptp (dev, ctp + offsetof (E3_ContextControlBlock, rootPTP)); -+ -+ if (ELAN3_PTP_TYPE(trootptp) != ELAN3_ET_INVALID) -+ return (EBUSY); -+ -+ elan3_sdram_copyl_from_sdram (dev, elan3mmu->elan3mmu_ctp, &ccb, sizeof (E3_ContextControlBlock)); -+ elan3_sdram_copyl_to_sdram (dev, &ccb, ctp, sizeof (E3_ContextControlBlock)); -+ -+ return (ESUCCESS); -+ -+} -+/*================================================================================*/ -+/* Elan permission regions. 
*/ -+ -+/* elan address region management */ -+ELAN3MMU_RGN * -+elan3mmu_findrgn_elan (ELAN3MMU *elan3mmu, -+ E3_Addr addr, int tail) -+{ -+ ELAN3MMU_RGN *next = NULL; -+ ELAN3MMU_RGN *rgn; -+ ELAN3MMU_RGN *hirgn; -+ ELAN3MMU_RGN *lorgn; -+ E3_Addr base; -+ E3_Addr lastaddr; -+ int forward; -+ -+ ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) || SPINLOCK_HELD (&elan3mmu->elan3mmu_lock)); -+ -+ if (elan3mmu->elan3mmu_ergns == NULL) -+ return (NULL); -+ -+ rgn = elan3mmu->elan3mmu_ergnlast; -+ if (rgn == NULL) -+ rgn = elan3mmu->elan3mmu_ergns; -+ -+ forward = 0; -+ if ((u_long) (base = rgn->rgn_ebase) < (u_long)addr) -+ { -+ if ((u_long)addr <= ((u_long) base + rgn->rgn_len - 1)) -+ return (rgn); /* ergnlast contained addr */ -+ -+ hirgn = elan3mmu->elan3mmu_etail; -+ -+ if ((u_long) (lastaddr = (hirgn->rgn_ebase + hirgn->rgn_len - 1)) < (u_long) addr) -+ return (tail ? hirgn : NULL); /* addr is out of range */ -+ -+ if ((u_long) (addr - base) > (u_long) (lastaddr - addr)) -+ rgn = hirgn; -+ else -+ { -+ rgn = rgn->rgn_enext; -+ forward++; -+ } -+ } -+ else -+ { -+ lorgn = elan3mmu->elan3mmu_ergns; -+ -+ if ((u_long)lorgn->rgn_ebase > (u_long) addr) -+ return (lorgn); /* lowest region is higher than addr */ -+ if ((u_long)(addr - lorgn->rgn_ebase) < (u_long) (base - addr)) -+ { -+ rgn = lorgn; /* search forward from head */ -+ forward++; -+ } -+ } -+ if (forward) -+ { -+ while ((u_long)(rgn->rgn_ebase + rgn->rgn_len - 1) < (u_long)addr) -+ rgn = rgn->rgn_enext; -+ -+ if ((u_long)rgn->rgn_ebase <= (u_long)addr) -+ elan3mmu->elan3mmu_ergnlast = rgn; -+ return (rgn); -+ } -+ else -+ { -+ while ((u_long)rgn->rgn_ebase > (u_long)addr) -+ { -+ next = rgn; -+ rgn = rgn->rgn_eprev; -+ } -+ -+ if ((u_long) (rgn->rgn_ebase + rgn->rgn_len - 1) < (u_long)addr) -+ return (next); -+ else -+ { -+ elan3mmu->elan3mmu_ergnlast = rgn; -+ return (rgn); -+ } -+ } -+} -+ -+int -+elan3mmu_addrgn_elan (ELAN3MMU *elan3mmu, ELAN3MMU_RGN *nrgn) -+{ -+ ELAN3MMU_RGN *rgn = elan3mmu_findrgn_elan (elan3mmu, nrgn->rgn_ebase, 1); -+ E3_Addr nbase = nrgn->rgn_ebase; -+ E3_Addr ntop = nbase + nrgn->rgn_len - 1; /* avoid wrap */ -+ E3_Addr base; -+ -+ ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) && SPINLOCK_HELD (&elan3mmu->elan3mmu_lock)); -+ -+ if (rgn == NULL) -+ { -+ elan3mmu->elan3mmu_ergns = elan3mmu->elan3mmu_etail = nrgn; -+ nrgn->rgn_enext = nrgn->rgn_eprev = NULL; -+ } -+ else -+ { -+ base = rgn->rgn_ebase; -+ -+ if ((u_long)(base + rgn->rgn_len - 1) < (u_long)nbase) /* top of region below requested address */ -+ { /* so insert after region (and hence at end */ -+ nrgn->rgn_eprev = rgn; /* of list */ -+ nrgn->rgn_enext = NULL; -+ rgn->rgn_enext = elan3mmu->elan3mmu_etail = nrgn; -+ } -+ else -+ { -+ if ((u_long)nbase >= (u_long)base || (u_long)ntop >= (u_long)base) -+ return (-1); /* overlapping region */ -+ -+ nrgn->rgn_enext = rgn; /* insert before region */ -+ nrgn->rgn_eprev = rgn->rgn_eprev; -+ rgn->rgn_eprev = nrgn; -+ if (elan3mmu->elan3mmu_ergns == rgn) -+ elan3mmu->elan3mmu_ergns = nrgn; -+ else -+ nrgn->rgn_eprev->rgn_enext = nrgn; -+ } -+ } -+ elan3mmu->elan3mmu_ergnlast = nrgn; -+ -+ return (0); -+} -+ -+ELAN3MMU_RGN * -+elan3mmu_removergn_elan (ELAN3MMU *elan3mmu, E3_Addr addr) -+{ -+ ELAN3MMU_RGN *rgn = elan3mmu_findrgn_elan (elan3mmu, addr, 0); -+ -+ ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) && SPINLOCK_HELD (&elan3mmu->elan3mmu_lock)); -+ -+ if (rgn == NULL || rgn->rgn_ebase != addr) -+ return (NULL); -+ -+ elan3mmu->elan3mmu_ergnlast = rgn->rgn_enext; -+ if 
(rgn == elan3mmu->elan3mmu_etail) -+ elan3mmu->elan3mmu_etail = rgn->rgn_eprev; -+ else -+ rgn->rgn_enext->rgn_eprev = rgn->rgn_eprev; -+ -+ if (rgn == elan3mmu->elan3mmu_ergns) -+ elan3mmu->elan3mmu_ergns = rgn->rgn_enext; -+ else -+ rgn->rgn_eprev->rgn_enext = rgn->rgn_enext; -+ -+ return (rgn); -+} -+ -+ELAN3MMU_RGN * -+elan3mmu_rgnat_elan (ELAN3MMU *elan3mmu, E3_Addr addr) -+{ -+ ELAN3MMU_RGN *rgn = elan3mmu_findrgn_elan (elan3mmu, addr, 0); -+ E3_Addr base; -+ -+ if (rgn != NULL && (u_long)(base = rgn->rgn_ebase) <= (u_long)addr && (u_long)addr <= (u_long)(base + rgn->rgn_len - 1)) -+ return (rgn); -+ return (NULL); -+} -+ -+/* main address region management */ -+ELAN3MMU_RGN * -+elan3mmu_findrgn_main (ELAN3MMU *elan3mmu, -+ caddr_t addr, int tail) -+{ -+ ELAN3MMU_RGN *next = NULL; -+ ELAN3MMU_RGN *rgn; -+ ELAN3MMU_RGN *hirgn; -+ ELAN3MMU_RGN *lorgn; -+ caddr_t lastaddr; -+ caddr_t base; -+ int forward; -+ -+ ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) || SPINLOCK_HELD (&elan3mmu->elan3mmu_lock)); -+ -+ if (elan3mmu->elan3mmu_mrgns == NULL) -+ return (NULL); -+ -+ rgn = elan3mmu->elan3mmu_mrgnlast; -+ if (rgn == NULL) -+ rgn = elan3mmu->elan3mmu_mrgns; -+ -+ forward = 0; -+ if ((base = rgn->rgn_mbase) < addr) -+ { -+ if (addr <= (base + rgn->rgn_len - 1)) -+ return (rgn); /* mrgnlast contained addr */ -+ -+ hirgn = elan3mmu->elan3mmu_mtail; -+ if ((lastaddr = hirgn->rgn_mbase + hirgn->rgn_len - 1) < addr) -+ return (tail ? hirgn : NULL); /* addr is out of range */ -+ -+ if ((addr - base) > (lastaddr - addr)) -+ rgn = hirgn; -+ else -+ { -+ rgn = rgn->rgn_mnext; -+ forward++; -+ } -+ } -+ else -+ { -+ lorgn = elan3mmu->elan3mmu_mrgns; -+ if (lorgn->rgn_mbase > addr) -+ return (lorgn); /* lowest region is higher than addr */ -+ if ((addr - lorgn->rgn_mbase) < (base - addr)) -+ { -+ rgn = lorgn; /* search forward from head */ -+ forward++; -+ } -+ } -+ if (forward) -+ { -+ while ((rgn->rgn_mbase + rgn->rgn_len - 1) < addr) -+ rgn = rgn->rgn_mnext; -+ -+ if (rgn->rgn_mbase <= addr) -+ elan3mmu->elan3mmu_mrgnlast = rgn; -+ return (rgn); -+ } -+ else -+ { -+ while (rgn->rgn_mbase > addr) -+ { -+ next = rgn; -+ rgn = rgn->rgn_mprev; -+ } -+ if ((rgn->rgn_mbase + rgn->rgn_len - 1) < addr) -+ return (next); -+ else -+ { -+ elan3mmu->elan3mmu_mrgnlast = rgn; -+ return (rgn); -+ } -+ } -+} -+ -+int -+elan3mmu_addrgn_main (ELAN3MMU *elan3mmu, ELAN3MMU_RGN *nrgn) -+{ -+ ELAN3MMU_RGN *rgn = elan3mmu_findrgn_main (elan3mmu, nrgn->rgn_mbase, 1); -+ caddr_t nbase = nrgn->rgn_mbase; -+ caddr_t ntop = nbase + nrgn->rgn_len - 1; -+ caddr_t base; -+ -+ ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) && SPINLOCK_HELD (&elan3mmu->elan3mmu_lock)); -+ -+ if (rgn == NULL) -+ { -+ elan3mmu->elan3mmu_mrgns = elan3mmu->elan3mmu_mtail = nrgn; -+ nrgn->rgn_mnext = nrgn->rgn_mprev = NULL; -+ } -+ else -+ { -+ base = rgn->rgn_mbase; -+ -+ if ((base + rgn->rgn_len - 1) < nbase) /* top of region below requested address */ -+ { /* so insert after region (and hence at end */ -+ nrgn->rgn_mprev = rgn; /* of list */ -+ nrgn->rgn_mnext = NULL; -+ rgn->rgn_mnext = elan3mmu->elan3mmu_mtail = nrgn; -+ } -+ else -+ { -+ if (nbase >= base || ntop >= base) -+ return (-1); /* overlapping region */ -+ -+ nrgn->rgn_mnext = rgn; /* insert before region */ -+ nrgn->rgn_mprev = rgn->rgn_mprev; -+ rgn->rgn_mprev = nrgn; -+ if (elan3mmu->elan3mmu_mrgns == rgn) -+ elan3mmu->elan3mmu_mrgns = nrgn; -+ else -+ nrgn->rgn_mprev->rgn_mnext = nrgn; -+ } -+ } -+ elan3mmu->elan3mmu_mrgnlast = nrgn; -+ -+ return (0); -+} -+ 
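-+/*
-+ * Editorial sketch (not part of the original driver): since a region
-+ * records both base addresses and a single length, translating between
-+ * the two address spaces is just a region lookup plus an offset, i.e.
-+ *
-+ *	eaddr = rgn->rgn_ebase + (addr - rgn->rgn_mbase);
-+ *	maddr = rgn->rgn_mbase + (addr - rgn->rgn_ebase);
-+ *
-+ * elan3mmu_elanaddr() and elan3mmu_mainaddr() below implement exactly
-+ * these two lines under elan3mmu_lock after an rgnat lookup.
-+ */
-+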
-+ELAN3MMU_RGN * -+elan3mmu_removergn_main (ELAN3MMU *elan3mmu, caddr_t addr) -+{ -+ ELAN3MMU_RGN *rgn = elan3mmu_findrgn_main (elan3mmu, addr, 0); -+ -+ ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) && SPINLOCK_HELD (&elan3mmu->elan3mmu_lock)); -+ -+ if (rgn == NULL || rgn->rgn_mbase != addr) -+ return (NULL); -+ -+ elan3mmu->elan3mmu_mrgnlast = rgn->rgn_mnext; -+ if (rgn == elan3mmu->elan3mmu_mtail) -+ elan3mmu->elan3mmu_mtail = rgn->rgn_mprev; -+ else -+ rgn->rgn_mnext->rgn_mprev = rgn->rgn_mprev; -+ -+ if (rgn == elan3mmu->elan3mmu_mrgns) -+ elan3mmu->elan3mmu_mrgns = rgn->rgn_mnext; -+ else -+ rgn->rgn_mprev->rgn_mnext = rgn->rgn_mnext; -+ -+ return (rgn); -+} -+ -+ELAN3MMU_RGN * -+elan3mmu_rgnat_main (ELAN3MMU *elan3mmu, caddr_t addr) -+{ -+ ELAN3MMU_RGN *rgn = elan3mmu_findrgn_main (elan3mmu, addr, 0); -+ caddr_t base; -+ -+ if (rgn != NULL && (base = rgn->rgn_mbase) <= addr && addr <= (base + rgn->rgn_len - 1)) -+ return (rgn); -+ return (NULL); -+} -+ -+int -+elan3mmu_setperm (ELAN3MMU *elan3mmu, -+ caddr_t maddr, -+ E3_Addr eaddr, -+ u_int len, -+ u_int perm) -+{ -+ ELAN3_DEV *dev = elan3mmu->elan3mmu_dev; -+ ELAN3MMU_RGN *nrgn; -+ unsigned long flags; -+ -+ HAT_PRINTF4 (1, "elan3mmu_setperm: user %p elan %08x len %x perm %x\n", maddr, eaddr, len, perm); -+ -+ if ((((uintptr_t) maddr) & PAGEOFFSET) || (eaddr & PAGEOFFSET) || (len & PAGEOFFSET)) -+ { -+ HAT_PRINTF0 (1, "elan3mmu_setperm: alignment failure\n"); -+ return (EINVAL); -+ } -+ -+ if (((uintptr_t) maddr + len - 1) < (uintptr_t) maddr || ((u_long)eaddr + len - 1) < (u_long)eaddr) -+ { -+ HAT_PRINTF0 (1, "elan3mmu_setperm: range failure\n"); -+ return (EINVAL); -+ } -+ -+ ALLOC_ELAN3MMU_RGN(nrgn, TRUE); -+ -+ spin_lock (&elan3mmu->elan3mmu_lock); -+ nrgn->rgn_mbase = maddr; -+ nrgn->rgn_ebase = eaddr; -+ nrgn->rgn_len = len; -+ nrgn->rgn_perm = perm; -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ if (elan3mmu_addrgn_elan (elan3mmu, nrgn) < 0) -+ { -+ HAT_PRINTF0 (1, "elan3mmu_setperm: elan address exists\n"); -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ spin_unlock (&elan3mmu->elan3mmu_lock); -+ -+ FREE_ELAN3MMU_RGN (nrgn); -+ return (EINVAL); -+ } -+ -+ if (elan3mmu_addrgn_main (elan3mmu, nrgn) < 0) -+ { -+ HAT_PRINTF0 (1, "elan3mmu_setperm: main address exists\n"); -+ elan3mmu_removergn_elan (elan3mmu, eaddr); -+ -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ spin_unlock (&elan3mmu->elan3mmu_lock); -+ -+ FREE_ELAN3MMU_RGN (nrgn); -+ return (EINVAL); -+ } -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ spin_unlock (&elan3mmu->elan3mmu_lock); -+ -+ return (ESUCCESS); -+} -+ -+void -+elan3mmu_clrperm (ELAN3MMU *elan3mmu, -+ E3_Addr addr, -+ u_int len) -+{ -+ E3_Addr raddr; -+ E3_Addr rtop; -+ ELAN3MMU_RGN *nrgn; -+ ELAN3MMU_RGN *rgn; -+ ELAN3MMU_RGN *rgn_next; -+ u_int ssize; -+ unsigned long flags; -+ int res; -+ -+ HAT_PRINTF2 (1, "elan3mmu_clrperm: elan %08x len %x\n", addr, len); -+ -+ raddr = (addr & PAGEMASK); -+ rtop = ((addr + len - 1) & PAGEMASK) + PAGEOFFSET; -+ -+ ALLOC_ELAN3MMU_RGN (nrgn, TRUE); -+ -+ spin_lock (&elan3mmu->elan3mmu_lock); -+ -+ for (rgn = elan3mmu_findrgn_elan (elan3mmu, addr, 0); rgn != NULL; rgn = rgn_next) -+ { -+ if (rtop < rgn->rgn_ebase) /* rtop was in a gap */ -+ break; -+ -+ rgn_next = rgn->rgn_enext; /* Save next region pointer */ -+ -+ if (raddr <= rgn->rgn_ebase && rtop >= (rgn->rgn_ebase + rgn->rgn_len - 1)) -+ { -+ /* whole region is cleared */ -+ elan3mmu_unload (elan3mmu, rgn->rgn_ebase, rgn->rgn_len, PTE_UNLOAD); -+ -+ spin_lock_irqsave 
(&elan3mmu->elan3mmu_dev->IntrLock, flags); -+ elan3mmu_removergn_elan (elan3mmu, rgn->rgn_ebase); -+ elan3mmu_removergn_main (elan3mmu, rgn->rgn_mbase); -+ spin_unlock_irqrestore (&elan3mmu->elan3mmu_dev->IntrLock, flags); -+ -+ FREE_ELAN3MMU_RGN (rgn); -+ } -+ else if (raddr <= rgn->rgn_ebase) -+ { -+ /* clearing at beginning, so shrink size and increment base ptrs */ -+ ssize = rtop - rgn->rgn_ebase + 1; -+ -+ elan3mmu_unload (elan3mmu, rgn->rgn_ebase, ssize, PTE_UNLOAD); -+ -+ spin_lock_irqsave (&elan3mmu->elan3mmu_dev->IntrLock, flags); -+ rgn->rgn_mbase += ssize; -+ rgn->rgn_ebase += ssize; -+ rgn->rgn_len -= ssize; -+ spin_unlock_irqrestore (&elan3mmu->elan3mmu_dev->IntrLock, flags); -+ -+ } -+ else if (rtop >= (rgn->rgn_ebase + rgn->rgn_len - 1)) -+ { -+ /* clearing at end, so just shrink length of region */ -+ ssize = ((rgn->rgn_ebase + rgn->rgn_len - 1) - raddr) + 1; -+ -+ elan3mmu_unload (elan3mmu, raddr, ssize, PTE_UNLOAD); -+ -+ spin_lock_irqsave (&elan3mmu->elan3mmu_dev->IntrLock, flags); -+ rgn->rgn_len -= ssize; -+ spin_unlock_irqrestore (&elan3mmu->elan3mmu_dev->IntrLock, flags); -+ } -+ else -+ { -+ /* the section to go is in the middle, so need to */ -+ /* split it into two regions */ -+ elan3mmu_unload (elan3mmu, raddr, rtop - raddr + 1, PTE_UNLOAD); -+ -+ spin_lock_irqsave (&elan3mmu->elan3mmu_dev->IntrLock, flags); -+ -+ ASSERT (nrgn != NULL); -+ -+ nrgn->rgn_mbase = rgn->rgn_mbase + (rtop - rgn->rgn_ebase + 1);; -+ nrgn->rgn_ebase = rtop + 1; -+ nrgn->rgn_len = ((rgn->rgn_ebase + rgn->rgn_len - 1) - rtop); -+ nrgn->rgn_perm = rgn->rgn_perm; -+ -+ rgn->rgn_len = (raddr - rgn->rgn_ebase); /* shrink original region */ -+ -+ res = elan3mmu_addrgn_elan (elan3mmu, nrgn); /* insert new region */ -+ ASSERT (res == 0); /* which cannot fail */ -+ -+ res = elan3mmu_addrgn_main (elan3mmu, nrgn); -+ ASSERT (res == 0); -+ spin_unlock_irqrestore (&elan3mmu->elan3mmu_dev->IntrLock, flags); -+ -+ nrgn = NULL; -+ } -+ } -+ spin_unlock (&elan3mmu->elan3mmu_lock); -+ -+ if (nrgn != NULL) -+ FREE_ELAN3MMU_RGN (nrgn); -+} -+ -+int -+elan3mmu_checkperm (ELAN3MMU *elan3mmu, -+ E3_Addr addr, -+ u_int len, -+ u_int access) -+{ -+ E3_Addr raddr = (((E3_Addr) addr) & PAGEMASK); -+ u_int rtop = ((addr + len - 1) & PAGEMASK) + PAGEOFFSET; -+ u_int rsize = rtop - raddr + 1; -+ ELAN3MMU_RGN *rgn; -+ -+ HAT_PRINTF3 (1, "elan3mmu_checkperm: user %08x len %x access %x\n", addr, len, access); -+ -+ -+ if ((raddr + rsize - 1) < raddr) -+ return (ENOMEM); -+ -+ spin_lock (&elan3mmu->elan3mmu_lock); -+ if ((rgn = elan3mmu_rgnat_elan (elan3mmu, raddr)) == (ELAN3MMU_RGN *) NULL) -+ { -+ spin_unlock (&elan3mmu->elan3mmu_lock); -+ return (ENOMEM); -+ } -+ else -+ { -+ register int ssize; -+ -+ for (; rsize != 0; rsize -= ssize, raddr += ssize) -+ { -+ if (raddr > (rgn->rgn_ebase + rgn->rgn_len - 1)) -+ { -+ rgn = rgn->rgn_enext; -+ -+ if (rgn == NULL || raddr != rgn->rgn_ebase) -+ { -+ spin_unlock (&elan3mmu->elan3mmu_lock); -+ return (ENOMEM); -+ } -+ } -+ if ((raddr + rsize - 1) > (rgn->rgn_ebase + rgn->rgn_len - 1)) -+ ssize = ((rgn->rgn_ebase + rgn->rgn_len - 1) - raddr) + 1; -+ else -+ ssize = rsize; -+ -+ HAT_PRINTF4 (1, "elan3mmu_checkperm : rgn %x -> %x perm %x access %x\n", -+ rgn->rgn_ebase, rgn->rgn_ebase + rgn->rgn_len, rgn->rgn_perm, access); -+ -+ if (ELAN3_INCOMPAT_ACCESS (rgn->rgn_perm, access)) -+ { -+ spin_unlock (&elan3mmu->elan3mmu_lock); -+ return (EACCES); -+ } -+ } -+ } -+ -+ spin_unlock (&elan3mmu->elan3mmu_lock); -+ -+ return (ESUCCESS); -+} -+ -+caddr_t -+elan3mmu_mainaddr (ELAN3MMU 
*elan3mmu, E3_Addr addr) -+{ -+ ELAN3MMU_RGN *rgn; -+ caddr_t raddr; -+ -+ spin_lock (&elan3mmu->elan3mmu_lock); -+ if ((rgn = elan3mmu_rgnat_elan (elan3mmu, addr)) == (ELAN3MMU_RGN *) NULL) -+ raddr = NULL; -+ else -+ raddr = rgn->rgn_mbase + (addr - rgn->rgn_ebase); -+ spin_unlock (&elan3mmu->elan3mmu_lock); -+ -+ return (raddr); -+} -+ -+E3_Addr -+elan3mmu_elanaddr (ELAN3MMU *elan3mmu, caddr_t addr) -+{ -+ ELAN3MMU_RGN *rgn; -+ E3_Addr raddr; -+ -+ spin_lock (&elan3mmu->elan3mmu_lock); -+ if ((rgn = elan3mmu_rgnat_main (elan3mmu, addr)) == (ELAN3MMU_RGN *) NULL) -+ raddr = (E3_Addr) 0; -+ else -+ raddr = rgn->rgn_ebase + (addr - rgn->rgn_mbase); -+ spin_unlock (&elan3mmu->elan3mmu_lock); -+ -+ return (raddr); -+} -+ -+void -+elan3mmu_displayrgns(ELAN3MMU *elan3mmu) -+{ -+ ELAN3MMU_RGN *rgn; -+ -+ spin_lock (&elan3mmu->elan3mmu_lock); -+ HAT_PRINTF0 (1, "elan3mmu_displayrgns: main regions\n"); -+ for (rgn = elan3mmu->elan3mmu_mrgns; rgn; rgn = (rgn->rgn_mnext == elan3mmu->elan3mmu_mrgns) ? NULL : rgn->rgn_mnext) -+ HAT_PRINTF5 (1, " RGN %p ebase %08x mbase %p len %08x perm %08x\n", rgn, rgn->rgn_ebase, rgn->rgn_mbase, rgn->rgn_len, rgn->rgn_perm); -+ HAT_PRINTF0 (1, "elan3mmu_displayrgns: elan regions\n"); -+ for (rgn = elan3mmu->elan3mmu_ergns; rgn; rgn = (rgn->rgn_enext == elan3mmu->elan3mmu_ergns) ? NULL : rgn->rgn_enext) -+ HAT_PRINTF5 (1, " RGN %p ebase %08x mbase %p len %08x perm %08x\n", rgn, rgn->rgn_ebase, rgn->rgn_mbase, rgn->rgn_len, rgn->rgn_perm); -+ -+ spin_unlock (&elan3mmu->elan3mmu_lock); -+} -+ -+/*============================================================================*/ -+/* Private functions */ -+#define ELAN3_PTE_IS_VALID(ptbl, pte) \ -+ ((ptbl->ptbl_flags & PTBL_KERNEL) ? \ -+ (pte&(~ELAN3_PTE_REF)) != elan3mmu_kernel_invalid_pte(ptbl->ptbl_elan3mmu) : \ -+ ELAN3_PTE_VALID(pte)) -+ -+void -+elan3mmu_expand (ELAN3MMU *elan3mmu, E3_Addr addr, int len, int level, int attr) -+{ -+ ELAN3_PTBL *ptbl; -+ sdramaddr_t pte; -+ spinlock_t *lock; -+ u_int span; -+ unsigned long flags; -+ -+ HAT_PRINTF3 (1, "elan3mmu_expand: elan3mmu %p %08x to %08x\n", elan3mmu, -+ addr, addr + len); -+ -+ for ( ; len != 0; addr += span, len -= span) -+ { -+ /* allocate the pte/ptp for the requested level */ -+ pte = elan3mmu_ptealloc (elan3mmu, addr, level, &ptbl, &lock, attr, &flags); -+ -+ switch (level) -+ { -+ case PTBL_LEVEL_3: -+ span = MIN(len, ELAN3_L3_PTSIZE - ((E3_Addr) addr & ELAN3_L3_PTOFFSET)); -+ break; -+ case PTBL_LEVEL_2: -+ span = MIN(len, ELAN3_L2_PTSIZE - ((E3_Addr) addr & ELAN3_L2_PTOFFSET)); -+ break; -+ default: -+ span = len; -+ break; -+ } -+ -+ if (pte != (sdramaddr_t) 0) -+ elan3mmu_unlock_ptbl (ptbl, lock, flags); -+ } -+} -+ -+void -+elan3mmu_reserve (ELAN3MMU *elan3mmu, E3_Addr addr, u_int npages, sdramaddr_t *ptes) -+{ -+ ELAN3_PTBL *ptbl; -+ sdramaddr_t pte; -+ spinlock_t *lock; -+ u_int span; -+ int len; -+ int i; -+ unsigned long flags; -+ -+ HAT_PRINTF3 (1, "elan3mmu_reserve: elan3mmu %p %08x to %08x\n", elan3mmu, -+ addr, addr + (npages << ELAN3_PAGE_SHIFT)); -+ -+ for (len = (npages << ELAN3_PAGE_SHIFT); len != 0; addr += span, len -= span) -+ { -+ /* as we asked for level 3 we know it's a pte */ -+ pte = elan3mmu_ptealloc (elan3mmu, addr, 3, &ptbl, &lock, 0, &flags); -+ -+ span = MIN(len, ELAN3_L3_PTSIZE - ((E3_Addr) addr & ELAN3_L3_PTOFFSET)); -+ -+ if (ptes != NULL) -+ { -+ for (i = 0; i < span; i += ELAN3_PAGE_SIZE, pte += ELAN3_PTE_SIZE) -+ *ptes++ = pte; -+ ptbl->ptbl_valid += (span >> ELAN3_PAGE_SHIFT); -+ -+ HAT_PRINTF4 (2, "elan3mmu_reserve: inc 
valid for level %d ptbl %p to %d (%d)\n", -+ PTBL_LEVEL(ptbl->ptbl_flags), ptbl, ptbl->ptbl_valid, (span >> ELAN3_PAGE_SHIFT)); -+ -+ } -+ -+ elan3mmu_unlock_ptbl (ptbl, lock, flags); -+ } -+} -+ -+void -+elan3mmu_release (ELAN3MMU *elan3mmu, E3_Addr addr, u_int npages, sdramaddr_t *ptes) -+{ -+ ELAN3_DEV *dev = elan3mmu->elan3mmu_dev; -+ ELAN3_PTBL *ptbl; -+ sdramaddr_t pte; -+ ELAN3_PTE tpte; -+ spinlock_t *lock; -+ u_int span; -+ int len; -+ int i; -+ int level; -+ unsigned long flags; -+ -+ HAT_PRINTF3 (1, "elan3mmu_release: elan3mmu %p %08x to %08x\n", elan3mmu, -+ addr, addr + (npages << ELAN3_PAGE_SHIFT)); -+ -+ if (ptes == NULL) -+ return; -+ -+ tpte = elan3mmu_kernel_invalid_pte (elan3mmu); -+ -+ for (len = (npages << ELAN3_PAGE_SHIFT); len != 0; addr += span, len -= span) -+ { -+ /* as we asked for level 3 we know its a pte */ -+ pte = elan3mmu_ptefind(elan3mmu, addr, &level, &ptbl, &lock, &flags); -+ ASSERT (level == PTBL_LEVEL_3); -+ -+ span = MIN(len, ELAN3_L3_PTSIZE - ((E3_Addr) addr & ELAN3_L3_PTOFFSET)); -+ -+ -+ for (i = 0 ; i < span; i += ELAN3_PAGE_SIZE, pte += ELAN3_PTE_SIZE) -+ elan3_writepte (dev, pte, tpte); -+ ptbl->ptbl_valid -= (span >> ELAN3_PAGE_SHIFT); -+ -+ HAT_PRINTF3 (2, "elan3mmu_release: inc valid for level %d ptbl %p to %d\n", -+ PTBL_LEVEL(ptbl->ptbl_flags), ptbl, ptbl->ptbl_valid); -+ -+ elan3mmu_unlock_ptbl (ptbl, lock, flags); -+ } -+ ElanFlushTlb (elan3mmu->elan3mmu_dev); -+} -+ -+void -+elan3mmu_pteload (ELAN3MMU *elan3mmu, int level, E3_Addr addr, physaddr_t paddr, int perm, int attr) -+ -+{ -+ ELAN3_DEV *dev; -+ ELAN3_PTBL *ptbl; -+ spinlock_t *lock; -+ unsigned long flags; -+ ELAN3_PTE newpte; -+ ELAN3_PTE oldpte; -+ sdramaddr_t pte; -+ -+ ASSERT((level == PTBL_LEVEL_2) || (level == PTBL_LEVEL_3)); -+ -+ /* Generate the new pte which we're going to load */ -+ dev = elan3mmu->elan3mmu_dev; -+ -+ newpte = elan3mmu_phys_to_pte (dev, paddr, perm); -+ -+ if (attr & PTE_LOAD_BIG_ENDIAN) -+ newpte |= ELAN3_PTE_BIG_ENDIAN; -+ -+ HAT_PRINTF4 (1, "elan3mmu_pteload: elan3mmu %p level %d addr %x pte %llx\n", elan3mmu, level, addr, (long long) newpte); -+ HAT_PRINTF5 (1, "elan3mmu_pteload:%s%s%s perm=%d phys=%llx\n", -+ (newpte & ELAN3_PTE_LOCAL) ? " local" : "", -+ (newpte & ELAN3_PTE_64_BIT) ? " 64 bit" : "", -+ (newpte & ELAN3_PTE_BIG_ENDIAN) ? 
" big-endian" : " little-endian", -+ (u_int) (newpte & ELAN3_PTE_PERM_MASK) >> ELAN3_PTE_PERM_SHIFT, -+ (unsigned long long) (newpte & ELAN3_PTE_PFN_MASK)); -+ -+ if (level == PTBL_LEVEL_3) -+ pte = elan3mmu_ptealloc (elan3mmu, addr, level, &ptbl, &lock, attr, &flags); -+ else -+ { -+ sdramaddr_t ptp = elan3mmu_ptealloc (elan3mmu, addr, level, &ptbl, &lock, attr, &flags); -+ -+ pte = elan3mmu_ptp2pte (elan3mmu, ptp, level); -+ -+ HAT_PRINTF3 (2, "elan3mmu_pteload: level %d ptp at %lx => pte at %lx\n", level, ptp, pte); -+ } -+ -+ if (pte == (sdramaddr_t) 0) -+ { -+ ASSERT (level == PTBL_LEVEL_3 && (attr & (PTE_NO_SLEEP | PTE_NO_STEAL)) == (PTE_NO_SLEEP | PTE_NO_STEAL)); -+ return; -+ } -+ -+ ASSERT (ptbl->ptbl_elan3mmu == elan3mmu); -+ ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == level); -+ ASSERT (PTBL_IS_LOCKED (ptbl->ptbl_flags)); -+ -+ oldpte = elan3_readpte (dev, pte); -+ -+ HAT_PRINTF3 (2, "elan3mmu_pteload: modify pte at %lx from %llx to %llx\n", pte, (long long) oldpte, (long long) newpte); -+ -+ if (ELAN3_PTE_IS_VALID(ptbl, oldpte)) -+ { -+ ELAN3MMU_STAT(ptereload); -+ -+ ASSERT ((newpte & ~((E3_uint64)ELAN3_PTE_PERM_MASK | ELAN3_RM_MASK)) == (oldpte & ~((E3_uint64)ELAN3_PTE_PERM_MASK | ELAN3_RM_MASK))); -+ -+ if ((newpte & ~ELAN3_RM_MASK) != (oldpte & ~ELAN3_RM_MASK)) -+ { -+ /* We're modifying a valid translation, it must be mapping the same page */ -+ /* so we use elan3_modifypte to not affect the referenced and modified bits */ -+ elan3_modifypte (dev, pte, newpte); -+ -+ -+ ElanFlushTlb (elan3mmu->elan3mmu_dev); -+ } -+ } -+ else -+ { -+ ELAN3MMU_STAT(pteload); -+ -+ ptbl->ptbl_valid++; -+ -+ HAT_PRINTF3 (2, "elan3mmu_pteload: inc valid for level %d ptbl %p to %d\n", -+ PTBL_LEVEL(ptbl->ptbl_flags), ptbl, ptbl->ptbl_valid); -+ -+ HAT_PRINTF2 (2, "elan3mmu_pteload: write pte %lx to %llx\n", pte, (long long) newpte); -+ -+ elan3_writepte (dev, pte, newpte); -+ -+ if (ptbl->ptbl_flags & PTBL_KERNEL) -+ ElanFlushTlb (elan3mmu->elan3mmu_dev); -+ -+ } -+ -+ elan3mmu_unlock_ptbl (ptbl, lock, flags); -+} -+ -+void -+elan3mmu_unload (ELAN3MMU *elan3mmu, E3_Addr addr, u_int len, int attr) -+{ -+ ELAN3_PTBL *ptbl; -+ sdramaddr_t ptp; -+ spinlock_t *lock; -+ int level; -+ u_int span; -+ unsigned long flags; -+ -+ HAT_PRINTF3(1, "elan3mmu_unload (elan3mmu %p addr %x -> %x)\n", elan3mmu, addr, addr+len-1); -+ -+ for (; len != 0; addr += span, len -= span) -+ { -+ ptp = elan3mmu_ptefind(elan3mmu, addr, &level, &ptbl, &lock, &flags); -+ -+ span = MIN(len, ELAN3_L3_PTSIZE - ((E3_Addr) addr & ELAN3_L3_PTOFFSET)); -+ -+ if (ptp != (sdramaddr_t) 0) -+ { -+ HAT_PRINTF2 (2, "elan3mmu_unload: unload [%x,%x]\n", addr, addr + span); -+ -+ if ( level == PTBL_LEVEL_3 ) -+ elan3mmu_unload_loop (elan3mmu, ptbl, ptp - PTBL_TO_PTADDR(ptbl), span >> ELAN3_PAGE_SHIFT, attr); -+ else -+ { -+ ELAN3_PTP invalidptp = ELAN3_INVALID_PTP; -+ ELAN3_DEV *dev = elan3mmu->elan3mmu_dev; -+ ELAN3_PTBL *lXptbl; -+ ELAN3_PTP tptp; -+ int idx; -+ -+ tptp = elan3_readptp (elan3mmu->elan3mmu_dev, ptp); -+ -+ ASSERT (ELAN3_PTP_TYPE(tptp) == ELAN3_ET_PTE); -+ -+ lXptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tptp); -+ idx = (PTP_TO_PT_PADDR(tptp) - PTBL_TO_PTADDR(lXptbl))/ELAN3_PTE_SIZE; -+ -+ if ( level == PTBL_LEVEL_1) -+ span = MIN(len, ELAN3_L2_PTSIZE - ((E3_Addr) addr & ELAN3_L2_PTOFFSET)); -+ else -+ span = MIN(len, ELAN3_L3_PTSIZE - ((E3_Addr) addr & ELAN3_L3_PTOFFSET)); -+ -+ /* invalidate the ptp. */ -+ elan3_writeptp (dev, ptp, invalidptp); -+ if (! 
(attr & PTE_UNLOAD_NOFLUSH)) -+ ElanFlushTlb (dev); -+ -+ elan3mmu_free_pte ( dev, elan3mmu, lXptbl, idx); -+ -+ ptbl->ptbl_valid--; -+ -+ HAT_PRINTF3 (2, "elan3mmu_unload: dec valid for level %d ptbl %p to %d\n", -+ PTBL_LEVEL(ptbl->ptbl_flags), ptbl, ptbl->ptbl_valid); -+ -+ } -+ elan3mmu_unlock_ptbl (ptbl, lock, flags); -+ } -+ } -+} -+ -+static void -+elan3mmu_unload_loop (ELAN3MMU *elan3mmu, ELAN3_PTBL *ptbl, int first_valid, int nptes, int flags) -+{ -+ ELAN3_DEV *dev = elan3mmu->elan3mmu_dev; -+ sdramaddr_t pte; -+ ELAN3_PTE tpte; -+ int last_valid = first_valid + nptes; -+ int i; -+ -+ HAT_PRINTF3 (1, "elan3mmu_unloadloop: ptbl %p entries [%d->%d]\n", ptbl, first_valid, last_valid); -+ -+ ASSERT (PTBL_IS_LOCKED (ptbl->ptbl_flags)); -+ ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == PTBL_LEVEL_3); -+ -+ pte = PTBL_TO_PTADDR(ptbl) + first_valid; -+ -+ for (i = first_valid; i < last_valid; i++, pte += ELAN3_PTE_SIZE) -+ { -+ if (ptbl->ptbl_valid == 0) -+ break; -+ -+ tpte = elan3_readpte (dev, pte); -+ if (! ELAN3_PTE_IS_VALID(ptbl, tpte)) -+ continue; -+ -+ elan3mmu_pteunload (ptbl, pte, flags, NO_MLIST_LOCK); -+ } -+} -+ -+void -+elan3mmu_pteunload (ELAN3_PTBL *ptbl, sdramaddr_t pte, int flags, int got_mlist_lock) -+{ -+ ELAN3_DEV *dev = ptbl->ptbl_elan3mmu->elan3mmu_dev; -+ ELAN3_PTE tpte; -+ -+ ASSERT (PTBL_LEVEL (ptbl->ptbl_flags) == PTBL_LEVEL_3); -+ ASSERT (PTBL_IS_LOCKED (ptbl->ptbl_flags)); -+ -+ HAT_PRINTF2 (1, "elan3mmu_pteunload: ptbl %p pte %lx\n", ptbl, pte); -+ -+ ELAN3MMU_STAT (pteunload); -+ -+ elan3_invalidatepte (dev, pte); -+ -+ if (! (flags & PTE_UNLOAD_NOFLUSH)) -+ ElanFlushTlb (dev); -+ -+ tpte = ELAN3_INVALID_PTE; -+ elan3_writepte (dev, pte, tpte); -+ -+ if (ptbl->ptbl_flags & PTBL_KERNEL) -+ { -+ tpte = elan3mmu_kernel_invalid_pte(ptbl->ptbl_elan3mmu); -+ -+ elan3_writepte (dev, pte, tpte); -+ } -+ -+ ptbl->ptbl_valid--; -+ -+ HAT_PRINTF3 (2, "elan3mmu_pteunload: dec valid for level %d ptbl %p to %d\n", -+ PTBL_LEVEL(ptbl->ptbl_flags), ptbl, ptbl->ptbl_valid); -+ -+} -+ -+void -+elan3mmu_ptesync (ELAN3_PTBL *ptbl, sdramaddr_t pte, int flags, int got_mlist_lock) -+{ -+ -+} -+ -+/* -+ * Create more page tables at a given level for this Elan. 
-+ */ -+static ELAN3_PTBL * -+elan3mmu_create_ptbls (ELAN3_DEV *dev, int level, int attr, int keep) -+{ -+ sdramaddr_t pts; -+ ELAN3_PTBL *ptbl; -+ ELAN3_PTBL *first; -+ ELAN3_PTBL *last; -+ ELAN3_PTBL_GR *ptg; -+ register int i; -+ register int inc; -+ -+ HAT_PRINTF1 (2, "elan3mmu_create_ptbls: create level %d ptbls\n", level); -+ -+ pts = elan3_sdram_alloc (dev, PTBL_GROUP_SIZE); -+ if (pts == (sdramaddr_t) 0) -+ { -+ HAT_PRINTF0 (2, "elan3mmu_create_ptbls: cannot map elan pages\n"); -+ -+ ELAN3MMU_STAT (create_ptbl_failed); -+ return (NULL); -+ } -+ -+ HAT_PRINTF1 (2, "elan3mmu_create_ptbls: pts at %lx\n", pts); -+ -+ ALLOC_PTBL_GR (ptg, !(attr & PTE_NO_SLEEP)); /* Allocate the group of page tables */ -+ if (ptg == NULL) /* for this page */ -+ { -+ HAT_PRINTF0 (2, "elan3mmu_create_ptbls: cannot allocate page table group\n"); -+ -+ elan3_sdram_free (dev, pts, PTBL_GROUP_SIZE); -+ -+ ELAN3MMU_STAT (create_ptbl_failed); -+ return (NULL); -+ } -+ -+ HAT_PRINTF1 (2, "elan3mmu_create_ptbls: ptg is %p\n", ptg); -+ -+ ElanSetPtblGr (dev, pts, ptg); -+ -+ HAT_PRINTF4 (2, "elan3mmu_create_ptbls: zeroing %d bytes at %lx, %d bytes at %p\n", -+ PTBL_GROUP_SIZE, pts, (int) sizeof (ELAN3_PTBL_GR), ptg); -+ -+#ifndef zero_all_ptbls -+ elan3_sdram_zeroq_sdram (dev, pts, PTBL_GROUP_SIZE); /* Ensure that all PTEs/PTPs are invalid */ -+#endif -+ bzero ((caddr_t) ptg, sizeof (ELAN3_PTBL_GR)); -+ -+ ptg->pg_addr = pts; -+ ptg->pg_level = level; -+ -+ ptbl = ptg->pg_ptbls; /* Initialise the index in all page tables */ -+ for (i = 0; i < PTBLS_PER_GROUP_MAX; i++) -+ { -+ ptbl->ptbl_index = (u_char) i; -+ ptbl->ptbl_next = (ELAN3_PTBL *) 0xdeaddead; -+ ptbl++; -+ } -+ -+ switch (level) /* Determine the number of ptbls we can */ -+ { /* allocate from this page, by jumping */ -+ case PTBL_LEVEL_X: inc = PTBLS_PER_PTBL_LX; break; /* multiples of the smallest. */ -+ case PTBL_LEVEL_1: inc = PTBLS_PER_PTBL_L1; break; -+ case PTBL_LEVEL_2: inc = PTBLS_PER_PTBL_L2; break; -+ case PTBL_LEVEL_3: inc = PTBLS_PER_PTBL_L3; break; -+ default: inc = PTBLS_PER_PTBL_L3; break; -+ } -+ -+ ptbl = ptg->pg_ptbls; /* Chain them together */ -+ for (i = 0; i < PTBLS_PER_GROUP_MAX; i += inc, ptbl += inc) -+ ptbl->ptbl_next = ptbl + inc; -+ -+ first = ptg->pg_ptbls; /* Determine list of */ -+ last = first + PTBLS_PER_GROUP_MAX - inc; /* ptbls to add to free list */ -+ if (! 
keep) -+ ptbl = NULL; -+ else -+ { -+ ptbl = first; -+ first = first->ptbl_next; -+ } -+ -+ spin_lock (&dev->Level[level].PtblLock); -+ dev->Level[level].PtblTotal += PTBLS_PER_GROUP_MAX/inc; /* Increment the counts */ -+ dev->Level[level].PtblFreeCount += PTBLS_PER_GROUP_MAX/inc; -+ -+ ELAN3MMU_SET_STAT (num_ptbl_level[level], dev->Level[level].PtblTotal); -+ -+ if (keep) -+ dev->Level[level].PtblFreeCount--; -+ -+ last->ptbl_next = dev->Level[level].PtblFreeList; /* And add to free list */ -+ dev->Level[level].PtblFreeList = first; -+ spin_unlock (&dev->Level[level].PtblLock); -+ -+ spin_lock (&dev->PtblGroupLock); -+ ptg->pg_next = dev->Level[level].PtblGroupList; -+ dev->Level[level].PtblGroupList = ptg; -+ spin_unlock (&dev->PtblGroupLock); -+ -+ HAT_PRINTF1 (2, "elan3mmu_create_ptbls: returning ptbl %p\n", ptbl); -+ -+ return (ptbl); -+} -+ -+static ELAN3_PTBL * -+elan3mmu_ta_to_ptbl (ELAN3MMU *elan3mmu, ELAN3_PTP *ptp) -+{ -+ E3_Addr ptpa = PTP_TO_PT_PADDR(*ptp); -+ ELAN3_PTBL_GR *pg = ElanGetPtblGr (elan3mmu->elan3mmu_dev, (sdramaddr_t)ptpa & ~(PTBL_GROUP_SIZE-1)); -+ -+ return (pg->pg_ptbls + ((ptpa - pg->pg_addr) >> ELAN3_PT_SHIFT)); -+} -+ -+static ELAN3_PTBL * -+elan3mmu_alloc_lXptbl (ELAN3_DEV *dev, int attr, ELAN3MMU *elan3mmu) -+{ -+ ELAN3_PTBL *ptbl = NULL; -+ -+ spin_lock (&dev->Level[PTBL_LEVEL_X].PtblLock); -+ if (dev->Level[PTBL_LEVEL_X].PtblFreeList) -+ { -+ ptbl = dev->Level[PTBL_LEVEL_X].PtblFreeList; -+ -+ HAT_PRINTF1 (2, "elan3mmu_alloc_lXptbl: found ptbl %p on free list\n", ptbl); -+ -+ dev->Level[PTBL_LEVEL_X].PtblFreeList = ptbl->ptbl_next; -+ dev->Level[PTBL_LEVEL_X].PtblFreeCount--; -+ } -+ spin_unlock (&dev->Level[PTBL_LEVEL_X].PtblLock); -+ -+ if (ptbl == NULL) -+ { -+ ptbl = elan3mmu_create_ptbls (dev, PTBL_LEVEL_X, attr, 1); -+ -+ HAT_PRINTF1 (2, "elan3mmu_alloc_lXptbl: created level X ptbl %p\n", ptbl); -+ } -+ -+ if (ptbl == NULL) -+ { -+ if ((attr & PTE_NO_STEAL)) -+ { -+ HAT_PRINTF0 (2, "elan3mmu_alloc_lXptbl: not allowed to steal ptbl for use at level X\n"); -+ return NULL; -+ } -+ -+ ELAN3MMU_STAT(lX_alloc_l3); -+ -+ ptbl = elan3mmu_steal_l3ptbl (dev, attr); -+ -+ HAT_PRINTF1 (2, "elan3mmu_alloc_lXptbl: stolen level3 ptbl %p used as level X\n", ptbl); -+ } -+ -+ ptbl->ptbl_elan3mmu = elan3mmu; -+ ptbl->ptbl_base = 0; -+ ptbl->ptbl_parent = 0; -+ ptbl->ptbl_flags = PTBL_LEVEL_X | PTBL_ALLOCED; -+ -+ HAT_PRINTF2 (2, "elan3mmu_alloc_lXptbl: ptbl %p dev %p\n", ptbl, dev); -+ -+#ifdef zero_all_ptbls -+ elan3_sdram_zeroq_sdram (dev, PTBL_TO_PTADDR(ptbl), ELAN3_LX_ENTRIES*ELAN3_PTE_SIZE); -+#endif -+ -+ return (ptbl); -+} -+ -+static ELAN3_PTBL * -+elan3mmu_alloc_pte (ELAN3_DEV *dev, ELAN3MMU *elan3mmu, int *idx) -+{ -+ ELAN3_PTBL * ptbl_ptr; -+ int index; -+ -+ /* lock whilst looking for space */ -+ spin_lock (&elan3mmu->elan3mmu_lXptbl_lock); -+ -+ /* walk the lXptbl list */ -+ ptbl_ptr = elan3mmu->elan3mmu_lXptbl; -+ while ( ptbl_ptr != NULL ) -+ { -+ /* does this ptbl have any free ones */ -+ if ( (index = ptbl_ptr->ptbl_valid) < ELAN3_LX_ENTRIES) -+ { -+ /* better to search from valid count as it's likely to be free */ -+ index = ptbl_ptr->ptbl_valid; -+ do { -+ if ((ptbl_ptr->ptbl_base & (1 << index)) == 0) -+ goto found; -+ -+ /* move index on and wrap back to start if needed */ -+ if ((++index) == ELAN3_LX_ENTRIES) -+ index = 0; -+ } while (index != ptbl_ptr->ptbl_valid); -+ -+ panic ("elan3mmu_alloc_pte: ptbl_valid < 32 but no free ptes"); -+ } -+ ptbl_ptr = ptbl_ptr->ptbl_parent; -+ } -+ -+ /* unlock so we can create space */ -+ spin_unlock 
(&elan3mmu->elan3mmu_lXptbl_lock); -+ -+ /* if create some more */ -+ ptbl_ptr = elan3mmu_alloc_lXptbl(dev, 0, elan3mmu); -+ -+ /* get the lock again */ -+ spin_lock (&elan3mmu->elan3mmu_lXptbl_lock); -+ -+ /* add to front of list as its obviously got free ones on it */ -+ ptbl_ptr->ptbl_parent = elan3mmu->elan3mmu_lXptbl; -+ elan3mmu->elan3mmu_lXptbl = ptbl_ptr; -+ -+ /* grap the first one */ -+ index = 0; -+ -+ found: -+ ptbl_ptr->ptbl_base |= (1 << index); -+ ptbl_ptr->ptbl_valid++; -+ -+ HAT_PRINTF3 (2, "elan3mmu_alloc_pte: inc valid for level %d ptbl %p to %d\n", -+ PTBL_LEVEL(ptbl_ptr->ptbl_flags), ptbl_ptr, ptbl_ptr->ptbl_valid); -+ -+ /* release the loc and return it */ -+ spin_unlock (&elan3mmu->elan3mmu_lXptbl_lock); -+ -+ *idx = index; -+ return (ptbl_ptr); -+} -+ -+static ELAN3_PTBL * -+elan3mmu_alloc_l1ptbl (ELAN3_DEV *dev, int attr, ELAN3MMU *elan3mmu) -+{ -+ ELAN3_PTBL *ptbl = NULL; -+ ELAN3_PTBL *p; -+ int i,j; -+ -+ spin_lock (&dev->Level[PTBL_LEVEL_1].PtblLock); -+ if (dev->Level[PTBL_LEVEL_1].PtblFreeList) -+ { -+ ptbl = dev->Level[PTBL_LEVEL_1].PtblFreeList; -+ dev->Level[PTBL_LEVEL_1].PtblFreeList = ptbl->ptbl_next; -+ dev->Level[PTBL_LEVEL_1].PtblFreeCount--; -+ } -+ spin_unlock (&dev->Level[PTBL_LEVEL_1].PtblLock); -+ -+ if (ptbl == NULL) -+ ptbl = elan3mmu_create_ptbls (dev, PTBL_LEVEL_1, attr, 1); -+ -+ if (ptbl == NULL) -+ panic ("elan3mmu_alloc_l1ptbl: cannot alloc ptbl"); -+ -+ for (p = ptbl, j = i = 0; i < PTBLS_PER_PTBL_L1; i++, p++) -+ { -+ p->ptbl_elan3mmu = elan3mmu; -+ p->ptbl_base = VA2BASE (j); -+ p->ptbl_flags = PTBL_LEVEL_1 | PTBL_GROUPED; -+ p->ptbl_parent = NULL; -+ -+ j += L1_VA_PER_PTBL; -+ } -+ -+ /* Now mark the real page table as allocated */ -+ /* level 1 ptbls are returned unlocked */ -+ ptbl->ptbl_flags = PTBL_LEVEL_1 | PTBL_ALLOCED; -+ -+ HAT_PRINTF2 (2, "elan3mmu_alloc_l1ptbl: ptbl %p dev %p\n", ptbl, dev); -+ -+#ifdef zero_all_ptbls -+ elan3_sdram_zeroq_sdram (dev, PTBL_TO_PTADDR(ptbl), ELAN3_L1_ENTRIES*ELAN3_PTP_SIZE); -+#endif -+ -+ return (ptbl); -+} -+ -+static ELAN3_PTBL * -+elan3mmu_alloc_l2ptbl (ELAN3_DEV *dev, int attr, ELAN3_PTBL *parent, ELAN3MMU *elan3mmu, E3_Addr base, spinlock_t **plock, unsigned long *flags) -+{ -+ ELAN3_PTBL *ptbl = NULL; -+ ELAN3_PTBL *p; -+ int i; -+ int j; -+ unsigned long ptbl_flags; -+ -+ spin_lock_irqsave (&dev->Level[PTBL_LEVEL_2].PtblLock, ptbl_flags); -+ if (dev->Level[PTBL_LEVEL_2].PtblFreeList) -+ { -+ ptbl = dev->Level[PTBL_LEVEL_2].PtblFreeList; -+ -+ HAT_PRINTF1 (2, "elan3mmu_alloc_l2ptbl: found ptbl %p on free list\n", ptbl); -+ -+ dev->Level[PTBL_LEVEL_2].PtblFreeList = ptbl->ptbl_next; -+ dev->Level[PTBL_LEVEL_2].PtblFreeCount--; -+ } -+ spin_unlock_irqrestore (&dev->Level[PTBL_LEVEL_2].PtblLock, ptbl_flags); -+ -+ if (ptbl == NULL) -+ { -+ ptbl = elan3mmu_create_ptbls (dev, PTBL_LEVEL_2, attr, 1); -+ -+ HAT_PRINTF1 (2, "elan3mmu_alloc_l2ptbl: created level 2 ptbl %p\n", ptbl); -+ } -+ -+ if (ptbl == NULL) -+ { -+ if ((attr & PTE_NO_STEAL)) -+ { -+ HAT_PRINTF0 (2, "elan3mmu_alloc_l2ptbl: not allowted to steal ptbl for use at level 2\n"); -+ return (NULL); -+ } -+ -+ ELAN3MMU_STAT(l2_alloc_l3); -+ -+ ptbl = elan3mmu_steal_l3ptbl (dev, attr); -+ -+ HAT_PRINTF1 (2, "elan3mmu_alloc_l2ptbl: stolen level3 ptbl %p used as level 2\n", ptbl); -+ } -+ -+ *plock = elan3mmu_ptbl_to_lock (PTBL_LEVEL_2, ptbl); -+ spin_lock_irqsave (*plock, *flags); -+ -+ for (p = ptbl, j = i = 0; i < PTBLS_PER_PTBL_L2; i++, p++) -+ { -+ p->ptbl_elan3mmu = elan3mmu; -+ p->ptbl_base = VA2BASE (base + j); -+ p->ptbl_flags = 
PTBL_LEVEL_2 | PTBL_GROUPED; -+ p->ptbl_parent = parent; -+ -+ j += L2_VA_PER_PTBL; -+ } -+ -+ ptbl->ptbl_flags = PTBL_LEVEL_2 | PTBL_ALLOCED | PTBL_LOCKED; -+ -+ HAT_PRINTF3 (2, "elan3mmu_alloc_l2ptbl: ptbl %p dev %p base %x\n", ptbl, dev, base); -+ -+#ifdef zero_all_ptbls -+ elan3_sdram_zeroq_sdram (dev, PTBL_TO_PTADDR(ptbl), ELAN3_L2_ENTRIES*ELAN3_PTP_SIZE); -+#endif -+ -+ return (ptbl); -+} -+ -+static ELAN3_PTBL * -+elan3mmu_alloc_l3ptbl (ELAN3_DEV *dev, int attr, ELAN3_PTBL *parent, ELAN3MMU *elan3mmu, E3_Addr base, spinlock_t **plock, unsigned long *flags) -+{ -+ ELAN3_PTBL *ptbl = NULL; -+ ELAN3_PTBL *p; -+ int i; -+ int j; -+ unsigned long ptbl_flags; -+ -+ spin_lock_irqsave (&dev->Level[PTBL_LEVEL_3].PtblLock, ptbl_flags); -+ if (dev->Level[PTBL_LEVEL_3].PtblFreeList) -+ { -+ ptbl = dev->Level[PTBL_LEVEL_3].PtblFreeList; -+ -+ HAT_PRINTF1 (2, "elan3mmu_alloc_l3ptbl: found ptbl %p on free list\n", ptbl); -+ -+ dev->Level[PTBL_LEVEL_3].PtblFreeList = ptbl->ptbl_next; -+ dev->Level[PTBL_LEVEL_3].PtblFreeCount--; -+ } -+ spin_unlock_irqrestore (&dev->Level[PTBL_LEVEL_3].PtblLock, ptbl_flags); -+ -+ if (ptbl == NULL) -+ { -+ ptbl = elan3mmu_create_ptbls (dev, PTBL_LEVEL_3, attr, 1); -+ -+ HAT_PRINTF1 (2, "elan3mmu_alloc_l3ptbl: created level 3 ptbl %p\n", ptbl); -+ } -+ -+ if (ptbl == NULL) -+ { -+ if ((attr & PTE_NO_STEAL)) -+ { -+ HAT_PRINTF0 (2, "elan3mmu_alloc_l3ptbl: not allowed to steal ptbl for use at level 3\n"); -+ return (NULL); -+ } -+ -+ ptbl = elan3mmu_steal_l3ptbl (dev, attr); -+ -+ HAT_PRINTF1 (2, "elan3mmu_alloc_l3ptbl: stolen level3 ptbl %p\n", ptbl); -+ } -+ -+ *plock = elan3mmu_ptbl_to_lock (PTBL_LEVEL_3, ptbl); -+ spin_lock_irqsave (*plock, *flags); -+ -+ for (p = ptbl, j = i = 0; i < PTBLS_PER_PTBL_L3; i++, p++) -+ { -+ p->ptbl_elan3mmu = elan3mmu; -+ p->ptbl_base = VA2BASE (base + j); -+ p->ptbl_flags = PTBL_LEVEL_3 | PTBL_GROUPED; -+ p->ptbl_parent = parent; -+ -+ j += L3_VA_PER_PTBL; -+ } -+ -+ ptbl->ptbl_flags = PTBL_LEVEL_3 | PTBL_ALLOCED | PTBL_LOCKED; -+ -+ HAT_PRINTF3 (2, "elan3mmu_alloc_l3ptbl: ptbl %p dev %p base %x\n", ptbl, dev, base); -+ -+#ifdef zero_all_ptbls -+ elan3_sdram_zeroq_sdram (dev, PTBL_TO_PTADDR(ptbl), ELAN3_L3_ENTRIES*ELAN3_PTE_SIZE); -+#endif -+ -+ return (ptbl); -+} -+ -+void -+elan3mmu_free_pte (ELAN3_DEV *dev, ELAN3MMU *elan3mmu, ELAN3_PTBL *ptbl_ptr, int idx) -+{ -+ sdramaddr_t pte = PTBL_TO_PTADDR (ptbl_ptr) | (idx * sizeof (ELAN3_PTE)); -+ ELAN3_PTE tpte = ELAN3_INVALID_PTE; -+ ELAN3_PTBL *prev; -+ -+ /* ensure that the pte is invalid when free */ -+ elan3_writepte (dev, pte, tpte); -+ -+ /* lock whilst removing */ -+ spin_lock (&elan3mmu->elan3mmu_lXptbl_lock); -+ -+ HAT_PRINTF4 (2, "elan3mmu_free_pte idx %d ptbl_ptr %p ptbl_base %x ptbl_ptr->ptbl_valid %d \n", -+ idx, ptbl_ptr, ptbl_ptr->ptbl_base, ptbl_ptr->ptbl_valid); -+ /* make sure it was set */ -+ ASSERT ( ptbl_ptr->ptbl_base & (1 << idx) ); -+ ASSERT ( ptbl_ptr->ptbl_valid > 0 ); -+ -+ ptbl_ptr->ptbl_base &= ~(1 << idx); -+ ptbl_ptr->ptbl_valid--; -+ -+ HAT_PRINTF3 (2, "elan3mmu_free_pte: dec valid for level %d ptbl %p to %d\n", -+ PTBL_LEVEL(ptbl_ptr->ptbl_flags), ptbl_ptr, ptbl_ptr->ptbl_valid); -+ -+ /* was that the last one on this page */ -+ if ( ! ptbl_ptr->ptbl_valid ) -+ { -+ /* so no bits should be set then */ -+ ASSERT ( ptbl_ptr->ptbl_base == 0 ); -+ -+ /* is this the first page ?? 
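-+	 *
-+	 * (Editorial note:) the per-mmu level X page tables form a singly
-+	 * linked list threaded through ptbl_parent, which doubles as the
-+	 * 'next' pointer at this level, so removal is either a head update
-+	 * or a walk from elan3mmu_lXptbl to the predecessor, as below.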
*/ -+ if ( elan3mmu->elan3mmu_lXptbl == ptbl_ptr ) -+ { -+ /* make the list start at the second element */ -+ elan3mmu->elan3mmu_lXptbl = ptbl_ptr->ptbl_parent; -+ -+ /* put ptbl back on free list */ -+ elan3mmu_free_lXptbl(dev, ptbl_ptr); -+ -+ /* unlock and return */ -+ spin_unlock (&elan3mmu->elan3mmu_lXptbl_lock); -+ return ; -+ } -+ -+ /* scan through the list looking for this page */ -+ prev = elan3mmu->elan3mmu_lXptbl; -+ while ( prev->ptbl_parent != NULL ) -+ { -+ if ( prev->ptbl_parent == ptbl_ptr ) /* it's the next one */ -+ { -+ /* remove element from chain */ -+ prev->ptbl_parent = ptbl_ptr->ptbl_parent; -+ -+ /* put ptbl back on free list */ -+ elan3mmu_free_lXptbl(dev, ptbl_ptr); -+ -+ /* unlock and return */ -+ spin_unlock (&elan3mmu->elan3mmu_lXptbl_lock); -+ return ; -+ } -+ prev = prev->ptbl_parent; -+ } -+ -+ panic ("elan3mmu_free_pte: failed to find ptbl in chain"); -+ /* NOTREACHED */ -+ } -+ -+ spin_unlock (&elan3mmu->elan3mmu_lXptbl_lock); -+} -+ -+void -+elan3mmu_free_lXptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl) -+{ -+ ELAN3_PTBL_GR *ptg; -+ -+ HAT_PRINTF2 (2, "elan3mmu_free_lXptbl: dev %p ptbl %p\n", dev, ptbl); -+ -+ ASSERT (ptbl->ptbl_flags & PTBL_ALLOCED); -+ ASSERT ((ptbl->ptbl_flags & PTBL_KEEP) == 0); -+ ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == PTBL_LEVEL_X); -+ ASSERT (ptbl->ptbl_valid == 0); -+ -+ ptbl->ptbl_flags = 0; -+ -+ ptg = PTBL_TO_GR(ptbl); -+ -+ if (ptg->pg_level == PTBL_LEVEL_3) -+ { -+ ELAN3MMU_STAT(lX_freed_l3); -+ -+ HAT_PRINTF1 (2, "elan3mmu_free_lXptbl: freeing stolen level 3 ptbl %p\n", ptbl); -+ -+ /* this was really a level 3 ptbl which we had to steal */ -+ spin_lock (&dev->Level[PTBL_LEVEL_3].PtblLock); -+ ptbl->ptbl_next = dev->Level[PTBL_LEVEL_3].PtblFreeList; -+ dev->Level[PTBL_LEVEL_3].PtblFreeList = ptbl; -+ dev->Level[PTBL_LEVEL_3].PtblFreeCount++; -+ spin_unlock (&dev->Level[PTBL_LEVEL_3].PtblLock); -+ } -+ else -+ { -+ spin_lock (&dev->Level[PTBL_LEVEL_X].PtblLock); -+ ptbl->ptbl_next = dev->Level[PTBL_LEVEL_X].PtblFreeList; -+ dev->Level[PTBL_LEVEL_X].PtblFreeList = ptbl; -+ dev->Level[PTBL_LEVEL_X].PtblFreeCount++; -+ spin_unlock (&dev->Level[PTBL_LEVEL_X].PtblLock); -+ } -+} -+ -+void -+elan3mmu_free_l1ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags) -+{ -+ HAT_PRINTF3 (2, "elan3mmu_free_l1ptbl: dev %p ptbl %p ptbl->ptbl_valid %x \n", dev, ptbl, ptbl->ptbl_valid); -+ -+ ASSERT (ptbl->ptbl_flags & PTBL_ALLOCED); -+ ASSERT ((ptbl->ptbl_flags & PTBL_KEEP) == 0); -+ ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == PTBL_LEVEL_1); -+ ASSERT (ptbl->ptbl_valid == 0); -+ -+ HAT_PRINTF2 (2, "elan3mmu_free_l1ptbl: dev %p ptbl %p\n", dev, ptbl); -+ -+ ptbl->ptbl_flags = 0; -+ spin_unlock (lock); -+ -+ spin_lock (&dev->Level[PTBL_LEVEL_1].PtblLock); -+ ptbl->ptbl_next = dev->Level[PTBL_LEVEL_1].PtblFreeList; -+ dev->Level[PTBL_LEVEL_1].PtblFreeList = ptbl; -+ dev->Level[PTBL_LEVEL_1].PtblFreeCount++; -+ spin_unlock (&dev->Level[PTBL_LEVEL_1].PtblLock); -+ -+ local_irq_restore (flags); -+} -+ -+void -+elan3mmu_free_l2ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags) -+{ -+ ELAN3_PTBL_GR *ptg; -+ -+ HAT_PRINTF2 (2, "elan3mmu_free_l2ptbl: dev %p ptbl %p\n", dev, ptbl); -+ -+ ASSERT (PTBL_IS_LOCKED(ptbl->ptbl_flags)); -+ ASSERT (ptbl->ptbl_flags & PTBL_ALLOCED); -+ ASSERT ((ptbl->ptbl_flags & PTBL_KEEP) == 0); -+ ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == PTBL_LEVEL_2); -+ ASSERT (ptbl->ptbl_valid == 0); -+ -+ ptbl->ptbl_flags = 0; -+ spin_unlock (lock); -+ -+ ptg = PTBL_TO_GR(ptbl); -+ -+ if (ptg->pg_level 
== PTBL_LEVEL_3) -+ { -+ ELAN3MMU_STAT(l2_freed_l3); -+ -+ HAT_PRINTF1 (2, "elan3mmu_free_l2ptbl: freeing stolen level 3 ptbl %p\n", ptbl); -+ -+ /* this was really a level 3 ptbl which we had to steal */ -+ spin_lock (&dev->Level[PTBL_LEVEL_3].PtblLock); -+ ptbl->ptbl_next = dev->Level[PTBL_LEVEL_3].PtblFreeList; -+ dev->Level[PTBL_LEVEL_3].PtblFreeList = ptbl; -+ dev->Level[PTBL_LEVEL_3].PtblFreeCount++; -+ spin_unlock (&dev->Level[PTBL_LEVEL_3].PtblLock); -+ } -+ else -+ { -+ spin_lock (&dev->Level[PTBL_LEVEL_2].PtblLock); -+ ptbl->ptbl_next = dev->Level[PTBL_LEVEL_2].PtblFreeList; -+ dev->Level[PTBL_LEVEL_2].PtblFreeList = ptbl; -+ dev->Level[PTBL_LEVEL_2].PtblFreeCount++; -+ spin_unlock (&dev->Level[PTBL_LEVEL_2].PtblLock); -+ } -+ local_irq_restore (flags); -+} -+ -+void -+elan3mmu_free_l3ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags) -+{ -+ ASSERT (PTBL_IS_LOCKED(ptbl->ptbl_flags)); -+ ASSERT (ptbl->ptbl_flags & PTBL_ALLOCED); -+ ASSERT ((ptbl->ptbl_flags & PTBL_KEEP) == 0); -+ ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == PTBL_LEVEL_3); -+ ASSERT (ptbl->ptbl_valid == 0); -+ -+ HAT_PRINTF2 (2, "elan3mmu_free_l3ptbl: dev %p ptbl %p\n", dev, ptbl); -+ -+ if (ptbl->ptbl_flags & PTBL_KERNEL) /* if the ptbl has been used by the kernel */ -+ { /* then zero all the pte's, since they will have been set to the kernel invalid pte */ -+ elan3_sdram_zeroq_sdram (dev, PTBL_TO_PTADDR(ptbl), ELAN3_L3_ENTRIES*ELAN3_PTE_SIZE); -+ } -+ -+ ptbl->ptbl_flags = 0; -+ spin_unlock (lock); -+ -+ spin_lock (&dev->Level[PTBL_LEVEL_3].PtblLock); -+ ptbl->ptbl_next = dev->Level[PTBL_LEVEL_3].PtblFreeList; -+ dev->Level[PTBL_LEVEL_3].PtblFreeList = ptbl; -+ dev->Level[PTBL_LEVEL_3].PtblFreeCount++; -+ spin_unlock (&dev->Level[PTBL_LEVEL_3].PtblLock); -+ -+ local_irq_restore (flags); -+} -+ -+void -+elan3mmu_kernel_l3ptbl (ELAN3_PTBL *ptbl) -+{ -+ ELAN3_DEV *dev = ptbl->ptbl_elan3mmu->elan3mmu_dev; -+ sdramaddr_t pte = PTBL_TO_PTADDR(ptbl); -+ ELAN3_PTE tpte = elan3mmu_kernel_invalid_pte(ptbl->ptbl_elan3mmu); -+ int i; -+ -+ ptbl->ptbl_flags |= PTBL_KERNEL; -+ for (i = 0; i < ELAN3_L3_ENTRIES; i++, pte += ELAN3_PTE_SIZE) -+ { -+ elan3_writepte (dev, pte, tpte); -+ } -+} -+ -+#define PTBL_CAN_STEAL(flag) (((flag) & (PTBL_KERNEL|PTBL_KEEP)) == 0 && (((flag) & PTBL_ALLOCED) && PTBL_LEVEL(flag) == PTBL_LEVEL_3)) -+#define PTBL_MAY_STEAL(flag) (((flag) & (PTBL_KERNEL|PTBL_KEEP|PTBL_LOCKED)) == 0 && (((flag) & PTBL_ALLOCED) && PTBL_LEVEL(flag) == PTBL_LEVEL_3)) -+ -+static int -+elan3mmu_steal_this_ptbl (ELAN3_DEV *dev, ELAN3_PTBL *l3ptbl) -+{ -+ ELAN3_PTBL *l2ptbl = l3ptbl->ptbl_parent; -+ E3_Addr l2addr = BASE2VA(l2ptbl); -+ E3_Addr l3addr = BASE2VA(l3ptbl); -+ ELAN3_PTP invalidptp = ELAN3_INVALID_PTP; -+ sdramaddr_t l2ptp; -+ spinlock_t *l2lock; -+ unsigned long l2flags; -+ -+ HAT_PRINTF5 (1, "elan3mmu_steal_this_ptbl: l3ptbl %p (%x) l2ptbl %p (%x) l2addr %x\n", -+ l3ptbl, l3ptbl->ptbl_flags, l2ptbl, l2ptbl->ptbl_flags, l2addr); -+ -+ if (PTBL_CAN_STEAL (l3ptbl->ptbl_flags) && -+ elan3mmu_lock_ptbl (l2ptbl, LK_PTBL_NOWAIT, l3ptbl->ptbl_elan3mmu, l2addr, PTBL_LEVEL_2, &l2lock, &l2flags) == LK_PTBL_OK) -+ { -+ ELAN3MMU_STAT(stolen_ptbls); -+ -+ /* Locked both L3 and L2 page tables. 
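-+	 *
-+	 * (Editorial note:) the steal sequence below is: invalidate the L2
-+	 * ptp that points at the victim, flush the TLB so the elan can no
-+	 * longer fetch through it, drop the parent's valid count and lock,
-+	 * then unload any remaining level 3 ptes (NOFLUSH, as the table is
-+	 * now unreachable) and hand the emptied table back to the caller.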
*/ -+ l2ptp = PTBL_TO_PTADDR (l2ptbl) + ELAN3_L2_INDEX(l3addr)*ELAN3_PTP_SIZE; -+ -+ /* detach the level 3 page table */ -+ elan3_writeptp (dev, l2ptp, invalidptp); -+ ElanFlushTlb (dev); -+ -+ l2ptbl->ptbl_valid--; -+ -+ HAT_PRINTF3 (2, "elan3mmu_steal_this_ptbl: dec valid for level %d ptbl %p to %d\n", PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid); -+ -+ elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags); -+ -+ elan3mmu_unload_loop (l3ptbl->ptbl_elan3mmu, l3ptbl, 0, ELAN3_L3_ENTRIES, PTE_UNLOAD_NOFLUSH); -+ -+ ASSERT (l3ptbl->ptbl_valid == 0); -+ -+ l3ptbl->ptbl_flags = 0; -+ return (1); -+ } -+ return (0); -+} -+ -+static ELAN3_PTBL * -+elan3mmu_steal_l3ptbl (ELAN3_DEV *dev, int attr) -+{ -+ ELAN3_PTBL_GR *ptg; -+ ELAN3_PTBL *ptbl; -+ spinlock_t *lock; -+ unsigned long group_flags; -+ unsigned long ptbl_flags; -+ register int i; -+ -+ HAT_PRINTF1 (2, "elan3mmu_steal_l3ptbl: attr %x\n", attr); -+ -+ spin_lock_irqsave (&dev->PtblGroupLock, group_flags); -+ -+ ptg = dev->Level3PtblGroupHand; -+ -+ if (ptg == NULL) -+ ptg = dev->Level[PTBL_LEVEL_3].PtblGroupList; -+ -+ for (;;) -+ { -+ while (ptg) -+ { -+ for (i = 0, ptbl = ptg->pg_ptbls; i < PTBLS_PER_GROUP_MAX; i++, ptbl++) -+ { -+ if (PTBL_MAY_STEAL (ptbl->ptbl_flags) && -+ elan3mmu_lock_this_ptbl (ptbl, LK_PTBL_NOWAIT, &lock, &ptbl_flags) == LK_PTBL_OK) -+ { -+ if (elan3mmu_steal_this_ptbl (dev, ptbl )) -+ { -+ HAT_PRINTF1 (2, "elan3mmu_steal_l3ptbl: stolen ptbl %p\n", ptbl); -+ -+ elan3mmu_unlock_ptbl (ptbl, lock, ptbl_flags); -+ -+ dev->Level3PtblGroupHand = ptg->pg_next; -+ -+ spin_unlock_irqrestore (&dev->PtblGroupLock, group_flags); -+ -+ return (ptbl); -+ } -+ elan3mmu_unlock_ptbl (ptbl, lock, ptbl_flags); -+ } -+ } -+ ptg = ptg->pg_next; -+ } -+ -+ if (dev->Level[PTBL_LEVEL_3].PtblFreeList) -+ { -+ spin_lock (&dev->Level[PTBL_LEVEL_3].PtblLock); -+ ptbl = dev->Level[PTBL_LEVEL_3].PtblFreeList; -+ if (ptbl != NULL) -+ { -+ dev->Level[PTBL_LEVEL_3].PtblFreeList = ptbl->ptbl_next; -+ dev->Level[PTBL_LEVEL_3].PtblFreeCount--; -+ } -+ spin_unlock (&dev->Level[PTBL_LEVEL_3].PtblLock); -+ -+ if (ptbl != NULL) -+ { -+ HAT_PRINTF1 (2, "elan3mmu_steal_l3ptbl: found ptbl %p on free list\n", ptbl); -+ break; -+ } -+ } -+ -+ ptbl = elan3mmu_create_ptbls (dev, PTBL_LEVEL_3, attr, 1); -+ -+ if (ptbl != NULL) -+ { -+ HAT_PRINTF1 (2, "elan3mmu_steal_l3ptbl: created new ptbl %p\n", ptbl); -+ break; -+ } -+ -+ HAT_PRINTF0 (1, "elan3mmu_steal_l3ptbl: cannot find a ptbl, retrying\n"); -+ ptg = dev->Level[PTBL_LEVEL_3].PtblGroupList; -+ } -+ -+ spin_unlock_irqrestore (&dev->PtblGroupLock, group_flags); -+ return (ptbl); -+} -+ -+sdramaddr_t -+elan3mmu_ptefind (ELAN3MMU *elan3mmu, E3_Addr addr, int *level, -+ ELAN3_PTBL **pptbl, spinlock_t **plock, unsigned long *flags) -+{ -+ ELAN3_DEV *dev = elan3mmu->elan3mmu_dev; -+ ELAN3_PTBL *l1ptbl; -+ sdramaddr_t l1ptp; -+ ELAN3_PTP tl1ptp; -+ E3_Addr l1base; -+ ELAN3_PTBL *l2ptbl; -+ sdramaddr_t l2ptp; -+ ELAN3_PTP tl2ptp; -+ E3_Addr l2base; -+ ELAN3_PTBL *l3ptbl; -+ sdramaddr_t l3pte; -+ spinlock_t *l1lock; -+ spinlock_t *l2lock; -+ spinlock_t *l3lock; -+ unsigned long l1flags; -+ unsigned long l2flags; -+ unsigned long l3flags; -+ -+ HAT_PRINTF2 (2, "elan3mmu_ptefind: elan3mmu %p addr %x\n", elan3mmu, addr); -+ -+ l1ptbl = elan3mmu->elan3mmu_l1ptbl; -+ *level = 0; -+ -+ if (l1ptbl == NULL) -+ return ((sdramaddr_t) NULL); -+ -+ l1ptp = PTBL_TO_PTADDR(l1ptbl) + ELAN3_L1_INDEX(addr)*ELAN3_PTP_SIZE; -+ l1base = ELAN3_L1_BASE(addr); -+ -+retryl1: -+ tl1ptp = elan3_readptp (dev, l1ptp); -+ -+ HAT_PRINTF4 (2, "elan3mmu_ptefind: 
l1ptbl %p l1ptp %lx l1base %x : tl1ptp %x\n", l1ptbl, l1ptp, l1base, tl1ptp); -+ -+ switch (ELAN3_PTP_TYPE(tl1ptp)) -+ { -+ case ELAN3_ET_PTE: -+ elan3mmu_lock_ptbl (l1ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &l1flags); -+ -+ tl1ptp = elan3_readptp (dev, l1ptp); -+ if (ELAN3_PTP_TYPE(tl1ptp) != ELAN3_ET_PTE) -+ { -+ elan3mmu_unlock_ptbl (l1ptbl, l1lock, l1flags); -+ goto retryl1; -+ } -+ -+ *level = 1; -+ *pptbl = l1ptbl; -+ *plock = l1lock; -+ *flags = l1flags; -+ -+ /* return with l1lock */ -+ return (l1ptp); -+ -+ case ELAN3_ET_INVALID: -+ return ((sdramaddr_t) 0); -+ -+ case ELAN3_ET_PTP: -+ break; -+ -+ default: -+ panic ("elan3mmu_ptefind: found bad entry in level 1 page table"); -+ /* NOTREACHED */ -+ } -+ -+ HAT_PRINTF1 (2, "elan3mmu_ptefind: chain to level 2 ptbl from ptp %x\n", tl1ptp); -+ -+ l2ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp); -+ l2ptp = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr)*ELAN3_PTP_SIZE; -+ l2base = ELAN3_L2_BASE(addr); -+ -+ tl2ptp = elan3_readptp (dev, l2ptp); -+ -+ HAT_PRINTF4 (2, "elan3mmu_ptefind: l2ptbl %p l2ptp %lx l2base %x : tl2ptp %x\n", l2ptbl, l2ptp, l2base, tl2ptp); -+ -+ switch (ELAN3_PTP_TYPE(tl2ptp)) -+ { -+ case ELAN3_ET_PTE: -+ switch (elan3mmu_lock_ptbl (l2ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags)) -+ { -+ case LK_PTBL_OK: -+ tl2ptp = elan3_readptp (dev, l2ptp); -+ if (ELAN3_PTP_TYPE(tl2ptp) != ELAN3_ET_PTE) -+ { -+ elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags); -+ goto retryl1; -+ } -+ -+ *level = 2; -+ *pptbl = l2ptbl; -+ *plock = l2lock; -+ *flags = l2flags; -+ -+ /* return with l2lock */ -+ return (l2ptp); -+ -+ case LK_PTBL_MISMATCH: -+ HAT_PRINTF6 (2, "elan3mmu_ptefind: PTBL_MISMATCH : ptbl %p flags %x elan3mmu %p base %x (%p %x)\n", -+ l2ptbl, l2ptbl->ptbl_flags, l2ptbl->ptbl_elan3mmu, l2ptbl->ptbl_base, elan3mmu, addr); -+ -+ /* -+ * We've trogged down to this ptbl, but someone has just -+ * stolen it, so try all over again. -+ */ -+ goto retryl1; -+ -+ default: -+ panic ("elan3mmu_ptefind: elan3mmu_lock_ptbl returned bad value"); -+ /* NOTREACHED */ -+ } -+ case ELAN3_ET_INVALID: -+ return ((sdramaddr_t) 0); -+ -+ case ELAN3_ET_PTP: -+ break; -+ default: -+ panic ("elan3mmu_ptefind: found bad entry in level 2 page table"); -+ /* NOTREACHED */ -+ } -+ -+ HAT_PRINTF1 (2, "elan3mmu_ptefind: chain to level 3 page table from ptp %x\n", tl2ptp); -+ -+ l3ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp); -+ l3pte = PTBL_TO_PTADDR(l3ptbl) + ELAN3_L3_INDEX(addr)*ELAN3_PTE_SIZE; -+ -+ HAT_PRINTF2 (2, "elan3mmu_ptefind: l3ptbl %p l3pte %lx\n", l3ptbl, l3pte); -+ -+ switch (elan3mmu_lock_ptbl (l3ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &l3flags)) -+ { -+ case LK_PTBL_OK: -+ *level = 3; -+ *plock = l3lock; -+ *pptbl = l3ptbl; -+ *flags = l3flags; -+ -+ return (l3pte); -+ -+ case LK_PTBL_FAILED: -+ panic ("elan3mmu_ptefind: l3 lock failed"); -+ /* NOTREACHED */ -+ -+ case LK_PTBL_MISMATCH: -+ HAT_PRINTF6 (2, "elan3mmu_ptefind: PTBL_MISMATCH : ptbl %p flags %x elan3mmu %p base %x (%p %x)\n", -+ l3ptbl, l3ptbl->ptbl_flags, l3ptbl->ptbl_elan3mmu, l3ptbl->ptbl_base, elan3mmu, addr); -+ -+ /* -+ * We've trogged down to this ptbl, but someone has just -+ * stolen it, so try all over again. 
-+ */
-+	    goto retryl1;
-+
-+	default:
-+	    panic ("elan3mmu_ptefind: elan3mmu_lock_ptbl returned bad value");
-+	    /* NOTREACHED */
-+    }
-+    /* NOTREACHED */
-+    return ((sdramaddr_t) 0);
-+}
-+
-+sdramaddr_t
-+elan3mmu_ptp2pte (ELAN3MMU *elan3mmu, sdramaddr_t ptp, int level)
-+{
-+    ELAN3_PTP tptp = elan3_readptp (elan3mmu->elan3mmu_dev, ptp);
-+
-+    ASSERT (level != 3 && ELAN3_PTP_TYPE(tptp) == ELAN3_ET_PTE);
-+
-+    return PTP_TO_PT_PADDR(tptp);
-+}
-+
-+sdramaddr_t
-+elan3mmu_ptealloc (ELAN3MMU *elan3mmu, E3_Addr addr, int level,
-+                   ELAN3_PTBL **pptbl, spinlock_t **plock, int attr, unsigned long *flags)
-+{
-+    ELAN3_DEV   *dev = elan3mmu->elan3mmu_dev;
-+    ELAN3_PTBL  *l1ptbl;
-+    ELAN3_PTBL  *lXptbl;
-+    int          idx;
-+    sdramaddr_t  l1ptp;
-+    ELAN3_PTP    tl1ptp;
-+    E3_Addr      l1base;
-+    spinlock_t  *l1lock;
-+    ELAN3_PTBL  *l2ptbl;
-+    sdramaddr_t  l2ptp;
-+    ELAN3_PTP    tl2ptp;
-+    E3_Addr      l2base;
-+    spinlock_t  *l2lock;
-+    ELAN3_PTBL  *l3ptbl;
-+    sdramaddr_t  l3pte;
-+    E3_Addr      l3base;
-+    spinlock_t  *l3lock;
-+
-+    unsigned long l1flags;
-+    unsigned long l2flags;
-+    unsigned long l3flags;
-+
-+    HAT_PRINTF2 (2, "elan3mmu_ptealloc: elan3mmu %p addr %x\n", elan3mmu, addr);
-+
-+    l1ptbl = elan3mmu->elan3mmu_l1ptbl;
-+    if (l1ptbl == NULL)
-+        return ((sdramaddr_t) 0);
-+
-+    l1ptp  = PTBL_TO_PTADDR(l1ptbl) + ELAN3_L1_INDEX(addr)*ELAN3_PTP_SIZE;
-+    l1base = ELAN3_L1_BASE(addr);
-+
-+retryl1:
-+    tl1ptp = elan3_readptp (dev, l1ptp);
-+
-+    HAT_PRINTF5 (2, "elan3mmu_ptealloc: l1ptbl %p l1ptp %lx l1base %x (%x) : tl1ptp %x\n",
-+                 l1ptbl, l1ptp, l1base, l1ptbl->ptbl_base, tl1ptp);
-+
-+    switch (ELAN3_PTP_TYPE(tl1ptp))
-+    {
-+    case ELAN3_ET_PTE:
-+        if (level == PTBL_LEVEL_1)
-+        {
-+            elan3mmu_lock_ptbl (l1ptbl, 0, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &l1flags);
-+
-+            tl1ptp = elan3_readptp (dev, l1ptp);
-+            if (ELAN3_PTP_TYPE(tl1ptp) != ELAN3_ET_PTE)
-+            {
-+                elan3mmu_unlock_ptbl (l1ptbl, l1lock, l1flags);
-+                goto retryl1;
-+            }
-+
-+            *pptbl = l1ptbl;
-+            *plock = l1lock;
-+            *flags = l1flags;
-+
-+            /* return holding l1lock */
-+            return (l1ptp);
-+        }
-+        panic ("elan3mmu_ptealloc: found pte in level 1 page table");
-+        /* NOTREACHED */
-+
-+    case ELAN3_ET_PTP:
-+        if (level == PTBL_LEVEL_1)
-+            panic ("elan3mmu_ptealloc: found PTP when loading a level 1 PTE\n");
-+        break;
-+
-+    case ELAN3_ET_INVALID:
-+        if (level == PTBL_LEVEL_1)
-+        {
-+            if ((lXptbl = elan3mmu_alloc_pte (dev, elan3mmu, &idx)) == NULL)
-+                return ((sdramaddr_t) 0);
-+
-+            elan3mmu_lock_ptbl (l1ptbl, 0, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &l1flags);
-+
-+            tl1ptp = elan3_readptp (dev, l1ptp);
-+            if (ELAN3_PTP_TYPE(tl1ptp) != ELAN3_ET_INVALID)
-+            {
-+                /* raced with someone else, who's got there first */
-+                elan3mmu_free_pte (dev, elan3mmu, lXptbl, idx);
-+
-+                /* drop the l1lock and retry */
-+                elan3mmu_unlock_ptbl (l1ptbl, l1lock, l1flags);
-+                goto retryl1;
-+            }
-+
-+            tl1ptp = PTBL_TO_PTADDR(lXptbl) | (idx * ELAN3_PTE_SIZE) | ELAN3_ET_PTE;
-+
-+            elan3_writeptp (dev, l1ptp, tl1ptp);
-+
-+            *pptbl = l1ptbl;
-+            *plock = l1lock;
-+            *flags = l1flags;
-+
-+            /* return holding l1lock */
-+            return (l1ptp);
-+        }
-+
-+        if (level == PTBL_LEVEL_2)
-+        {
-+            if ((lXptbl = elan3mmu_alloc_pte (dev, elan3mmu, &idx)) == NULL)
-+                return ((sdramaddr_t) 0);
-+
-+            if ((l2ptbl = elan3mmu_alloc_l2ptbl (dev, attr, l1ptbl, elan3mmu, ELAN3_L2_BASE(addr), &l2lock, &l2flags)) == NULL)
-+            {
-+                elan3mmu_free_pte (dev, elan3mmu, lXptbl, idx);
-+                return ((sdramaddr_t) 0);
-+            }
-+
-+            /* Connect l2ptbl to the new LX pte */
-+            l2ptp  = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr) * ELAN3_PTP_SIZE;
-+            tl2ptp = 
PTBL_TO_PTADDR(lXptbl) | (idx * ELAN3_PTE_SIZE) | ELAN3_ET_PTE; -+ -+ elan3_writeptp (dev, l2ptp, tl2ptp); -+ -+ /* Now need to lock the l1 ptbl */ -+ elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags); -+ -+ elan3mmu_lock_ptbl (l1ptbl, 0, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &l1flags); -+ elan3mmu_lock_ptbl (l2ptbl, 0, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags); -+ -+ tl1ptp = elan3_readptp (dev, l1ptp); -+ if (ELAN3_PTP_TYPE(tl1ptp) != ELAN3_ET_INVALID) -+ { -+ HAT_PRINTF0 (2, "elan3mmu_ptealloc: beaten to it, free l2 ptbl/lx pte\n"); -+ -+ tl2ptp = ELAN3_INVALID_PTP; -+ elan3_writeptp (dev, l2ptp, tl2ptp); -+ -+ HAT_PRINTF2 (2, "elan3mmu_ptealloc: write level 2 ptp %lx to %x\n", l2ptp, tl2ptp); -+ HAT_PRINTF2 (2, "elan3mmu_ptealloc: freeing l2 ptbl %p (%x)\n", l2ptbl, l2ptbl->ptbl_flags); -+ -+ elan3mmu_free_l2ptbl (dev, l2ptbl, l2lock, l2flags); -+ elan3mmu_free_pte (dev, elan3mmu, lXptbl, idx); -+ -+ elan3mmu_unlock_ptbl (l1ptbl, l1lock, l1flags); -+ -+ goto retryl1; -+ } -+ -+ /* Now have L1 locked, so install the L2 ptbl */ -+ l1ptp = PTBL_TO_PTADDR(l1ptbl) + ELAN3_L1_INDEX(addr)*ELAN3_PTP_SIZE; -+ tl1ptp = PTBL_TO_PTADDR(l2ptbl) | ELAN3_ET_PTP; -+ l1ptbl->ptbl_valid++; -+ -+ HAT_PRINTF3 (2, "elan3mmu_ptealloc: inc valid for level %d ptbl %p to %d\n", -+ PTBL_LEVEL(l1ptbl->ptbl_flags), l1ptbl, l1ptbl->ptbl_valid); -+ -+ elan3_writeptp (dev, l1ptp, tl1ptp); -+ -+ HAT_PRINTF2 (2, "elan3mmu_ptealloc: write l1ptp %lx to %x\n", l1ptp, tl1ptp); -+ -+ /* unordered unlock - lock l1ptbl, lock l2ptbl, unlock l1ptbl */ -+ elan3mmu_unlock_ptbl (l1ptbl, l1lock, l2flags); /* need to unlock with the l2flags to keep irq order correct */ -+ -+ *pptbl = l2ptbl; -+ *plock = l2lock; -+ *flags = l1flags; /* return the l1flags here as we have released the l2flags already to keep order */ -+ -+ /* return holding l2lock */ -+ return (l2ptp); -+ } -+ -+ HAT_PRINTF0 (2, "elan3mmu_ptealloc: allocating level 2 and level 3 page tables\n"); -+ -+ /* Allocate a level 2 and level 3 page table and link them together */ -+ if ((l2ptbl = elan3mmu_alloc_l2ptbl (dev, attr, l1ptbl, elan3mmu, ELAN3_L2_BASE(addr), &l2lock, &l2flags)) == NULL) -+ return ((sdramaddr_t) 0); -+ -+ if ((l3ptbl = elan3mmu_alloc_l3ptbl (dev, attr | PTE_NO_SLEEP, l2ptbl, elan3mmu, ELAN3_L3_BASE(addr), &l3lock, &l3flags)) == NULL) -+ { -+ elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags); -+ return ((sdramaddr_t) 0); -+ } -+ -+ ASSERT (PTBL_IS_LOCKED (l2ptbl->ptbl_flags)); -+ ASSERT (PTBL_LEVEL (l2ptbl->ptbl_flags) == PTBL_LEVEL_2); -+ ASSERT (PTBL_IS_LOCKED (l3ptbl->ptbl_flags)); -+ ASSERT (PTBL_LEVEL (l3ptbl->ptbl_flags) == PTBL_LEVEL_3); -+ -+ HAT_PRINTF6 (2, "elan3mmu_ptealloc: l2ptbl %p (%x,%x) l3ptbl %p (%x,%x)\n", -+ l2ptbl, l2ptbl->ptbl_flags, l2ptbl->ptbl_base, -+ l3ptbl, l3ptbl->ptbl_flags, l3ptbl->ptbl_base); -+ -+ if (CTXT_IS_KERNEL (elan3mmu->elan3mmu_ctxt)) -+ { -+ l2ptbl->ptbl_flags |= PTBL_KERNEL; -+ elan3mmu_kernel_l3ptbl (l3ptbl); -+ } -+ -+ /* -+ * Connect L3 ptbl to the new L2 ptbl. 
-+ */
-+    l2ptp  = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr) * ELAN3_PTP_SIZE;
-+    tl2ptp = PTBL_TO_PTADDR(l3ptbl) | ELAN3_ET_PTP;
-+
-+    l2ptbl->ptbl_valid = 1;
-+
-+    HAT_PRINTF3 (2, "elan3mmu_ptealloc: set valid for level %d ptbl %p to %d\n",
-+                 PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid);
-+
-+    HAT_PRINTF2 (2, "elan3mmu_ptealloc: write level 2 ptp %lx to %x\n", l2ptp, tl2ptp);
-+
-+    elan3_writeptp (dev, l2ptp, tl2ptp);
-+
-+    /*
-+     * Now need to lock the l1 ptbl - to maintain lock ordering
-+     * we set the PTBL_KEEP bit to stop the l3 ptbl from being
-+     * stolen and drop the locks in the order we acquired them
-+     */
-+    l3ptbl->ptbl_flags |= PTBL_KEEP;
-+
-+    elan3mmu_unlock_ptbl (l3ptbl, l3lock, l3flags);
-+    elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
-+
-+    elan3mmu_lock_ptbl (l1ptbl, 0, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &l1flags);
-+    elan3mmu_lock_ptbl (l3ptbl, 0, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &l3flags);
-+
-+    l3ptbl->ptbl_flags &= ~PTBL_KEEP;
-+
-+    /* Now have l1 and l3 ptbls locked, so install the new l2 ptbl into the l1. */
-+    tl1ptp = elan3_readptp (dev, l1ptp);
-+
-+    HAT_PRINTF2 (2, "elan3mmu_ptealloc: l1ptp %lx is %x\n", l1ptp, tl1ptp);
-+
-+    if (ELAN3_PTP_TYPE(tl1ptp) != ELAN3_ET_INVALID)
-+    {
-+        HAT_PRINTF0 (2, "elan3mmu_ptealloc: beaten to it, free l2/l3 ptbls\n");
-+
-+        /* free off the level 3 page table */
-+        HAT_PRINTF2 (2, "elan3mmu_ptealloc: freeing l3 ptbl %p (%x)\n", l3ptbl, l3ptbl->ptbl_flags);
-+
-+        l3ptbl->ptbl_flags &= ~PTBL_KEEP;
-+        elan3mmu_free_l3ptbl (dev, l3ptbl, l3lock, l3flags);
-+
-+        /* and unlock the level 1 ptbl */
-+        elan3mmu_unlock_ptbl (l1ptbl, l1lock, l1flags);
-+
-+        /* lock the level 2 page table, and clear out the PTP, then free it */
-+        (void) elan3mmu_lock_ptbl (l2ptbl, 0, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags);
-+
-+        HAT_PRINTF2 (2, "elan3mmu_ptealloc: locked l2 ptbl %p (%x)\n", l2ptbl, l2ptbl->ptbl_flags);
-+
-+        tl2ptp = ELAN3_INVALID_PTP;
-+        elan3_writeptp (dev, l2ptp, tl2ptp);
-+        l2ptbl->ptbl_valid = 0;
-+
-+        HAT_PRINTF3 (2, "elan3mmu_ptealloc: set to 0 valid for level %d ptbl %p to %d\n", PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid);
-+
-+        HAT_PRINTF2 (2, "elan3mmu_ptealloc: write level 2 ptp %lx to %x\n", l2ptp, tl2ptp);
-+        HAT_PRINTF2 (2, "elan3mmu_ptealloc: freeing l2 ptbl %p (%x)\n", l2ptbl, l2ptbl->ptbl_flags);
-+
-+        elan3mmu_free_l2ptbl (dev, l2ptbl, l2lock, l2flags);
-+
-+        goto retryl1;
-+    }
-+
-+    HAT_PRINTF4 (2, "elan3mmu_ptealloc: l1ptbl is %p (%x), l3ptbl is %p (%x)\n",
-+                 l1ptbl, l1ptbl->ptbl_flags, l3ptbl, l3ptbl->ptbl_flags);
-+
-+    /* Now have L1 and L3 locked, so install the L2 ptbl */
-+    l1ptp  = PTBL_TO_PTADDR(l1ptbl) + ELAN3_L1_INDEX(addr)*ELAN3_PTP_SIZE;
-+    tl1ptp = PTBL_TO_PTADDR(l2ptbl) | ELAN3_ET_PTP;
-+    l1ptbl->ptbl_valid++;
-+
-+    HAT_PRINTF3 (2, "elan3mmu_ptealloc: inc valid for level %d ptbl %p to %d\n",
-+                 PTBL_LEVEL(l1ptbl->ptbl_flags), l1ptbl, l1ptbl->ptbl_valid);
-+
-+    elan3_writeptp (dev, l1ptp, tl1ptp);
-+
-+    HAT_PRINTF2 (2, "elan3mmu_ptealloc: write l1ptp %lx to %x\n", l1ptp, tl1ptp);
-+
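-+    /*
-+     * Note on the "unordered unlock" below: the lock/irq-flags pairs are
-+     * deliberately crossed over - l1lock is released with l3flags and
-+     * l1flags is handed back to the caller, so that saved interrupt state
-+     * is always restored in the reverse of the order it was saved.
-+     */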
-+    /* unordered unlock - lock l1ptbl, lock l3ptbl, unlock l1ptbl */
-+    elan3mmu_unlock_ptbl (l1ptbl, l1lock, l3flags);     /* free using l3flags to keep irq ordering */
-+
-+    l3pte = PTBL_TO_PTADDR (l3ptbl) + ELAN3_L3_INDEX(addr)*ELAN3_PTE_SIZE;
-+
-+    /* Level 3 ptbl is already locked, so just return the pte */
-+    *pptbl = l3ptbl;
-+    *plock = l3lock;
-+    *flags = l1flags;                                   /* return l1flags to keep irq ordering */
-+
-+    return (l3pte);
-+
-+    default:
-+        panic ("elan3mmu_ptealloc: found bad entry in level 1 page table");
-+        /* NOTREACHED */
-+    }
-+
-+    HAT_PRINTF1 (2, "elan3mmu_ptealloc: chain to level 2 ptbl from ptp %x\n", tl1ptp);
-+
-+    l2ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp);
-+    l2ptp  = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr)*ELAN3_PTP_SIZE;
-+    l2base = ELAN3_L2_BASE(addr);
-+
-+    tl2ptp = elan3_readptp (dev, l2ptp);
-+
-+    HAT_PRINTF5 (2, "elan3mmu_ptealloc: l2ptbl %p l2ptp %lx l2base %x (%x) : tl2ptp %x\n",
-+                 l2ptbl, l2ptp, l2base, l2ptbl->ptbl_base, tl2ptp);
-+
-+    switch (ELAN3_PTP_TYPE(tl2ptp))
-+    {
-+    case ELAN3_ET_PTE:
-+        if (level == PTBL_LEVEL_2) {
-+            /* this is a pointer to a pte, we should just return it */
-+
-+            switch (elan3mmu_lock_ptbl (l2ptbl, 0, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags))
-+            {
-+            case LK_PTBL_OK:
-+                break;
-+
-+            case LK_PTBL_FAILED:
-+                panic ("elan3mmu_ptealloc: l2 lock failed");
-+                /* NOTREACHED */
-+
-+            case LK_PTBL_MISMATCH:
-+                HAT_PRINTF6 (2, "elan3mmu_ptealloc: PTBL_MISMATCH : ptbl %p flags %x elan3mmu %p base %x (%p %x)\n",
-+                             l2ptbl, l2ptbl->ptbl_flags, l2ptbl->ptbl_elan3mmu, l2ptbl->ptbl_base, elan3mmu, addr);
-+
-+                /*
-+                 * We've trogged down to this ptbl, but someone has just
-+                 * stolen it, so try all over again.
-+                 */
-+                goto retryl1;
-+
-+            default:
-+                panic ("elan3mmu_ptealloc: elan3mmu_lock_ptbl returned bad value");
-+                /* NOTREACHED */
-+            }
-+
-+            tl2ptp = elan3_readptp (dev, l2ptp);
-+            if (ELAN3_PTP_TYPE(tl2ptp) != ELAN3_ET_PTE)
-+            {
-+                elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
-+                goto retryl1;
-+            }
-+
-+            *pptbl = l2ptbl;
-+            *plock = l2lock;
-+            *flags = l2flags;
-+
-+            /* return holding l2lock */
-+            return (l2ptp);
-+        }
-+        panic ("elan3mmu: found pte in level 2 page table");
-+        /* NOTREACHED */
-+
-+    case ELAN3_ET_PTP:
-+        break;
-+
-+    case ELAN3_ET_INVALID:
-+        if (level == PTBL_LEVEL_2)
-+        {
-+            if ((lXptbl = elan3mmu_alloc_pte (dev, elan3mmu, &idx)) == NULL)
-+                return ((sdramaddr_t) 0);
-+
-+            switch (elan3mmu_lock_ptbl (l2ptbl, 0, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags))
-+            {
-+            case LK_PTBL_OK:
-+                break;
-+
-+            case LK_PTBL_FAILED:
-+                panic ("elan3mmu_ptealloc: l2 lock failed");
-+                /* NOTREACHED */
-+
-+            case LK_PTBL_MISMATCH:
-+                HAT_PRINTF6 (2, "elan3mmu_ptealloc: PTBL_MISMATCH : ptbl %p flags %x elan3mmu %p base %x (%p %x)\n",
-+                             l2ptbl, l2ptbl->ptbl_flags, l2ptbl->ptbl_elan3mmu, l2ptbl->ptbl_base, elan3mmu, addr);
-+
-+                /*
-+                 * We've trogged down to this ptbl, but someone has just
-+                 * stolen it, so try all over again.
-+                 */
-+                goto retryl1;
-+
-+            default:
-+                panic ("elan3mmu_ptealloc: elan3mmu_lock_ptbl returned bad value");
-+                /* NOTREACHED */
-+            }
-+
-+            tl2ptp = elan3_readptp (dev, l2ptp);
-+            if (ELAN3_PTP_TYPE(tl2ptp) != ELAN3_ET_INVALID)
-+            {
-+                HAT_PRINTF0 (2, "elan3mmu_ptealloc: beaten to it, free lx pte\n");
-+
-+                elan3mmu_free_pte (dev, elan3mmu, lXptbl, idx);
-+
-+                elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
-+                goto retryl1;
-+            }
-+
-+            /* Connect l2ptbl to the new LX pte */
-+            tl2ptp = PTBL_TO_PTADDR(lXptbl) | (idx * ELAN3_PTE_SIZE) | ELAN3_ET_PTE;
-+
-+            HAT_PRINTF3 (2, "elan3mmu_ptealloc: inc valid for level %d ptbl %p to %d\n",
-+                         PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid);
-+
-+            elan3_writeptp (dev, l2ptp, tl2ptp);
-+
-+            HAT_PRINTF2 (2, "elan3mmu_ptealloc: write l2ptp %lx to %x\n", l2ptp, tl2ptp);
-+
-+            *pptbl = l2ptbl;
-+            *plock = l2lock;
-+            *flags = l2flags;
-+
-+            /* return holding l2lock */
-+            return (l2ptp);
-+        }
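-+        /*
-+         * Fall through: the caller wants a mapping below this invalid
-+         * level 2 entry, so allocate a level 3 page table and link it
-+         * in, using PTBL_KEEP to pin it while the locks are re-taken
-+         * in the correct order.
-+         */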
-+        HAT_PRINTF0 (2, "elan3mmu_ptealloc: allocate level 3 page table\n");
-+
-+        if ((l3ptbl = elan3mmu_alloc_l3ptbl (dev, attr, l2ptbl, elan3mmu, ELAN3_L3_BASE(addr), &l3lock, &l3flags)) == NULL)
-+            return ((sdramaddr_t) 0);
-+
-+        if (CTXT_IS_KERNEL (elan3mmu->elan3mmu_ctxt))
-+            elan3mmu_kernel_l3ptbl (l3ptbl);
-+
-+        /*
-+         * Now need to lock the l2 ptbl - to maintain lock ordering
-+         * we set the PTBL_KEEP bit to stop the l3 ptbl from being
-+         * stolen and drop the locks in the order we acquired them
-+         */
-+        l3ptbl->ptbl_flags |= PTBL_KEEP;
-+
-+        elan3mmu_unlock_ptbl (l3ptbl, l3lock, l3flags);
-+
-+        if (elan3mmu_lock_ptbl (l2ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags) == LK_PTBL_MISMATCH)
-+        {
-+            HAT_PRINTF0 (2, "elan3mmu_ptealloc: l2ptbl freed, free l3 ptbl and try again\n");
-+
-+            elan3mmu_lock_ptbl (l3ptbl, 0, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &l3flags);
-+
-+            /* free off the level 3 page table, and try again */
-+            l3ptbl->ptbl_flags &= ~PTBL_KEEP;
-+            elan3mmu_free_l3ptbl (dev, l3ptbl, l3lock, l3flags);
-+
-+            goto retryl1;
-+        }
-+
-+        elan3mmu_lock_ptbl (l3ptbl, 0, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &l3flags);
-+
-+        l3ptbl->ptbl_flags &= ~PTBL_KEEP;
-+
-+        /* Now have L2 and L3 ptbls locked, see if someone has beaten us to it. 
*/ -+ tl2ptp = elan3_readptp (dev, l2ptp); -+ -+ HAT_PRINTF2 (2, "elan3mmu_ptealloc: l2ptp at %lx is %x\n", l2ptp, tl2ptp); -+ -+ if (ELAN3_PTP_TYPE(tl2ptp) != ELAN3_ET_INVALID) -+ { -+ HAT_PRINTF0 (2, "elan3mmu_ptealloc: beaten to it, free l3 ptbl and try again\n"); -+ -+ /* free off the level 3 page table, and try again */ -+ l3ptbl->ptbl_flags &= ~PTBL_KEEP; -+ elan3mmu_free_l3ptbl (dev, l3ptbl, l3lock, l3flags); -+ -+ /* Someone has allocated the ptbl before us */ -+ elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags); -+ -+ goto retryl1; -+ } -+ -+ ASSERT (PTBL_IS_LOCKED (l2ptbl->ptbl_flags)); -+ -+ /* Install the L3 ptbl into the L2 one */ -+ l2ptp = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr)*ELAN3_PTP_SIZE; -+ tl2ptp = PTBL_TO_PTADDR(l3ptbl) | ELAN3_ET_PTP; -+ l2ptbl->ptbl_valid++; -+ -+ HAT_PRINTF3 (2, "elan3mmu_ptealloc: inc valid for level %d ptbl %p to %d\n", -+ PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid); -+ -+ elan3_writeptp (dev, l2ptp, tl2ptp); -+ -+ HAT_PRINTF2 (2, "elan3mmu_ptealloc: write level 2 ptp %lx to %x\n", l2ptp, tl2ptp); -+ -+ /* unordered unlock - lock l2ptbl, lock l3ptbl, unlock l2ptbl */ -+ elan3mmu_unlock_ptbl (l2ptbl, l2lock, l3flags); /* free with the l3flags to keep irq ordering */ -+ -+ l3pte = PTBL_TO_PTADDR(l3ptbl) + ELAN3_L3_INDEX(addr)*ELAN3_PTE_SIZE; -+ -+ /* Level 3 ptbl is already locked, so just return the pte */ -+ *pptbl = l3ptbl; -+ *plock = l3lock; -+ *flags = l2flags; /* return l2flags to keep irq ordering */ -+ -+ return (l3pte); -+ -+ default: -+ panic ("elan3mmu_ptealloc: found bad entry in level 2 page table"); -+ /* NOTREACHED */ -+ } -+ -+ HAT_PRINTF1 (2, "elan3mmu_ptealloc: chain to level 3 page table from ptp %x\n", tl2ptp); -+ -+ l3ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp); -+ l3pte = PTBL_TO_PTADDR(l3ptbl) + ELAN3_L3_INDEX(addr)*ELAN3_PTE_SIZE; -+ l3base = ELAN3_L3_BASE(addr); -+ -+ HAT_PRINTF4 (2, "elan3mmu_ptealloc: l3ptbl %p 3pte %lx l3base %x (%x)\n", -+ l3ptbl, l3pte, l3base, l3ptbl->ptbl_base); -+ -+ if (elan3mmu_lock_ptbl (l3ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &l3flags) == LK_PTBL_OK) -+ { -+ *pptbl = l3ptbl; -+ *plock = l3lock; -+ *flags = l3flags; -+ -+ return (l3pte); -+ } -+ -+ /* got all the way down here, but its been nicked before we could lock it */ -+ /* so try all over again */ -+ goto retryl1; -+} -+ -+void -+elan3mmu_l1inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l1ptbl, int attr) -+{ -+ ELAN3_DEV *dev = elan3mmu->elan3mmu_dev; -+ ELAN3_PTP invalidptp = ELAN3_INVALID_PTP; -+ ELAN3_PTP tl1ptp; -+ sdramaddr_t l1ptp; -+ E3_Addr addr; -+ spinlock_t *l2lock; -+ ELAN3_PTBL *l2ptbl; -+ ELAN3_PTBL *lXptbl; -+ int idx; -+ int i; -+ int ret; -+ unsigned long flags; -+ -+ l1ptp = PTBL_TO_PTADDR(l1ptbl); -+ -+ HAT_PRINTF2 (1, "elan3mmu_l1inval: l1ptbl %p l1ptp %lx\n", l1ptbl, l1ptp); -+ -+ for (i = 0, addr = 0; i < ELAN3_L1_ENTRIES; i++, l1ptp += ELAN3_PTP_SIZE) -+ { -+ tl1ptp = elan3_readptp (dev, l1ptp); -+ switch (ELAN3_PTP_TYPE(tl1ptp)) -+ { -+ case ELAN3_ET_PTE: -+ lXptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp); -+ idx = (PTP_TO_PT_PADDR(tl1ptp) - PTBL_TO_PTADDR(lXptbl))/ELAN3_PTE_SIZE; -+ -+ HAT_PRINTF3 (2, "elan3mmu_l1inval: l1ptbl %p : lXptbl %p idx %d\n", -+ l1ptbl, lXptbl, idx); -+ -+ /* invalidate the L1 pte. */ -+ elan3_writeptp (dev, l1ptp, invalidptp); -+ if (! 
(attr & PTE_UNLOAD_NOFLUSH))
-+                ElanFlushTlb (dev);
-+
-+            l1ptbl->ptbl_valid--;
-+            elan3mmu_free_pte (dev, elan3mmu, lXptbl, idx);
-+
-+            HAT_PRINTF3 (2, "elan3mmu_l1inval: dec valid for level %d ptbl %p to %d\n",
-+                         PTBL_LEVEL(l1ptbl->ptbl_flags), l1ptbl, l1ptbl->ptbl_valid);
-+
-+            break;
-+
-+        case ELAN3_ET_PTP:
-+            HAT_PRINTF5 (2, "elan3mmu_l1inval: l1ptbl %p : ptp %lx (%x) addr %x (%d)\n",
-+                         l1ptbl, l1ptp, tl1ptp, addr, i);
-+
-+            /* invalidate the L1 ptp. */
-+            elan3_writeptp (dev, l1ptp, invalidptp);
-+            if (! (attr & PTE_UNLOAD_NOFLUSH))
-+                ElanFlushTlb (dev);
-+
-+            /* invalidate the level 2 page table */
-+            l2ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp);
-+            ret = elan3mmu_l2inval (elan3mmu, l2ptbl, attr | PTE_UNLOAD_NOFLUSH, addr, &l2lock, &flags);
-+
-+            ASSERT ((l2ptbl->ptbl_flags & PTBL_KEEP) == 0);
-+
-+            if (ret == LK_PTBL_OK)
-+            {
-+                if (((l2ptbl->ptbl_flags & PTBL_KEEP) == 0) && l2ptbl->ptbl_valid == 0)
-+                {
-+                    HAT_PRINTF1 (2, "elan3mmu_l1inval: free l2ptbl %p\n", l2ptbl);
-+
-+                    l1ptbl->ptbl_valid--;
-+                    elan3mmu_free_l2ptbl (elan3mmu->elan3mmu_dev, l2ptbl, l2lock, flags);
-+
-+                    HAT_PRINTF3 (2, "elan3mmu_l1inval: dec valid for level %d ptbl %p to %d\n",
-+                                 PTBL_LEVEL(l1ptbl->ptbl_flags), l1ptbl, l1ptbl->ptbl_valid);
-+                }
-+                else
-+                {
-+                    /* need to keep this page table, so even though it's now empty, */
-+                    /* chain it back in */
-+                    HAT_PRINTF1 (2, "elan3mmu_l1inval: keep l2ptbl %p\n", l2ptbl);
-+
-+                    elan3_writeptp (dev, l1ptp, tl1ptp);
-+                    elan3mmu_unlock_ptbl (l2ptbl, l2lock, flags);
-+                }
-+            }
-+            else
-+            {
-+                l1ptbl->ptbl_valid--;
-+
-+                HAT_PRINTF3 (2, "elan3mmu_l1inval: dec valid for level %d ptbl %p to %d\n",
-+                             PTBL_LEVEL(l1ptbl->ptbl_flags), l1ptbl, l1ptbl->ptbl_valid);
-+            }
-+            break;
-+
-+        case ELAN3_ET_INVALID:
-+            break;
-+
-+        default:
-+            panic ("elan3mmu_l1inval: found bad entry in level 1 page table");
-+            /* NOTREACHED */
-+        }
-+
-+        if (l1ptbl->ptbl_valid == 0)
-+            break;
-+
-+        addr += ELAN3_L1_SIZE;
-+    }
-+}
-+
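-+/*
-+ * elan3mmu_l2inval: invalidate every entry of a level 2 page table.
-+ * On LK_PTBL_OK it returns with the level 2 ptbl still locked (via
-+ * *pl2lock/*flags), so the caller above can either free the now empty
-+ * table or chain it back in before unlocking.
-+ */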
-+int
-+elan3mmu_l2inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l2ptbl, int attr, E3_Addr addr, spinlock_t **pl2lock, unsigned long *flags)
-+{
-+    ELAN3_DEV   *dev        = elan3mmu->elan3mmu_dev;
-+    ELAN3_PTP    invalidptp = ELAN3_INVALID_PTP;
-+    ELAN3_PTP    tl2ptp;
-+    sdramaddr_t  l2ptp;
-+    spinlock_t  *l3lock;
-+    unsigned long l3flags;
-+    ELAN3_PTBL  *l3ptbl;
-+    ELAN3_PTBL  *lXptbl;
-+    int          idx;
-+    int          i;
-+    int          ret;
-+
-+    HAT_PRINTF2 (1, "elan3mmu_l2inval: l2ptbl %p addr %x\n", l2ptbl, addr);
-+
-+    ASSERT (PTBL_LEVEL (l2ptbl->ptbl_flags) == PTBL_LEVEL_2);
-+    ASSERT (PTBL_LEVEL (l2ptbl->ptbl_parent->ptbl_flags) == PTBL_LEVEL_1);
-+
-+    ret = elan3mmu_lock_ptbl (l2ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_2, pl2lock, flags);
-+
-+    ASSERT (ret == LK_PTBL_OK);
-+    ASSERT (l2ptbl->ptbl_elan3mmu == elan3mmu);
-+    ASSERT (l2ptbl->ptbl_parent->ptbl_elan3mmu == elan3mmu);
-+
-+    l2ptp = PTBL_TO_PTADDR(l2ptbl);
-+
-+    for (i = 0; i < ELAN3_L2_ENTRIES; i++, l2ptp += ELAN3_PTP_SIZE)
-+    {
-+        tl2ptp = elan3_readptp (dev, l2ptp);
-+        switch (ELAN3_PTP_TYPE(tl2ptp))
-+        {
-+        case ELAN3_ET_PTE:
-+            lXptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp);
-+            idx    = (PTP_TO_PT_PADDR(tl2ptp) - PTBL_TO_PTADDR(lXptbl))/ELAN3_PTE_SIZE;
-+
-+            HAT_PRINTF3 (2, "elan3mmu_l2inval: l2ptbl %p : lXptbl %p idx %d\n",
-+                         l2ptbl, lXptbl, idx);
-+
-+            /* invalidate the L2 pte. */
-+            elan3_writeptp (dev, l2ptp, invalidptp);
-+            if (! (attr & PTE_UNLOAD_NOFLUSH))
-+                ElanFlushTlb (dev);
-+
-+            l2ptbl->ptbl_valid--;
-+            elan3mmu_free_pte (dev, elan3mmu, lXptbl, idx);
-+
-+            HAT_PRINTF3 (2, "elan3mmu_l2inval: dec valid for level %d ptbl %p to %d\n", PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid);
-+
-+            break;
-+
-+        case ELAN3_ET_PTP:
-+            HAT_PRINTF5 (2, "elan3mmu_l2inval: l2ptbl %p : ptp %lx (%x) addr %x (%d)\n",
-+                         l2ptbl, l2ptp, tl2ptp, addr, i);
-+
-+            /* invalidate the L2 ptp. */
-+            elan3_writeptp (dev, l2ptp, invalidptp);
-+            if (! (attr & PTE_UNLOAD_NOFLUSH))
-+                ElanFlushTlb (dev);
-+
-+            /* unload the level 3 page table */
-+            l3ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp);
-+            ret = elan3mmu_l3inval (elan3mmu, l3ptbl, attr | PTE_UNLOAD_NOFLUSH, addr, &l3lock, &l3flags);
-+
-+            if (ret == LK_PTBL_OK)
-+            {
-+                if ((l3ptbl->ptbl_flags & PTBL_KEEP) == 0 && l3ptbl->ptbl_valid == 0)
-+                {
-+                    /* decrement the valid count of the level 2 page table, and */
-+                    /* free off the level 3 page table */
-+                    HAT_PRINTF1 (2, "elan3mmu_l2inval: free l3ptbl %p\n", l3ptbl);
-+
-+                    l2ptbl->ptbl_valid--;
-+                    elan3mmu_free_l3ptbl (elan3mmu->elan3mmu_dev, l3ptbl, l3lock, l3flags);
-+
-+                    HAT_PRINTF3 (2, "elan3mmu_l2inval: dec valid for level %d ptbl %p to %d\n",
-+                                 PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid);
-+                }
-+                else
-+                {
-+                    /* need to keep this page table, so even though it's now empty, */
-+                    /* chain it back in */
-+                    HAT_PRINTF1 (2, "elan3mmu_l2inval: keep l3ptbl %p\n", l3ptbl);
-+
-+                    elan3_writeptp (dev, l2ptp, tl2ptp);
-+                    elan3mmu_unlock_ptbl (l3ptbl, l3lock, l3flags);
-+                }
-+            }
-+            else
-+            {
-+                l2ptbl->ptbl_valid--;
-+
-+                HAT_PRINTF3 (2, "elan3mmu_l2inval: dec valid for level %d ptbl %p to %d\n",
-+                             PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid);
-+            }
-+            break;
-+
-+        case ELAN3_ET_INVALID:
-+            break;
-+
-+        default:
-+            panic ("elan3mmu_l2inval: found bad entry in level 2 page table");
-+            /* NOTREACHED */
-+        }
-+
-+        if (l2ptbl->ptbl_valid == 0)
-+            break;
-+
-+        addr += ELAN3_L2_SIZE;
-+    }
-+
-+    ASSERT (PTBL_IS_LOCKED(l2ptbl->ptbl_flags));
-+
-+    return (ret);
-+}
-+
-+int
-+elan3mmu_l3inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l3ptbl, int attr, E3_Addr addr, spinlock_t **pl3lock, unsigned long *flags)
-+{
-+    int ret;
-+
-+    HAT_PRINTF3 (2, "elan3mmu_l3inval: l3ptbl %p parent %p addr %x\n", l3ptbl, l3ptbl->ptbl_parent, addr);
-+
-+    ASSERT (PTBL_IS_LOCKED (l3ptbl->ptbl_parent->ptbl_flags));
-+    ASSERT (PTBL_LEVEL (l3ptbl->ptbl_parent->ptbl_flags) == PTBL_LEVEL_2);
-+    ASSERT (l3ptbl->ptbl_parent->ptbl_elan3mmu == elan3mmu);
-+    ASSERT (l3ptbl->ptbl_parent->ptbl_base == VA2BASE (ELAN3_L2_BASE(addr)));
-+
-+    ret = elan3mmu_lock_ptbl (l3ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_3, pl3lock, flags);
-+
-+    ASSERT (ret == LK_PTBL_OK);
-+    ASSERT (PTBL_LEVEL (l3ptbl->ptbl_flags) == PTBL_LEVEL_3);
-+
-+    elan3mmu_unload_loop (elan3mmu, l3ptbl, 0, ELAN3_L3_ENTRIES, attr);
-+
-+    ASSERT (PTBL_IS_LOCKED (l3ptbl->ptbl_flags));
-+
-+    return (ret);
-+}
-+
-+int
-+elan3mmu_lock_this_ptbl (ELAN3_PTBL *ptbl, int flag, spinlock_t **plock, unsigned long *flags)
-+{
-+    int         level = PTBL_LEVEL (ptbl->ptbl_flags);
-+    spinlock_t *lock  = elan3mmu_ptbl_to_lock (level, ptbl);
-+
-+    local_irq_save (*flags);
-+
-+    if ((flag & LK_PTBL_NOWAIT) == 0)
-+        spin_lock (lock);
-+    else if (! 
spin_trylock (lock)) { -+ local_irq_restore (*flags); -+ return (LK_PTBL_FAILED); -+ } -+ -+ if (level != PTBL_LEVEL (ptbl->ptbl_flags)) -+ { -+ spin_unlock (lock); -+ local_irq_restore (*flags); -+ return (LK_PTBL_MISMATCH); -+ } -+ -+ ptbl->ptbl_flags |= PTBL_LOCKED; -+ *plock = lock; -+ return (LK_PTBL_OK); -+} -+ -+int -+elan3mmu_lock_ptbl (ELAN3_PTBL *ptbl, u_int flag, ELAN3MMU *elan3mmu, E3_Addr va, int level, spinlock_t **plock, unsigned long *flags) -+{ -+ spinlock_t *lock = elan3mmu_ptbl_to_lock (level, ptbl); -+ int res = LK_PTBL_MISMATCH; -+ -+ local_irq_save (*flags); -+ -+ if ((flag & LK_PTBL_NOWAIT) == 0) -+ spin_lock (lock); -+ else if (spin_trylock (lock) == 0) { -+ local_irq_restore(*flags); -+ return (LK_PTBL_FAILED); -+ } -+ -+ if (PTBL_LEVEL (ptbl->ptbl_flags) != level) -+ { -+ res = LK_PTBL_MISMATCH; -+ goto mismatch; -+ } -+ -+ /* We have the right mutex, so check that its the ptbl we want. */ -+ switch (level) -+ { -+ case PTBL_LEVEL_1: va = ELAN3_L1_BASE(va); break; -+ case PTBL_LEVEL_2: va = ELAN3_L2_BASE(va); break; -+ case PTBL_LEVEL_3: va = ELAN3_L3_BASE(va); break; -+ } -+ -+ if (ptbl->ptbl_elan3mmu != elan3mmu || ptbl->ptbl_base != VA2BASE(va)) -+ { -+ res = LK_PTBL_MISMATCH; -+ goto mismatch; -+ } -+ -+ ASSERT ((ptbl->ptbl_flags & PTBL_LOCKED) == 0); -+ ptbl->ptbl_flags |= PTBL_LOCKED; -+ -+ *plock = lock; -+ return (LK_PTBL_OK); -+ -+mismatch: -+ if (! (flag & LK_PTBL_FAILOK)) -+ panic ("elan3mmu: failed to lock ptbl\n"); -+ -+ spin_unlock (lock); -+ local_irq_restore(*flags); -+ return (res); -+} -+ -+void -+elan3mmu_unlock_ptbl (ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags) -+{ -+ ptbl->ptbl_flags &= ~PTBL_LOCKED; -+ spin_unlock_irqrestore (lock,flags); -+} -+ -+static spinlock_t * -+elan3mmu_ptbl_to_lock (int level, ELAN3_PTBL *ptbl) -+{ -+ switch (level) -+ { -+ case PTBL_LEVEL_3: return (&l3ptbl_lock[L3PTBL_MTX_HASH(ptbl)]); -+ case PTBL_LEVEL_2: return (&l2ptbl_lock[L2PTBL_MTX_HASH(ptbl)]); -+ case PTBL_LEVEL_1: return (&l1ptbl_lock[L1PTBL_MTX_HASH(ptbl)]); -+ case PTBL_LEVEL_X: -+ panic ("elan3mmu: ptbl_to_lock, bad level X"); -+ default: -+ panic ("elan3mmu: ptbl_to_lock, bad level"); -+ /* NOTREACHED */ -+ } -+ return (NULL); -+} -+ -+void -+elan3mmu_display (ELAN3MMU *elan3mmu, E3_Addr addr) -+{ -+ ELAN3_DEV *dev = elan3mmu->elan3mmu_dev; -+ ELAN3_PTBL *l1ptbl; -+ sdramaddr_t l1ptp; -+ spinlock_t *l1lock; -+ ELAN3_PTE tl1pte; -+ ELAN3_PTP tl1ptp; -+ E3_Addr l1base; -+ ELAN3_PTBL *l2ptbl; -+ sdramaddr_t l2ptp; -+ ELAN3_PTE tl2pte; -+ spinlock_t *l2lock; -+ ELAN3_PTP tl2ptp; -+ E3_Addr l2base; -+ ELAN3_PTBL *l3ptbl; -+ sdramaddr_t l3pte; -+ ELAN3_PTE tl3pte; -+ spinlock_t *l3lock; -+ ELAN3_PTBL *lXptbl; -+ int idx; -+ unsigned long flags; -+ -+ elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: elan3mmu %p addr %x\n", elan3mmu, addr); -+ -+ l1ptbl = elan3mmu->elan3mmu_l1ptbl; -+ -+ if (l1ptbl == NULL) -+ return; -+ -+ l1ptp = PTBL_TO_PTADDR(l1ptbl) + ELAN3_L1_INDEX(addr)*ELAN3_PTP_SIZE; -+ l1base = ELAN3_L1_BASE(addr); -+ -+ tl1ptp = elan3_readptp (dev, l1ptp); -+ elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: l1ptbl %p l1ptp %lx l1base %x : tl1ptp %x\n", l1ptbl, l1ptp, l1base, tl1ptp); -+ -+ switch (ELAN3_PTP_TYPE(tl1ptp)) -+ { -+ case ELAN3_ET_PTE: -+ elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: level 1 page table for pte %x\n", tl1ptp); -+ -+ lXptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp); -+ idx = (PTP_TO_PT_PADDR(tl1ptp) - PTBL_TO_PTADDR(lXptbl))/ELAN3_PTE_SIZE; -+ -+ elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: lXptbl %p idx 
%d\n",lXptbl, idx);
-+
-+    tl1pte = elan3_readpte (dev,(PTBL_TO_PTADDR (lXptbl) + idx * ELAN3_PTE_SIZE));
-+
-+    switch (elan3mmu_lock_ptbl (l1ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &flags))
-+    {
-+    case LK_PTBL_OK:
-+        elan3mmu_unlock_ptbl (l1ptbl, l1lock, flags);
-+        elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: lvl 1 l1pte matches value %llx\n", (long long) tl1pte);
-+        break;
-+
-+    case LK_PTBL_FAILED:
-+        panic ("elan3mmu_display: l1 lock failed");
-+        /* NOTREACHED */
-+
-+    case LK_PTBL_MISMATCH:
-+        elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: PTBL_MISMATCH : lvl 1 ptbl %p flags %x elan3mmu %p base %x (%p %x) %llx\n",
-+                      l1ptbl, l1ptbl->ptbl_flags, l1ptbl->ptbl_elan3mmu, l1ptbl->ptbl_base, elan3mmu, addr, (long long)tl1pte);
-+
-+        break;
-+    default:
-+        panic ("elan3mmu_display: lvl 1 elan3mmu_lock_ptbl returned bad value");
-+        /* NOTREACHED */
-+    }
-+    return;
-+
-+    case ELAN3_ET_INVALID:
-+        return;
-+
-+    case ELAN3_ET_PTP:
-+        break;
-+
-+    default:
-+        panic ("elan3mmu_display: found bad entry in level 1 page table");
-+        /* NOTREACHED */
-+    }
-+
-+    elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: chain to level 2 ptbl from ptp %x\n", tl1ptp);
-+
-+    l2ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp);
-+    l2ptp  = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr)*ELAN3_PTP_SIZE;
-+    l2base = ELAN3_L2_BASE(addr);
-+
-+    tl2ptp = elan3_readptp (dev, l2ptp);
-+    elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: l2ptbl %p l2ptp %lx l2base %x : tl2ptp %x\n",
-+                  l2ptbl, l2ptp, l2base, tl2ptp);
-+
-+    switch (ELAN3_PTP_TYPE(tl2ptp))
-+    {
-+    case ELAN3_ET_PTE:
-+        elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: level 2 page table for pte %x\n", tl2ptp);
-+
-+        lXptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp);
-+        idx    = (PTP_TO_PT_PADDR(tl2ptp) - PTBL_TO_PTADDR(lXptbl))/ELAN3_PTE_SIZE;
-+
-+        elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: lXptbl %p idx %d\n", lXptbl, idx);
-+
-+        tl2pte = elan3_readpte (dev,(PTBL_TO_PTADDR (lXptbl) + idx * ELAN3_PTE_SIZE));
-+
-+        switch (elan3mmu_lock_ptbl (l2ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &flags))
-+        {
-+        case LK_PTBL_OK:
-+            elan3mmu_unlock_ptbl (l2ptbl, l2lock, flags);
-+            elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: lvl 2 l2pte matches value %llx\n", (long long)tl2pte);
-+            break;
-+
-+        case LK_PTBL_FAILED:
-+            panic ("elan3mmu_display: l2 lock failed");
-+            /* NOTREACHED */
-+
-+        case LK_PTBL_MISMATCH:
-+            elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: PTBL_MISMATCH : lvl 2 ptbl %p flags %x elan3mmu %p base %x (%p %x) %llx\n",
-+                          l2ptbl, l2ptbl->ptbl_flags, l2ptbl->ptbl_elan3mmu, l2ptbl->ptbl_base, elan3mmu, addr, (long long) tl2pte);
-+
-+            break;
-+        default:
-+            panic ("elan3mmu_display: lvl 2 elan3mmu_lock_ptbl returned bad value");
-+            /* NOTREACHED */
-+        }
-+        return;
-+
-+    case ELAN3_ET_INVALID:
-+        return;
-+
-+    case ELAN3_ET_PTP:
-+        break;
-+
-+    default:
-+        panic ("elan3mmu_display: found bad entry in level 2 page table");
-+        /* NOTREACHED */
-+    }
-+
-+    elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: chain to level 3 page table from ptp %x\n", tl2ptp);
-+
-+    l3ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp);
-+    l3pte  = PTBL_TO_PTADDR(l3ptbl) + ELAN3_L3_INDEX(addr)*ELAN3_PTE_SIZE;
-+
-+    elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: l3ptbl %p l3pte %lx\n", l3ptbl, l3pte);
-+
-+    tl3pte = elan3_readpte (dev, l3pte);
-+    switch (elan3mmu_lock_ptbl (l3ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &flags))
-+    {
-+    case LK_PTBL_OK:
-+        elan3mmu_unlock_ptbl (l3ptbl, l3lock, flags);
-+        elan3_debugf (NULL, DBG_HAT, 
"elan3mmu_display: l3pte matches value %llx\n", (long long) tl3pte); -+ break; -+ -+ case LK_PTBL_FAILED: -+ panic ("elan3mmu_display: l3 lock failed"); -+ /* NOTREACHED */ -+ -+ case LK_PTBL_MISMATCH: -+ elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: PTBL_MISMATCH : ptbl %p flags %x elan3mmu %p base %x (%p %x) %llx\n", -+ l3ptbl, l3ptbl->ptbl_flags, l3ptbl->ptbl_elan3mmu, l3ptbl->ptbl_base, elan3mmu, addr, (long long) tl3pte); -+ -+ break; -+ -+ default: -+ panic ("elan3mmu_display: elan3mmu_lock_ptbl returned bad value"); -+ /* NOTREACHED */ -+ } -+} -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan3/elan3mmu_linux.c linux-2.6.9/drivers/net/qsnet/elan3/elan3mmu_linux.c ---- clean/drivers/net/qsnet/elan3/elan3mmu_linux.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan3/elan3mmu_linux.c 2004-12-14 05:19:38.000000000 -0500 -@@ -0,0 +1,284 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: elan3mmu_linux.c,v 1.53 2004/12/14 10:19:38 mike Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/vm/elan3mmu_linux.c,v $*/ -+ -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* -+ * Strategy for syncing main <-> elan pte's: -+ * -+ * Install callbacks for linux flush_tlb_page(), flush_tlb_range(), -+ * flush_tlb_all(), and flush_tlb_mm() so when a main PTE changes, -+ * the elan translations, if any, are invalidated. They can then be -+ * faulted in again with the correct physical page, perms, etc., on demand. -+ * -+ * Callbacks are stacked on the mm_struct, one per context. We also stack -+ * a ctxt pointer so we don't have to do lookups on every call. -+ * -+ * Sanity check -- we clearly want to flush the elan PTEs in these -+ * situations, all of which are covered by tlb_flush_{page,range}() -+ * -+ * 1) kernel/vmscan.c::try_to_swap_out() swaps out a page -+ * -+ * 2) kernel/mremap.c::copy_one_pte() moves a page as a result of the -+ * mremap system call -+ * -+ * 3) kernel/mprotect.c::change_pte_range() changes the permissions of a -+ * page as the result of the mprotect system call -+ * -+ * Other Notes: -+ * -+ * Dirty a page in the mains page tables when it is faulted into the elan. -+ * This way it will not be thrown away by the swapper. -+ * -+ * Pages write protected for COW are copied by elan3mmu_main_pagefault() -+ * when a writeable translation is loaded into the elan. 
-+ */
-+
-+caddr_t   elan3mmu_kernel_invalid_space;
-+ELAN3_PTE elan3mmu_kernel_invalid_pte_val;
-+
-+void
-+elan3mmu_init_osdep (void)
-+{
-+    pte_t *pte;
-+
-+    KMEM_GETPAGES (elan3mmu_kernel_invalid_space, caddr_t, 1, TRUE);
-+
-+    ASSERT(elan3mmu_kernel_invalid_space != NULL);
-+
-+    pte = find_pte_kernel ((unsigned long) elan3mmu_kernel_invalid_space);
-+
-+    elan3mmu_kernel_invalid_pte_val = ELAN3_PTE_64_BIT | (pte_phys(*pte) & ELAN3_PTE_PFN_MASK) | ELAN3_PERM_REMOTEREAD | ELAN3_ET_PTE;
-+
-+#ifdef __alpha
-+    /*
-+     * NOTE: Elan sign-extends bit 48 of the physical address, so if we need to
-+     * set any of bits 63:48, then we will set them all by setting bit 48.
-+     */
-+    if (alpha_mv.pci_dac_offset & 0xFFFF000000000000ull)
-+        elan3mmu_kernel_invalid_pte_val |= (1ull << 48);
-+    else
-+        elan3mmu_kernel_invalid_pte_val |= alpha_mv.pci_dac_offset;
-+#endif
-+
-+    HAT_PRINTF(0x10, "elan3mmu_invalid_space at %p phys=%llx pte=%llx\n", elan3mmu_kernel_invalid_space,
-+               (unsigned long long) pte_phys(*pte), (unsigned long long) elan3mmu_kernel_invalid_pte_val);
-+}
-+
-+void
-+elan3mmu_fini_osdep()
-+{
-+    KMEM_FREEPAGES (elan3mmu_kernel_invalid_space, 1);
-+}
-+
-+void
-+elan3mmu_alloc_osdep (ELAN3MMU *elan3mmu)
-+{
-+    elan3mmu->elan3mmu_coproc_mm = current->mm;
-+}
-+
-+/*
-+ * Convert physical page frame number to elan pte.
-+ */
-+ELAN3_PTE
-+elan3mmu_phys_to_pte (ELAN3_DEV *dev, physaddr_t paddr, int perm)
-+{
-+    ELAN3_PTE newpte;
-+
-+    ASSERT (paddr != 0);
-+
-+    if ((paddr & dev->SdramPhysMask) == dev->SdramPhysBase)      /* SDRAM, turn on PTE_LOCAL bit */
-+    {
-+        PRINTF(NULL, DBG_HAT, "elan3mmu_phys_to_pte: phys %llx SDRAM\n", (unsigned long long) paddr);
-+
-+        newpte = ELAN3_PTE_LOCAL | (paddr & ELAN3_PTE_PFN_MASK & ~dev->SdramPhysMask) | perm | ELAN3_ET_PTE;
-+    }
-+#if defined(LINUX_ALPHA)
-+    else if ((paddr & dev->PciPhysMask) == dev->PciPhysBase)
-+    {
-+        PRINTF(NULL, DBG_HAT, "elan3mmu_phys_to_pte: phys %llx PCI\n", (unsigned long long) paddr);
-+        newpte = ELAN3_PTE_64_BIT | (paddr & ELAN3_PTE_PFN_MASK & ~dev->PciPhysMask) | perm | ELAN3_ET_PTE;
-+    }
-+#endif
-+    else                                                         /* main memory, must convert to PCI view */
-+    {
-+        PRINTF(NULL, DBG_HAT, "elan3mmu_phys_to_pte: phys %llx is main memory\n", (unsigned long long) paddr);
-+
-+        /* main memory, just set the architecture specific PTE_BYPASS bit */
-+        /* This requires the Tsunami chipset being programmed to support
-+         * the monster window option. This is in linux-2.4.5 and later kernels
-+         * and is also patched into the RH 7.1/2.4.3-12 Alpha kernel
-+         */
-+        newpte = ELAN3_PTE_64_BIT | (paddr & ELAN3_PTE_PFN_MASK) | perm | ELAN3_ET_PTE;
-+
-+#ifdef __alpha
-+        /*
-+         * NOTE: Elan sign-extends bit 48 of the physical address, so if we need to
-+         * set any of bits 63:48, then we will set them all by setting bit 48.
-+         */
-+        if (alpha_mv.pci_dac_offset & 0xFFFF000000000000ull)
-+            newpte |= (1ull << 48);
-+        else
-+            newpte |= alpha_mv.pci_dac_offset;
-+#endif
-+    }
-+
-+    if (ELAN3_PERM_WRITEABLE( perm ))
-+        newpte |= ( ELAN3_PTE_MOD | ELAN3_PTE_REF );
-+    else
-+        newpte |= ( ELAN3_PTE_REF );
-+
-+    return (newpte);
-+}
-+
-+ELAN3_PTE
-+elan3mmu_kernel_invalid_pte (ELAN3MMU *elan3mmu)
-+{
-+    if (elan3mmu->elan3mmu_dev->Devinfo.dev_revision_id == PCI_REVISION_ID_ELAN3_REVB)
-+        return (elan3mmu_kernel_invalid_pte_val);
-+    return (ELAN3_INVALID_PTE);
-+}
-+
-+/*
-+ * Invalidate a range of addresses for specified context.
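-+ * This is driven from the flush_tlb_page/flush_tlb_range callbacks
-+ * described in the strategy comment at the top of this file.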
-+ */ -+void -+elan3mmu_pte_range_unload (ELAN3MMU *elan3mmu, struct mm_struct *mm, caddr_t addr, unsigned long len) -+{ -+ E3_Addr eaddr; -+ ELAN3MMU_RGN *rgn; -+ unsigned long span; -+ -+ spin_lock (&elan3mmu->elan3mmu_lock); -+ -+ for (; len; len -= span, addr += span) -+ { -+ rgn = elan3mmu_findrgn_main (elan3mmu, addr, 0); -+ -+ if (rgn == NULL || (rgn->rgn_mbase + rgn->rgn_len) < addr) -+ span = len; -+ else if (rgn->rgn_mbase > addr) -+ span = MIN(len, rgn->rgn_mbase - addr); -+ else -+ { -+ span = MIN(len, (rgn->rgn_mbase + rgn->rgn_len) - addr); -+ eaddr = rgn->rgn_ebase + (addr - rgn->rgn_mbase); -+ -+ HAT_PRINTF(0x10, " unloading eaddr %x main %p (%ld pages)\n", -+ eaddr, addr, btopr(span)); -+ elan3mmu_unload (elan3mmu, eaddr, span, PTE_UNLOAD); -+ } /* takes care of elan tlb flush also */ -+ } -+ -+ spin_unlock (&elan3mmu->elan3mmu_lock); -+} -+ -+/* -+ * -+ */ -+void -+elan3mmu_update_range (ELAN3MMU *elan3mmu, struct mm_struct *mm, caddr_t vaddr, E3_Addr eaddr, u_int len, u_int perm) -+{ -+ u_int roperm = ELAN3_PERM_READONLY(perm & ELAN3_PTE_PERM_MASK) | (perm & ~ELAN3_PTE_PERM_MASK); -+ u_int off; -+ -+ HAT_PRINTF3(1, "elan3mmu_update_range (elan3mmu %p addr %p -> %p)\n", elan3mmu, vaddr, vaddr+len-1); -+ -+ while (len > 0) -+ { -+ pte_t *pte_ptr; -+ pte_t pte_value; -+ -+ pte_ptr = find_pte_map(mm, (unsigned long)vaddr); -+ if (pte_ptr) { -+ pte_value = *pte_ptr; -+ pte_unmap(pte_ptr); -+ } -+ -+ HAT_PRINTF(0x10, " elan3mmu_update_range %x (%p) %s\n", eaddr, vaddr, -+ !pte_ptr ? "invalid" : pte_none(pte_value) ? "none " : !pte_present(pte_value) ? "swapped " : -+ !pte_write(pte_value) ? "RO/COW" : "OK"); -+ -+ if (pte_ptr && !pte_none(pte_value) && pte_present(pte_value)) -+ for (off = 0; off < PAGE_SIZE; off += ELAN3_PAGE_SIZE) -+ elan3mmu_pteload (elan3mmu, PTBL_LEVEL_3, eaddr + off, pte_phys(pte_value) + off, pte_write(pte_value) ? perm : roperm, PTE_LOAD|PTE_NO_SLEEP|PTE_NO_STEAL); -+ vaddr += PAGESIZE; -+ eaddr += PAGESIZE; -+ len -= PAGESIZE; -+ } -+} -+ -+/* -+ * Update a range of addresses for specified context. -+ */ -+void -+elan3mmu_pte_range_update (ELAN3MMU *elan3mmu, struct mm_struct *mm,caddr_t vaddr, unsigned long len) -+{ -+ E3_Addr eaddr; -+ ELAN3MMU_RGN *rgn; -+ unsigned long span; -+ -+ spin_lock (&elan3mmu->elan3mmu_lock); -+ -+ for (; len; len -= span, vaddr += span) -+ { -+ rgn = elan3mmu_findrgn_main (elan3mmu, vaddr, 0); -+ -+ if (rgn == NULL || (rgn->rgn_mbase + rgn->rgn_len) < vaddr) -+ span = len; -+ else if (rgn->rgn_mbase > vaddr) -+ span = MIN(len, rgn->rgn_mbase - vaddr); -+ else -+ { -+ span = MIN(len, (rgn->rgn_mbase + rgn->rgn_len) - vaddr); -+ eaddr = rgn->rgn_ebase + (vaddr - rgn->rgn_mbase); -+ -+ HAT_PRINTF(0x10, " updating eaddr %u main %p (%ld pages)\n", -+ eaddr, vaddr, btopr(span)); -+ -+ elan3mmu_update_range(elan3mmu, mm, vaddr, eaddr, span, rgn->rgn_perm); -+ } -+ } -+ -+ spin_unlock (&elan3mmu->elan3mmu_lock); -+} -+ -+/* -+ * Invalidate all ptes for the given context. -+ */ -+void -+elan3mmu_pte_ctxt_unload(ELAN3MMU *elan3mmu) -+{ -+ ELAN3_PTBL *l1ptbl = (elan3mmu ? 
elan3mmu->elan3mmu_l1ptbl : NULL); -+ spinlock_t *l1mtx; -+ unsigned long flags; -+ -+ if (l1ptbl && elan3mmu_lock_ptbl (l1ptbl, LK_PTBL_FAILOK, elan3mmu, (E3_Addr) 0, 1, &l1mtx, &flags) == LK_PTBL_OK) -+ { -+ elan3mmu_l1inval(elan3mmu, elan3mmu->elan3mmu_l1ptbl, 0); -+ elan3mmu_unlock_ptbl (l1ptbl, l1mtx, flags); -+ } -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan3/elan3ops.c linux-2.6.9/drivers/net/qsnet/elan3/elan3ops.c ---- clean/drivers/net/qsnet/elan3/elan3ops.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan3/elan3ops.c 2003-09-24 09:57:25.000000000 -0400 -@@ -0,0 +1,170 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: elan3ops.c,v 1.4 2003/09/24 13:57:25 david Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/os/elan3ops.c,v $*/ -+ -+#include -+#include -+ -+#include -+#include -+#include -+ -+extern ELAN_STATS_OPS elan3_device_stats_ops; -+ -+ELAN_DEV_OPS elan3_dev_ops = { -+ -+ get_position, -+ set_position, -+ -+ ELAN_DEV_OPS_VERSION -+}; -+ -+ELAN_STATS_OPS elan3_device_stats_ops = { -+ ELAN_STATS_OPS_VERSION, -+ -+ stats_get_index_name, -+ stats_get_block, -+ stats_clear_block -+}; -+ -+static char *elan3_device_stats_names[ELAN3_NUM_STATS] = -+{ -+ "version field", /* not cleared */ -+ "elan interrupts", -+ "tlb flushes", -+ "traps with invalid context", -+ "interrupts com queue half full", -+ "cproc traps", -+ "dproc traps", -+ "tproc traps", -+ "iproc traps", -+ "event interrupts", -+ "elan page faults", -+ "EopBadAcks", -+ "EopResets", -+ "InputterBadLength", -+ "InputterCRCDiscards", -+ "InputterCRCErrors", -+ "InputterCRCBad", -+ "errors in dma data", -+ "errors after dma identify", -+ "errors after thread identify", -+ "dma retries", -+ "dma output timeouts", -+ "dma packet ack errors", -+ "forced tproc traps", -+ "too many instruction traps", -+ "output timeouts", -+ "packet ack errors", -+ "LockError", -+ "DeskewError", -+ "PhaseError", -+ "DataError", -+ "FifoOvFlow0", -+ "FifoOvFlow1", -+ "link error value on data error", -+ "correctable ecc errors", -+ "uncorrectable ecc errors", -+ "multiple ecc errors", -+ "sdram bytes free", /* not cleared */ -+ "longest interrupt in ticks", -+ "punts of event int's to thread", -+ "reschedules of event int's thread" -+}; -+ -+int -+stats_get_index_name (void *arg, uint index, caddr_t name) -+{ -+ copyout (elan3_device_stats_names[index], name, strlen (elan3_device_stats_names[index]) + 1 /* with \0 */); -+ -+ return (0); -+} -+ -+int -+stats_get_block (void *arg, uint entries, ulong *value) -+{ -+ ELAN3_DEV *dev = (ELAN3_DEV *) arg; -+ -+ if ( entries > ELAN3_NUM_STATS ) /* if space too big only send valid portion */ -+ entries = ELAN3_NUM_STATS; -+ -+ copyout(&dev->Stats, value, sizeof(ulong) * entries); -+ -+ return (0); -+} -+ -+int -+stats_clear_block (void *arg) -+{ -+ ELAN3_DEV *dev = (ELAN3_DEV *) arg; -+ u_long *ptr = (u_long *) &dev->Stats; -+ int n; -+ -+ for (n = 0; n < ELAN3_NUM_STATS; n++) -+ { -+ switch (n) -+ { -+ case offsetof (ELAN3_STATS, Version)/sizeof(u_long): -+ case offsetof (ELAN3_STATS, SdramBytesFree)/sizeof(u_long): -+ break; -+ default: -+ ptr[n] = (ulong)0; -+ } -+ } -+ return (0); -+} -+ -+int -+get_position (void *user_data, ELAN_POSITION *position) -+{ -+ ELAN3_DEV *dev = (ELAN3_DEV *)user_data; -+ -+ copyout(&dev->Position, position, sizeof(ELAN_POSITION)); 
-+ -+ return (0); -+} -+ -+int -+set_position (void *user_data, unsigned short nodeId, unsigned short numNodes) -+{ -+ ELAN3_DEV *dev = (ELAN3_DEV *)user_data; -+ -+ if (ComputePosition (&dev->Position, nodeId, numNodes, dev->Devinfo.dev_num_down_links_value) != 0) -+ return (EINVAL); -+ -+ return (0); -+} -+ -+int -+elan3_register_dev_stats(ELAN3_DEV * dev) -+{ -+ char name[ELAN_STATS_NAME_MAX_LEN+1]; -+ -+ sprintf (name, ELAN3_STATS_DEV_FMT, dev->Instance); -+ -+ elan_stats_register(&dev->StatsIndex, -+ name, -+ sizeof (elan3_device_stats_names)/sizeof (elan3_device_stats_names[0]), -+ &elan3_device_stats_ops, -+ (void *)dev); -+ -+ return (0); -+} -+ -+void -+elan3_deregister_dev_stats(ELAN3_DEV * dev) -+{ -+ elan_stats_deregister(dev->StatsIndex); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "linux" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan3/elandebug.c linux-2.6.9/drivers/net/qsnet/elan3/elandebug.c ---- clean/drivers/net/qsnet/elan3/elandebug.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan3/elandebug.c 2003-09-24 09:57:25.000000000 -0400 -@@ -0,0 +1,151 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: elandebug.c,v 1.25 2003/09/24 13:57:25 david Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/os/elandebug.c,v $*/ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+ -+void -+elan3_debugf (void *p, unsigned int mode, char *fmt,...) -+{ -+ char prefix[128]; -+ -+#if defined (DIGITAL_UNIX) -+#define PREFIX_FMT "[%lx.%08x]" -+#define PREFIX_VAL (int)CURTHREAD() -+#else -+#define PREFIX_FMT "[%lx.%04d]" -+#define PREFIX_VAL (current->pid) -+#endif -+ -+ if ((unsigned long) p > DBG_NTYPES) -+ { -+ ELAN3_CTXT *ctxt = (ELAN3_CTXT *) p; -+ -+ if (elan3_debug_display_ctxt && (ctxt->Capability.cap_mycontext & MAX_ROOT_CONTEXT_MASK) != elan3_debug_display_ctxt) -+ return; -+ if (elan3_debug_ignore_ctxt && (ctxt->Capability.cap_mycontext & MAX_ROOT_CONTEXT_MASK) == elan3_debug_ignore_ctxt) -+ return; -+ -+ if (ctxt->Capability.cap_mycontext == ELAN_CAP_UNINITIALISED) -+ sprintf (prefix, PREFIX_FMT " (XXX) ", lbolt, PREFIX_VAL); -+ else -+ sprintf (prefix, PREFIX_FMT " (%03x) ", lbolt, PREFIX_VAL, -+ ctxt->Capability.cap_mycontext & MAX_ROOT_CONTEXT_MASK); -+ } -+ else -+ { -+ char *what; -+ -+ if (elan3_debug_ignore_dev & (1 << ((unsigned long) p))) -+ return; -+ -+ switch ((unsigned long) p) -+ { -+ case (int) DBG_DEVICE: what = "dev"; break; -+ case (int) DBG_KCOMM: what = "kcm"; break; -+ case (int) DBG_ICS: what = "ics"; break; -+ case (int) DBG_USER: what = "usr"; break; -+ default: what = NULL; break; -+ } -+ -+ if (what) -+ sprintf (prefix, PREFIX_FMT " [%s] ", lbolt, PREFIX_VAL, what); -+ else -+ sprintf (prefix, PREFIX_FMT " [%3d] ", lbolt, PREFIX_VAL, (int)(long)what); -+ } -+ -+ { -+ va_list ap; -+ -+ va_start (ap, fmt); -+ qsnet_vdebugf ((((mode & elan3_debug_buffer)?QSNET_DEBUG_BUFFER:0)|((mode & elan3_debug_console)?QSNET_DEBUG_CONSOLE:0)) , prefix, fmt, ap); -+ va_end (ap); -+ } -+} -+ -+ -+void -+elan3_alloc_panicstate (ELAN3_DEV *dev, int allocsdram) -+{ -+ register int bank; -+ -+ if (dev->PanicState.RegPtr == NULL) -+ KMEM_ZALLOC (dev->PanicState.RegPtr, E3_Regs *, sizeof (E3_Regs), 1); -+ -+ if (allocsdram) -+ for (bank = 0; bank < ELAN3_SDRAM_NUM_BANKS; bank++) -+ if (dev->PanicState.Sdram[bank] == NULL && dev->SdramBanks[bank].Size) -+ KMEM_ZALLOC 
(dev->PanicState.Sdram[bank], char *, dev->SdramBanks[bank].Size, 1); -+} -+ -+void -+elan3_free_panicstate (ELAN3_DEV *dev) -+{ -+ register int bank; -+ -+ if (dev->PanicState.RegPtr != NULL) -+ KMEM_FREE (dev->PanicState.RegPtr, sizeof (E3_Regs)); -+ -+ for (bank = 0; bank < ELAN3_SDRAM_NUM_BANKS; bank++) -+ if (dev->PanicState.Sdram[bank] != NULL && dev->SdramBanks[bank].Size) -+ KMEM_FREE (dev->PanicState.Sdram[bank], dev->SdramBanks[bank].Size); -+ -+ bzero (&dev->PanicState, sizeof (dev->PanicState)); -+} -+ -+void -+elan3_save_panicstate (ELAN3_DEV *dev) -+{ -+ register int bank; -+ -+ if (dev->PanicState.RegPtr) -+ { -+ printk ("elan%d: saving state on panic .....\n", dev->Devinfo.dev_instance); -+ -+ bcopy ((void *) dev->RegPtr, (void *) dev->PanicState.RegPtr, sizeof (E3_Regs)); -+ -+ for (bank = 0; bank < ELAN3_SDRAM_NUM_BANKS; bank++) -+ if (dev->SdramBanks[bank].Size && dev->PanicState.Sdram[bank]) -+ elan3_sdram_copyq_from_sdram (dev, (bank << ELAN3_SDRAM_BANK_SHIFT), dev->PanicState.Sdram[bank], dev->SdramBanks[bank].Size); -+ -+ } -+} -+ -+int -+elan3_assfail (ELAN3_DEV *dev, char *string, char *file, int line) -+{ -+ if (panicstr) -+ return (0); -+ -+ printk ("elan: assertion failed '%s' File '%s' Line %d\n", string, file, line); -+ -+#if defined(LINUX) -+ elan3_save_panicstate (dev); -+ -+ panic ("elan: assertion failed '%s' File '%s' Line %d\n", string, file, line); -+#else -+ cmn_err (CE_PANIC, "elan: assertion failed '%s' File '%s' Line %d\n", string, file, line); -+#endif -+ /*NOTREACHED*/ -+ return (0); -+} -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan3/elandev_generic.c linux-2.6.9/drivers/net/qsnet/elan3/elandev_generic.c ---- clean/drivers/net/qsnet/elan3/elandev_generic.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan3/elandev_generic.c 2005-07-20 07:35:36.000000000 -0400 -@@ -0,0 +1,1867 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: elandev_generic.c,v 1.115.2.2 2005/07/20 11:35:36 mike Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/os/elandev_generic.c,v $*/ -+ -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* -+ * Module globals, configurable from system file. -+ */ -+u_int elan3_debug = 0; -+u_int elan3_debug_console = 0; -+u_int elan3_debug_buffer = -1; -+u_int elan3_debug_ignore_dev = 0; -+u_int elan3_debug_ignore_kcomm = 0; -+u_int elan3_debug_ignore_ctxt = 0; -+u_int elan3_debug_display_ctxt = 0; -+ -+int eventint_punt_loops; -+int eventint_punt_ticks; -+int eventint_resched_ticks; -+ -+static void InitialiseDmaBuffers (ELAN3_DEV *dev, ioaddr_t CmdPort); -+static int ProbeSdram (ELAN3_DEV *dev); -+static void InitialiseSdram (ELAN3_DEV *dev); -+static void ReEnableErrorInterrupts (void *arg); -+void PollForDmaHungup (void *arg); -+static void elan3_event_interrupt (ELAN3_DEV *dev); -+ -+/* -+ * BaseAddr is ptr to the start of a table aligned on a power of two byte address. -+ * SizePower must be in the range of 6 to 12. It defines the number of valid contexts as -+ * shown below. -+ * -+ * SizePower Valid Contexts Table size in bytes. 
-+ * 6 64 1k -+ * 7 128 2k -+ * 8 256 4K -+ * 9 512 8k -+ * 10 1024 16k -+ * 11 2048 32k -+ * 12 4096 64k -+ */ -+#define GEN_CONTEXT_PTR(BaseAddr, SizePower) (((E3_uint32) BaseAddr) | \ -+ (~((1 << ((SizePower) - 6)) - 1) & 0x3f)) -+ -+int -+InitialiseElan (ELAN3_DEV *dev, ioaddr_t CmdPort) -+{ -+ E3_IprocTrapHeader_BE TrapCleanup[4]; -+ E3_ContextControlBlock ContextControlBlock; -+ sdramaddr_t ptr; -+ int res; -+ int i; -+ -+ eventint_punt_loops = 100; -+ eventint_punt_ticks = (hz/100); -+ eventint_resched_ticks = (hz/4); -+ -+ dev->Stats.Version = ELAN3_STATS_VERSION; -+ dev->Position.pos_mode = ELAN_POS_UNKNOWN; -+ -+ /* -+ * The elan should have already been reset, so the interrupt mask -+ * should be 0 and the schedule status register should be set to -+ * its initial state -+ */ -+ ASSERT (dev->InterruptMask == 0); -+ ASSERT ((read_reg32 (dev, Exts.SchCntReg) & HaltStopAndExtTestMask) == Sched_Initial_Value); -+ -+ /* -+ * Write any value here to clear out the half full and error bits of the command -+ * overflow queues. -+ */ -+ write_reg32 (dev, ComQueueStatus, 0); -+ -+ /* Initialise the cache tags before touching the SDRAM */ -+ /* we initialise them to "map" the bottom of SDRAM */ -+ for (i = 0; i < E3_NumCacheLines; i++) -+ { -+ write_cache_tag (dev, Tags[i][0].Value, 0x0000000000000000ULL); -+ write_cache_tag (dev, Tags[i][1].Value, 0x0000080000000000ULL); -+ write_cache_tag (dev, Tags[i][2].Value, 0x0000100000000000ULL); -+ write_cache_tag (dev, Tags[i][3].Value, 0x0000180000000000ULL); -+ } -+ -+#ifndef CONFIG_MPSAS -+ for (i = 0; i < E3_NumCacheLines*(E3_CACHELINE_SIZE/sizeof(E3_uint64)); i++) -+ { -+ write_cache_set (dev, Set0[i], 0xcac1ecac1ecac1e0ULL); -+ write_cache_set (dev, Set1[i], 0xcac1ecac1ecac1e1ULL); -+ write_cache_set (dev, Set2[i], 0xcac1ecac1ecac1e2ULL); -+ write_cache_set (dev, Set3[i], 0xcac1ecac1ecac1e3ULL); -+ } -+#endif -+ -+ if ((res = ProbeSdram(dev)) != ESUCCESS) -+ return (res); -+ -+ /* Enable all cache sets before initialising the sdram allocators */ -+ write_reg32 (dev, Cache_Control_Reg.ContReg, (dev->Cache_Control_Reg |= CONT_EN_ALL_SETS)); -+ -+ InitialiseSdram (dev); -+ -+ dev->TAndQBase = elan3_sdram_alloc (dev, ELAN3_TANDQ_SIZE); -+ dev->ContextTable = elan3_sdram_alloc (dev, ELAN3_CONTEXT_SIZE); -+ dev->ContextTableSize = ELAN3_NUM_CONTEXTS; -+ dev->CommandPortTraps[0] = elan3_sdram_alloc (dev, ELAN3_COMMAND_TRAP_SIZE); -+ dev->CommandPortTraps[1] = elan3_sdram_alloc (dev, ELAN3_COMMAND_TRAP_SIZE); -+ dev->CurrentCommandPortTrap = 0; -+ -+ PRINTF3 (DBG_DEVICE, DBG_CONFIG, "InitialiseElan: ContextTable %08lx TAndQ %08lx CommandPortTrap %08lx\n", -+ dev->ContextTable, dev->TAndQBase, dev->CommandPortTraps[0]); -+ -+ /* Allocate the thread amd dma trap areas */ -+ KMEM_ZALLOC (dev->ThreadTrap, THREAD_TRAP *, sizeof (THREAD_TRAP), TRUE); -+ KMEM_ZALLOC (dev->DmaTrap, DMA_TRAP *, sizeof (DMA_TRAP), TRUE); -+ -+ /* Allocate the ctxt table */ -+ KMEM_ZALLOC (dev->CtxtTable, ELAN3_CTXT **, dev->ContextTableSize * sizeof ( ELAN3_CTXT *), TRUE); -+ -+ /* Initialise halt queue list */ -+ dev->HaltOperationsTailpp = &dev->HaltOperations; -+ -+ /* From elan3/code/harness/elanstuff.c */ -+ /* Init the clock. */ -+ write_ureg64 (dev, Clock.NanoSecClock, 0); -+ -+ /* Init the instruction count reg. */ -+ write_ureg32 (dev, InstCount.s.StatsCount, 0); -+ -+ /* Init the stats control reg. Must be done before the count regs.*/ -+ write_ureg32 (dev, StatCont.StatsControl, 0); -+ -+ /* Init the stats count regs. 
*/ -+ write_ureg32 (dev, StatCounts[0].s.StatsCount, 0); -+ write_ureg32 (dev, StatCounts[1].s.StatsCount, 0); -+ write_ureg32 (dev, StatCounts[2].s.StatsCount, 0); -+ write_ureg32 (dev, StatCounts[3].s.StatsCount, 0); -+ write_ureg32 (dev, StatCounts[4].s.StatsCount, 0); -+ write_ureg32 (dev, StatCounts[5].s.StatsCount, 0); -+ write_ureg32 (dev, StatCounts[6].s.StatsCount, 0); -+ write_ureg32 (dev, StatCounts[7].s.StatsCount, 0); -+ -+ /* -+ * Initialise the Context_Ptr and Fault_Base_Ptr -+ */ -+ write_reg32 (dev, Fault_Base_Ptr, dev->TAndQBase + offsetof(E3_TrapAndQueue, IProcSysCntx)); -+ write_reg32 (dev, Context_Ptr, GEN_CONTEXT_PTR (dev->ContextTable, ELAN3_LN2_NUM_CONTEXTS)); -+ -+ /* scrub the TProc Registers */ -+ for (i = 0; i < 8; i++) -+ write_reg32 (dev, Globals[i], 0xdeadbabe); -+ for (i = 0; i < 8; i++) -+ write_reg32 (dev, Outs[i], 0xdeadbabe); -+ for (i = 0; i < 8; i++) -+ write_reg32 (dev, Locals[i], 0xdeadbabe); -+ for (i = 0; i < 8; i++) -+ write_reg32 (dev, Ins[i], 0xdeadbabe); -+ -+ /* -+ * Initialise the Queue pointers. Arrange them so that the starting positions are -+ * farthest apart in one set of the cache. Thus 512 bytes apart, but with cntx0 -+ * thread the same as the interrupt queue. -+ */ -+ write_reg32 (dev, TProc_NonSysCntx_FPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxThreadQueue[0xc0])); -+ write_reg32 (dev, TProc_NonSysCntx_BPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxThreadQueue[0xc0])); -+ write_reg32 (dev, TProc_SysCntx_FPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxThreadQueue[0x80])); -+ write_reg32 (dev, TProc_SysCntx_BPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxThreadQueue[0x80])); -+ -+ write_reg32 (dev, DProc_NonSysCntx_FPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[0])); -+ write_reg32 (dev, DProc_NonSysCntx_BPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[0])); -+ write_reg32 (dev, DProc_SysCntx_FPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0x10])); -+ write_reg32 (dev, DProc_SysCntx_BPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0x10])); -+ -+ dev->Event_Int_Queue_FPtr = dev->TAndQBase + offsetof (E3_TrapAndQueue, EventIntQueue[0x80]); -+ write_reg32 (dev, Event_Int_Queue_FPtr, dev->Event_Int_Queue_FPtr); -+ write_reg32 (dev, Event_Int_Queue_BPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, EventIntQueue[0x80])); -+ -+ -+ /* Initialise Input_Trap_Base to last 8 Kbytes of trap area, uCode adds the right offset */ -+ write_reg32 (dev, Input_Trap_Base, dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxThreadQueue[0])); -+ -+ /* Ptr to word used to save the SP to when a thread deschedules */ -+ write_reg32 (dev, Thread_SP_Save_Ptr, dev->TAndQBase + offsetof (E3_TrapAndQueue, Thread_SP_Save)); -+ -+ /* Initialise the command trap base */ -+ write_reg32 (dev, CProc_TrapSave_Addr, dev->CommandPortTraps[0]); -+ -+ /* Initialise the set event tracing registers */ -+ write_reg32 (dev, Event_Trace_Ptr, 0); -+ write_reg32 (dev, Event_Trace_Mask, 0); -+ -+ /* Initialise Tlb_Line_Value to zero. The TLB cannot be read while either the */ -+ /* uCode or thread proc might be running. Must be set to 0. */ -+ write_reg64 (dev, Tlb_Line_Value, 0); -+ -+ /* Control register. 
Cache everything, Enable MMU, RefreshRate=3, CasLatency=1, StartSDR */
-+ dev->Cache_Control_Reg |= CONT_MMU_ENABLE | CONT_EN_ALL_SETS | CONT_CACHE_ALL | CONT_ENABLE_ECC;
-+
-+#if ELAN3_PAGE_SHIFT == 13
-+ dev->Cache_Control_Reg |= CONT_ENABLE_8K_PAGES;
-+#endif
-+
-+ write_reg32 (dev, Cache_Control_Reg.ContReg, dev->Cache_Control_Reg);
-+
-+ /*
-+ * Initialise the context table to discard all contexts
-+ */
-+ ContextControlBlock.rootPTP = 0;
-+ ContextControlBlock.filter = E3_CCB_DISCARD_ALL;
-+ ContextControlBlock.VPT_mask = 0;
-+ ContextControlBlock.VPT_ptr = 0;
-+
-+ for (i = 0, ptr = dev->ContextTable; i < ELAN3_NUM_CONTEXTS; i++, ptr += sizeof (E3_ContextControlBlock))
-+ elan3_sdram_copyl_to_sdram (dev, &ContextControlBlock, ptr, sizeof (E3_ContextControlBlock));
-+
-+ /* From elan3/code/trap_handler/init.c */
-+ /*
-+ * Initialise the Trap And Queue area in Elan SDRAM.
-+ */
-+ TrapCleanup[0].s.TrTypeCntx.TypeContext = 0;
-+ TrapCleanup[0].s.TrAddr = 0;
-+ TrapCleanup[0].s.IProcTrapStatus.Status = CRC_STATUS_GOOD;
-+ TrapCleanup[0].s.TrData0 = 0;
-+ TrapCleanup[1].s.TrTypeCntx.TypeContext = 0;
-+ TrapCleanup[1].s.TrAddr = 0;
-+ TrapCleanup[1].s.IProcTrapStatus.Status = CRC_STATUS_GOOD;
-+ TrapCleanup[1].s.TrData0 = 0;
-+ TrapCleanup[2].s.TrTypeCntx.TypeContext = 0;
-+ TrapCleanup[2].s.TrAddr = 0;
-+ TrapCleanup[2].s.IProcTrapStatus.Status = CRC_STATUS_GOOD;
-+ TrapCleanup[2].s.TrData0 = 0;
-+ TrapCleanup[3].s.TrTypeCntx.TypeContext = 0;
-+ TrapCleanup[3].s.TrAddr = 0;
-+ TrapCleanup[3].s.IProcTrapStatus.Status = CRC_STATUS_GOOD;
-+ TrapCleanup[3].s.TrData0 = 0;
-+
-+ elan3_sdram_writel (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcSysCntx.s.FaultContext), 0);
-+ elan3_sdram_writel (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcSysCntx.s.FSR.Status), 0);
-+ elan3_sdram_writel (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcNonSysCntx.s.FaultContext), 0);
-+ elan3_sdram_writel (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcNonSysCntx.s.FSR.Status), 0);
-+
-+ /* Must now zero all the FSRs so that a subsequent Fault can be seen */
-+ elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, CProc), 16);
-+
-+ elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc), 16);
-+ elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0), 64);
-+
-+ elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, TProc), 16);
-+ elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcData), 16);
-+ elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcInst), 16);
-+ elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcOpen), 16);
-+
-+ elan3_sdram_copyq_to_sdram (dev, TrapCleanup, dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_C0_TrHead[0]), 64);
-+ elan3_sdram_copyq_to_sdram (dev, TrapCleanup, dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_C0_TrHead[0]), 64);
-+
-+ elan3_sdram_copyq_to_sdram (dev, TrapCleanup, dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_NonC0_TrHead[0]), 64);
-+ elan3_sdram_copyq_to_sdram (dev, TrapCleanup, dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_NonC0_TrHead[0]), 64);
-+
-+ InitialiseDmaBuffers(dev, CmdPort);
-+
-+ /* reserve a halt operation for flushing the context filter */
-+ ReserveHaltOperations (dev, 1, TRUE);
-+
-+ /* Allow the Thread/Dma to run */
-+ CLEAR_SCHED_STATUS (dev, HaltThread | HaltDmas);
-+
-+ /* Enable All Interrupts */
-+ SET_INT_MASK (dev, (INT_PciMemErr | INT_SDRamInt | INT_EventInterrupt | INT_LinkError | INT_ComQueue |
-+ INT_TProc | INT_CProc | INT_DProc | INT_IProcCh1NonSysCntx |
-+ INT_IProcCh1SysCntx | INT_IProcCh0NonSysCntx | INT_IProcCh0SysCntx));
-+
-+ /* Take the link out of boundary scan */
-+ SET_SCHED_LINK_VALUE (dev, 0, 0);
-+
-+ /* And clear any link errors */
-+ PULSE_SCHED_STATUS (dev, ClearLinkErrorInt);
-+
-+ /* XXXX: clear discard context 0, AFTER setting up the kernel comms */
-+ CLEAR_SCHED_STATUS (dev, DiscardSysCntxIn | DiscardNonSysCntxIn);
-+
-+ /* Start a thread to handle excessive Event Interrupts */
-+ if (kernel_thread_create (elan3_event_interrupt, (caddr_t) dev) == NULL)
-+ {
-+ panic ("InitialiseElan: cannot start elan3_event_interrupt\n");
-+ return (EFAIL);
-+ }
-+ dev->EventInterruptThreadStarted = 1;
-+
-+ ReserveHaltOperations (dev, 1, TRUE);
-+
-+ PollForDmaHungup (dev);
-+
-+#if defined(IOPROC_PATCH_APPLIED) && !defined(NO_PTRACK)
-+ /* Register the device and stats with elanmod for RMS
-+ * but only if we've got the coproc patch applied */
-+ dev->DeviceIdx = elan_dev_register(&dev->Devinfo, &elan3_dev_ops, (void *) dev);
-+
-+ elan3_register_dev_stats(dev);
-+#endif
-+
-+ return (ESUCCESS);
-+}
-+
-+static void
-+InitialiseDmaBuffers(ELAN3_DEV *dev, ioaddr_t CmdPort)
-+{
-+ register int i;
-+
-+ /* GNAT sw-elan3/3908:
-+ * Clear down the power-on state of the Dma_Desc registers to make sure we don't
-+ * try and interpret them when a trap happens.
-+ */
-+ write_reg32 (dev, Dma_Desc.dma_type, 0);
-+ write_reg32 (dev, Dma_Desc.dma_size, 0);
-+ write_reg32 (dev, Dma_Desc.dma_source, 0);
-+ write_reg32 (dev, Dma_Desc.dma_dest, 0);
-+ write_reg32 (dev, Dma_Desc.dma_destEvent, 0);
-+ write_reg32 (dev, Dma_Desc.dma_destCookieVProc, 0);
-+ write_reg32 (dev, Dma_Desc.dma_srcEvent, 0);
-+ write_reg32 (dev, Dma_Desc.dma_srcCookieVProc, 0);
-+
-+ /*
-+ * The following is a sequence of writes to remove X's from the dma buffers and
-+ * registers. It is only safe to write these registers after reset and before any
-+ * dma's have been issued. The chip will NOT function correctly if they are written at
-+ * any other time or in a different order.
-+ */
-+ write_reg64 (dev, Exts.Dmas.DmaWrs.LdAlignment, 0);
-+ write_reg64 (dev, Exts.Dmas.DmaWrs.LdDmaType, 0);
-+ write_reg64 (dev, Exts.Dmas.DmaWrs.ResetAckNLdBytesToWr, ((u_longlong_t)0x1000) << 32);
-+ write_reg64 (dev, Exts.Dmas.DmaWrs.LdBytesToRd, ((u_longlong_t)0x100) << 32);
-+
-+ for (i=0;i<(4*8);i++)
-+ write_reg64 (dev, Dma_Alignment_Port[0], 0);
-+
-+ /*
-+ * This is used to clear out X's from some of the trap registers. This is required to
-+ * prevent the first traps from possibly writing X's into the SDram and upsetting the
-+ * ECC value. It requires that the trap save area registers have been set up but does
-+ * not require any translations to be ready.
-+ */
-+ writel (-1, (void *)(CmdPort + offsetof (E3_CommandPort, SetEvent)));
-+ while ((read_reg32 (dev, Exts.InterruptReg) & INT_CProc) == 0)
-+ {
-+ mb();
-+ DELAY (1);
-+ }
-+
-+ write_reg32 (dev, CProc_TrapSave_Addr, dev->CommandPortTraps[dev->CurrentCommandPortTrap]);
-+
-+ PULSE_SCHED_STATUS(dev, RestartCProc);
-+}
-+
-+void
-+FinaliseElan (ELAN3_DEV *dev)
-+{
-+ ELAN3_PTBL_GR *ptg;
-+ ELAN3_HALTOP *op;
-+ ELAN3_HALTOP *chain = NULL;
-+ int bank;
-+ int indx;
-+ int size;
-+ unsigned long flags;
-+ int level;
-+
-+#if defined(IOPROC_PATCH_APPLIED) && !defined(NO_PTRACK)
-+ elan_stats_deregister (dev->StatsIndex);
-+ elan_dev_deregister(&dev->Devinfo);
-+#endif
-+
-+ /* Cancel the dma poller */
-+ cancel_timer_fn (&dev->DmaPollTimeoutId);
-+
-+ /* release its halt operation */
-+ ReleaseHaltOperations (dev, 1);
-+
-+ /* stop all kernel threads */
-+ dev->ThreadsShouldStop = 1;
-+
-+ spin_lock_irqsave (&dev->IntrLock, flags);
-+ while (dev->EventInterruptThreadStarted && !dev->EventInterruptThreadStopped)
-+ {
-+ kcondvar_wakeupall (&dev->IntrWait, &dev->IntrLock);
-+ kcondvar_wait (&dev->IntrWait, &dev->IntrLock, &flags);
-+ }
-+ spin_unlock_irqrestore (&dev->IntrLock, flags);
-+
-+ /* Set the interrupt mask to 0 and the schedule control register to run nothing */
-+ SET_INT_MASK (dev, 0);
-+ SET_SCHED_STATUS (dev, DiscardNonSysCntxIn | DiscardSysCntxIn | HaltThread | HaltDmas);
-+
-+ /* Cancel any link error timeout */
-+ if (timer_fn_queued(&dev->ErrorTimeoutId))
-+ cancel_timer_fn (&dev->ErrorTimeoutId);
-+
-+ /* Free off any page tables that have been allocated */
-+ spin_lock (&dev->PtblGroupLock);
-+ for(level=0; level<4; level++)
-+ {
-+ while ((ptg = dev->Level[level].PtblGroupList) != NULL)
-+ {
-+ dev->Level[level].PtblGroupList = ptg->pg_next;
-+
-+ elan3_sdram_free (dev, ptg->pg_addr, PTBL_GROUP_SIZE);
-+ FREE_PTBL_GR(ptg);
-+ }
-+ }
-+
-+ spin_unlock (&dev->PtblGroupLock);
-+
-+ /* Free off all halt operations */
-+ spin_lock_irqsave (&dev->FreeHaltLock, flags);
-+ while ((op = dev->FreeHaltOperations) != NULL)
-+ {
-+ dev->FreeHaltOperations = op->Next;
-+
-+ /* Keep a list of 'freed' ops for later KMEM_FREE call */
-+ op->Next = chain;
-+ chain = op;
-+ }
-+ spin_unlock_irqrestore (&dev->FreeHaltLock, flags);
-+
-+ /* Have now dropped the spinlock - can call KMEM_FREE */
-+ while ((op = chain) != NULL)
-+ {
-+ chain = op->Next;
-+
-+ KMEM_FREE (op, sizeof (ELAN3_HALTOP));
-+ }
-+
-+ /* Free off the ctxt table */
-+ KMEM_FREE (dev->CtxtTable, dev->ContextTableSize * sizeof (ELAN3_CTXT *));
-+
-+ /* Free off the thread and dma trap areas */
-+ KMEM_FREE (dev->ThreadTrap, sizeof (THREAD_TRAP));
-+ KMEM_FREE (dev->DmaTrap, sizeof (DMA_TRAP));
-+
-+ /* Free off the memsegs and pages */
-+ for (bank = 0; bank < ELAN3_SDRAM_NUM_BANKS; bank++)
-+ {
-+ if (dev->SdramBanks[bank].Size)
-+ {
-+ UnmapDeviceRegister (dev, &dev->SdramBanks[bank].Handle);
-+
-+ KMEM_FREE (dev->SdramBanks[bank].PtblGroups, sizeof (ELAN3_PTBL_GR *) * (dev->SdramBanks[bank].Size / PTBL_GROUP_SIZE));
-+
-+ for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size <= dev->SdramBanks[bank].Size; indx++, size <<= 1)
-+ KMEM_FREE (dev->SdramBanks[bank].Bitmaps[indx], sizeof (bitmap_t)*BT_BITOUL(dev->SdramBanks[bank].Size/size));
-+ }
-+ }
-+ elan3_sdram_fini (dev);
-+}
-+
-+#define INIT_PATTERN(offset) (0xBEEC000000000011ull | ((u_longlong_t)(offset)) << 16)
-+#define FREE_PATTERN(offset) (0xBEEC000000000022ull | ((u_longlong_t)(offset)) << 16)
-+
-+static int
-+ProbeSdram (ELAN3_DEV *dev)
-+{
-+ int Instance;
-+ u_int
Bank;
-+ int MemSpaceSize;
-+ int BankMaxSize;
-+ int BankOffset;
-+ int BankSize;
-+ ioaddr_t BankBase;
-+ ioaddr_t PageBase;
-+ ioaddr_t PageBase1;
-+ ioaddr_t PageBase2;
-+ DeviceMappingHandle BankHandle;
-+ DeviceMappingHandle PageHandle;
-+ DeviceMappingHandle PageHandle1;
-+ DeviceMappingHandle PageHandle2;
-+ register int i;
-+ u_longlong_t value;
-+ extern int sdram_bank_limit;
-+
-+ /* NOTE: The Cache control register is set to only enable cache set 0 */
-+ /* and has ECC disabled */
-+ Instance = dev->Instance;
-+
-+ /* Determine the size of the SDRAM from the BAR register */
-+ if (DeviceRegisterSize (dev, ELAN3_BAR_SDRAM, &MemSpaceSize) != ESUCCESS)
-+ {
-+ printk ("elan%d: cannot determine SDRAM size\n", Instance);
-+ return (EFAIL);
-+ }
-+
-+ elan3_sdram_init (dev);
-+
-+ BankMaxSize = MemSpaceSize / ELAN3_SDRAM_NUM_BANKS;
-+
-+ for (Bank = 0; Bank < ELAN3_SDRAM_NUM_BANKS; Bank++)
-+ {
-+ BankOffset = Bank * BankMaxSize;
-+
-+ PRINTF3 (DBG_DEVICE, DBG_CONFIG, "elan%d: Probing RAM Bank %d (max size %08x)\n", Instance, Bank, BankMaxSize);
-+
-+ /* Probe the memory bank by mapping two pages that are the size of the cache apart; */
-+ /* this guarantees that when we store the second pattern we displace the first pattern */
-+ /* from the cache. We also store the second pattern again a further cache-size up */
-+ /* to ensure that the SDRAM wires don't stay floating at pattern1 */
-+
-+ if (MapDeviceRegister (dev, ELAN3_BAR_SDRAM, &BankBase, BankOffset, PAGESIZE, &BankHandle) != ESUCCESS)
-+ {
-+ printk ("elan%d: Cannot probe memory bank %d\n", Instance, Bank);
-+ continue;
-+ }
-+
-+ if (MapDeviceRegister (dev, ELAN3_BAR_SDRAM, &PageBase1, BankOffset + ELAN3_MAX_CACHE_SIZE, PAGESIZE, &PageHandle1) != ESUCCESS)
-+ {
-+ printk ("elan%d: Cannot probe memory bank %d\n", Instance, Bank);
-+ UnmapDeviceRegister (dev, &BankHandle);
-+ continue;
-+ }
-+
-+ if (MapDeviceRegister (dev, ELAN3_BAR_SDRAM, &PageBase2, BankOffset + 2*ELAN3_MAX_CACHE_SIZE, PAGESIZE, &PageHandle2) != ESUCCESS)
-+ {
-+ printk ("elan%d: Cannot probe memory bank %d\n", Instance, Bank);
-+ UnmapDeviceRegister (dev, &BankHandle);
-+ UnmapDeviceRegister (dev, &PageHandle1);
-+ continue;
-+ }
-+
-+#define PATTERN0 (0x5555555555555555ull)
-+#define PATTERN1 (0xAAAAAAAAAAAAAAAAull)
-+ writeq (PATTERN0, (u_longlong_t *) BankBase);
-+ writeq (PATTERN1, (u_longlong_t *) PageBase1);
-+ writeq (PATTERN1, (u_longlong_t *) PageBase2);
-+
-+ mmiob();
-+
-+ value = readq ((u_longlong_t *) BankBase);
-+
-+ if (value != PATTERN0)
-+ {
-+ UnmapDeviceRegister (dev, &BankHandle);
-+ UnmapDeviceRegister (dev, &PageHandle1);
-+ UnmapDeviceRegister (dev, &PageHandle2);
-+ continue;
-+ }
-+
-+ writeq (PATTERN1, (u_longlong_t *) BankBase);
-+ writeq (PATTERN0, (u_longlong_t *) PageBase1);
-+ writeq (PATTERN0, (u_longlong_t *) PageBase2);
-+
-+ mmiob();
-+
-+ value = readq ((u_longlong_t *) BankBase);
-+ if (value != PATTERN1)
-+ {
-+ UnmapDeviceRegister (dev, &BankHandle);
-+ UnmapDeviceRegister (dev, &PageHandle1);
-+ UnmapDeviceRegister (dev, &PageHandle2);
-+ continue;
-+ }
-+ UnmapDeviceRegister (dev, &PageHandle1);
-+ UnmapDeviceRegister (dev, &PageHandle2);
-+
-+ /* Bank is present, so work out its size; we store the maximum size at the base */
-+ /* and then store each power-of-two offset at that offset, down to the minimum */
-+ /* mappable size (PAGESIZE); we then read back the value at the base to */
-+ /* determine the bank size */
-+ writeq ((u_longlong_t) BankMaxSize, (u_longlong_t *) BankBase);
-+
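-+ /* Worked example of the size probe below (illustrative figures, assuming a
-+ * 256Mbyte bank window over a real 64Mbyte bank): stores to offsets at or
-+ * above the real size wrap back into the bank, so the descending stores at
-+ * 128Mbyte and 64Mbyte both alias offset 0, leaving 64Mbyte in the base
-+ * word, while the stores at 32Mbyte and below land inside the bank and do
-+ * not disturb it; the readq() of the base therefore returns the true size.
-+ */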
-+ for (BankSize = (BankMaxSize>>1); BankSize > PAGESIZE; BankSize >>= 1)
-+ {
-+ if (MapDeviceRegister (dev, ELAN3_BAR_SDRAM, &PageBase, BankOffset + BankSize, PAGESIZE, &PageHandle) == ESUCCESS)
-+ {
-+ writeq (BankSize, (u_longlong_t *) PageBase);
-+ UnmapDeviceRegister (dev, &PageHandle);
-+ }
-+ }
-+ mmiob();
-+
-+ BankSize = (u_long) readq ((u_longlong_t *) BankBase);
-+
-+ if (sdram_bank_limit == 0 || BankSize <= (sdram_bank_limit * 1024 * 1024))
-+ printk ("elan%d: memory bank %d is %dK\n", Instance, Bank, BankSize / 1024);
-+ else
-+ {
-+ BankSize = (sdram_bank_limit * 1024 * 1024);
-+ printk ("elan%d: limit memory bank %d to %dK\n", Instance, Bank, BankSize / 1024);
-+ }
-+
-+ UnmapDeviceRegister (dev, &BankHandle);
-+
-+ /* Now map all of this bank into the kernel */
-+ if (MapDeviceRegister (dev, ELAN3_BAR_SDRAM, &BankBase, BankOffset, BankSize, &BankHandle) != ESUCCESS)
-+ {
-+ printk ("elan%d: Cannot initialise memory bank %d\n", Instance, Bank);
-+ continue;
-+ }
-+
-+ dev->SdramBanks[Bank].Size = BankSize;
-+ dev->SdramBanks[Bank].Mapping = BankBase;
-+ dev->SdramBanks[Bank].Handle = BankHandle;
-+
-+#ifndef CONFIG_MPSAS
-+ /* Initialise it for ECC */
-+ preemptable_start {
-+ for (i = 0; i < BankSize; i += 8)
-+ {
-+ elan3_sdram_writeq (dev, (Bank << ELAN3_SDRAM_BANK_SHIFT) | i, INIT_PATTERN(BankOffset+i));
-+
-+ preemptable_check();
-+ }
-+ } preemptable_end;
-+#endif
-+ }
-+
-+ return (ESUCCESS);
-+}
-+
-+static void
-+InitialiseSdram (ELAN3_DEV *dev)
-+{
-+ int indx, size, b;
-+
-+ for (b = 0; b < ELAN3_SDRAM_NUM_BANKS; b++)
-+ {
-+ ELAN3_SDRAM_BANK *bank = &dev->SdramBanks[b];
-+
-+ if (bank->Size == 0)
-+ continue;
-+
-+ /* allocate a ptbl group pointer for each possible ptbl group in this bank */
-+ KMEM_ZALLOC (bank->PtblGroups, ELAN3_PTBL_GR **, sizeof (ELAN3_PTBL_GR *) * bank->Size/PTBL_GROUP_SIZE, TRUE);
-+
-+ /* allocate the buddy allocator bitmaps */
-+ for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size <= bank->Size; indx++, size <<= 1)
-+ KMEM_ZALLOC (bank->Bitmaps[indx], bitmap_t *, sizeof (bitmap_t)*BT_BITOUL(bank->Size/size), TRUE);
-+
-+ /* and add it to the sdram buddy allocator */
-+ elan3_sdram_add (dev, (b << ELAN3_SDRAM_BANK_SHIFT), (b << ELAN3_SDRAM_BANK_SHIFT) + bank->Size);
-+ }
-+}
-+
-+#include
-+
-+int
-+ReadVitalProductData (ELAN3_DEV *dev, int *CasLatency)
-+{
-+ DeviceMappingHandle RomHandle;
-+ unsigned char *RomBase;
-+ unsigned char *PCIDataPtr;
-+ unsigned char *VPDPtr;
-+ unsigned char *lim;
-+ int type;
-+ int i, len, len2;
-+ char name[3] = "XX";
-+ char value[256];
-+ int finished = 0;
-+
-+
-+ /* default value for CAS latency is 3 */
-+ (*CasLatency) = CAS_LATENCY_3;
-+
-+ if (MapDeviceRegister (dev, ELAN3_BAR_EBUS, (ioaddr_t *) &RomBase, ELAN3_EBUS_ROM_OFFSET, ELAN3_EBUS_ROM_SIZE, &RomHandle) != ESUCCESS)
-+ {
-+ printk ("elan%d: Cannot map ROM\n", dev->Instance);
-+ return (EFAIL);
-+ }
-+
-+ /* Check the ROM signature */
-+ if (RomBase[0] != 0x55 || RomBase[1] != 0xAA)
-+ {
-+ printk ("elan%d: Invalid ROM signature %02x %02x\n", dev->Instance, RomBase[0], RomBase[1]);
-+ return (ESUCCESS);
-+ }
-+
-+ PCIDataPtr = RomBase + ((RomBase[0x19] << 8) | RomBase[0x18]);
-+
-+ /* check the pci data structure */
-+ if (PCIDataPtr[0] != 'P' || PCIDataPtr[1] != 'C' || PCIDataPtr[2] != 'I' || PCIDataPtr[3] != 'R')
-+ {
-+ printk ("elan%d: Invalid PCI Data structure\n", dev->Instance);
-+ return (ESUCCESS);
-+ }
-+
-+ /* Extract the VPD pointer */
-+ VPDPtr = RomBase + ((PCIDataPtr[9] << 8) | PCIDataPtr[8]);
-+
-+ if (VPDPtr == RomBase)
-+ {
-+
printk ("elan%d: No Vital Product Data\n", dev->Instance); -+ return (ESUCCESS); -+ } -+ -+ while (! finished) -+ { -+ type = *VPDPtr++; -+ -+ if (type & LARGE_RESOURCE_BIT) -+ { -+ len = *(VPDPtr++); -+ len += *(VPDPtr++) << 8; -+ -+ switch (type & ~LARGE_RESOURCE_BIT) -+ { -+ case LARGE_RESOURCE_STRING: -+ printk ("elan%d: ", dev->Instance); -+ for (i = 0; i < len; i++) -+ printk ("%c", *VPDPtr++); -+ printk ("\n"); -+ break; -+ -+ case LARGE_RESOURCE_VENDOR_DEFINED: -+ VPDPtr += len; -+ break; -+ -+ case LARGE_RESOURCE_VITAL_PRODUCT_DATA: -+ for (lim = VPDPtr + len; VPDPtr < lim; ) -+ { -+ name[0] = *VPDPtr++; -+ name[1] = *VPDPtr++; -+ len2 = *VPDPtr++; -+ -+ for (i = 0; i < len2 && VPDPtr < lim; i++) -+ value[i] = *VPDPtr++; -+ value[i] = '\0'; -+ -+ if (! strcmp (name, "SN")) -+ printk ("elan%d: Serial Number - %s\n", dev->Instance, value); -+ -+ if (! strcmp (name, "Z0")) -+ (*CasLatency) = (strcmp (value, "CAS_LATENCY_2") ? CAS_LATENCY_3 : CAS_LATENCY_2); -+ } -+ break; -+ -+ default: -+ printk ("elan%d: unknown large resource %x\n", dev->Instance, type); -+ finished = 1; -+ break; -+ } -+ } -+ else -+ { -+ len = type & 0x7; -+ -+ switch (type >> 3) -+ { -+ case SMALL_RESOURCE_COMPATIBLE_DEVICE_ID: -+ VPDPtr += len; -+ break; -+ -+ case SMALL_RESOURCE_VENDOR_DEFINED: -+ VPDPtr += len; -+ break; -+ -+ case SMALL_RESOURCE_END_TAG: -+ finished = 1; -+ break; -+ -+ default: -+ printk ("elan%d: unknown small resource %x\n", dev->Instance, type >> 3); -+ finished = 1; -+ break; -+ } -+ } -+ } -+ -+ UnmapDeviceRegister (dev, &RomHandle); -+ return (ESUCCESS); -+} -+ -+void -+ElanSetPtblGr (ELAN3_DEV *dev, sdramaddr_t offset, ELAN3_PTBL_GR *ptg) -+{ -+ int bank = offset >> ELAN3_SDRAM_BANK_SHIFT; -+ -+ dev->SdramBanks[bank].PtblGroups[(offset & (ELAN3_SDRAM_BANK_SIZE-1)) / PTBL_GROUP_SIZE] = ptg; -+} -+ -+ELAN3_PTBL_GR * -+ElanGetPtblGr (ELAN3_DEV *dev, sdramaddr_t offset) -+{ -+ int bank = offset >> ELAN3_SDRAM_BANK_SHIFT; -+ -+ return (dev->SdramBanks[bank].PtblGroups[(offset & (ELAN3_SDRAM_BANK_SIZE-1)) / PTBL_GROUP_SIZE]); -+} -+ -+void -+ElanFlushTlb (ELAN3_DEV *dev) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->TlbLock, flags); -+ BumpStat (dev, TlbFlushes); -+ -+ write_reg32 (dev, Cache_Control_Reg.ContReg, dev->Cache_Control_Reg | MMU_FLUSH); -+ mmiob(); -+ spin_unlock_irqrestore (&dev->TlbLock, flags); -+ -+ while (! 
(read_reg32 (dev, Cache_Control_Reg.ContReg) & MMU_FLUSHED))
-+ mb();
-+}
-+
-+void
-+KillNegativeDma (ELAN3_DEV *dev, void *arg)
-+{
-+ DMA_TRAP *trap = dev->DmaTrap;
-+ E3_Status_Reg status;
-+ sdramaddr_t FPtr, BPtr;
-+ sdramaddr_t Base, Top;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave (&dev->IntrLock, flags);
-+
-+ ASSERT (read_reg32 (dev, Exts.InterruptReg) & INT_DProcHalted);
-+
-+ /* Initialise the trap to deliver to the offending user process */
-+ trap->Status.Status = read_reg32 (dev, Exts.DProcStatus.Status);
-+ trap->PacketInfo.Value = 0;
-+
-+ bzero (&trap->FaultSave, sizeof (trap->FaultSave));
-+ bzero (&trap->Data0, sizeof (trap->Data0));
-+ bzero (&trap->Data1, sizeof (trap->Data1));
-+ bzero (&trap->Data2, sizeof (trap->Data2));
-+ bzero (&trap->Data3, sizeof (trap->Data3));
-+
-+ /* run down the kernel dma run queue and panic on a -ve length dma */
-+ FPtr = read_reg32 (dev, DProc_SysCntx_FPtr);
-+ BPtr = read_reg32 (dev, DProc_SysCntx_BPtr);
-+ Base = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0]);
-+ Top = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[E3_SysCntxQueueSize-1]);
-+
-+ while (FPtr != BPtr)
-+ {
-+ elan3_sdram_copyq_from_sdram (dev, FPtr, &trap->Desc, sizeof (E3_DMA_BE));
-+
-+ if (trap->Desc.s.dma_size > E3_MAX_DMA_SIZE)
-+ panic ("KillNegativeDma: -ve sized kernel dma\n");
-+
-+ FPtr = (FPtr == Top) ? Base : FPtr + sizeof (E3_DMA);
-+ }
-+
-+ /* run down the user dma run queue and "remove" any -ve length dma's */
-+ FPtr = read_reg32 (dev, DProc_NonSysCntx_FPtr);
-+ BPtr = read_reg32 (dev, DProc_NonSysCntx_BPtr);
-+ Base = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[0]);
-+ Top = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[E3_NonSysCntxQueueSize-1]);
-+
-+ while (FPtr != BPtr)
-+ {
-+ elan3_sdram_copyq_from_sdram (dev, FPtr, &trap->Desc, sizeof (E3_DMA_BE));
-+
-+ if (trap->Desc.s.dma_size > E3_MAX_DMA_SIZE)
-+ {
-+ PRINTF3 (NULL, DBG_INTR, "KillNegativeDma: remove dma - context %d size %d SuspendAddr %x\n",
-+ trap->Desc.s.dma_u.s.Context, trap->Desc.s.dma_size, trap->Status.s.SuspendAddr);
-+
-+ trap->Status.s.TrapType = trap->Status.s.SuspendAddr;
-+ trap->Status.s.Context = trap->Desc.s.dma_u.s.Context;
-+
-+ DeliverDProcTrap (dev, trap, 0);
-+
-+ /*
-+ * Remove the DMA from the queue by replacing it with one with
-+ * zero size and no events.
-+ *
-+ * NOTE: we must preserve the SYS_CONTEXT_BIT since the Elan uses this
-+ * to mark the appropriate run queue as empty.
-+ */
-+ trap->Desc.s.dma_type = 0;
-+ trap->Desc.s.dma_size = 0;
-+ trap->Desc.s.dma_source = (E3_Addr) 0;
-+ trap->Desc.s.dma_dest = (E3_Addr) 0;
-+ trap->Desc.s.dma_destCookieVProc = (E3_Addr) 0;
-+ trap->Desc.s.dma_srcEvent = (E3_Addr) 0;
-+ trap->Desc.s.dma_srcCookieVProc = (E3_Addr) 0;
-+
-+ elan3_sdram_copyq_to_sdram (dev, &trap->Desc, FPtr, sizeof (E3_DMA_BE));
-+ }
-+
-+ FPtr = (FPtr == Top) ?
Base : FPtr + sizeof (E3_DMA); -+ } -+ -+ status.Status = read_reg32 (dev, Exts.DProcStatus.Status); -+ -+ if (status.s.SuspendAddr == MI_DequeueNonSysCntxDma || -+ status.s.SuspendAddr == MI_DequeueSysCntxDma || -+ status.s.SuspendAddr == MI_DmaLoop) -+ { -+ PRINTF0 (NULL, DBG_INTR, "KillNegativeDma: unlock dma processor\n"); -+ write_reg32 (dev, Exts.Dmas.DmaWrs.LdAlignment, 0); -+ write_reg32 (dev, Exts.Dmas.DmaWrs.LdDmaType, 0); -+ mmiob(); -+ -+ DELAY (10); -+ -+ write_reg32 (dev, Exts.Dmas.DmaWrs.LdAlignment, 0); -+ write_reg32 (dev, Exts.Dmas.DmaWrs.LdDmaType, 0); -+ mmiob(); -+ } -+ -+ PRINTF0 (NULL, DBG_INTR, "KillNegativeDma: dma processor restarted\n"); -+ -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ -+ schedule_timer_fn (&dev->DmaPollTimeoutId, PollForDmaHungup, (void *) dev, 1); -+} -+ -+void -+ForceTProcTrap (ELAN3_DEV *dev, void *arg) -+{ -+ printk ("elan%d: forced tproc trap .....\n", dev->Instance); -+ -+ schedule_timer_fn (&dev->DmaPollTimeoutId, PollForDmaHungup, (void *) dev, 1); -+} -+ -+void -+PollForDmaHungup (void *arg) -+{ -+ ELAN3_DEV *dev = (ELAN3_DEV *) arg; -+ unsigned long flags; -+ E3_Status_Reg status; -+ E3_uint32 insn1, insn3; -+ register int i; -+ -+ if (read_reg32 (dev, Dma_Desc.dma_size) > E3_MAX_DMA_SIZE) -+ { -+ status.Status = read_reg32 (dev, Exts.DProcStatus); -+ -+ PRINTF2 (NULL, DBG_INTR, "PollForDmaHungup: size %x SuspendAddr %x\n", read_reg32 (dev, Dma_Desc.dma_size), status.s.SuspendAddr); -+ -+ if (status.s.SuspendAddr == MI_DequeueNonSysCntxDma || -+ status.s.SuspendAddr == MI_DequeueSysCntxDma || -+ status.s.SuspendAddr == MI_DmaLoop) -+ { -+ printk ("elan%d: PollForDmaHungup: size %x context %d SuspendAddr %x\n", -+ dev->Instance, read_reg32 (dev, Dma_Desc.dma_size), -+ status.s.Context, status.s.SuspendAddr); -+ -+ PRINTF2 (NULL, DBG_INTR, "PollForDmaHungup: dma_size %x status %x\n", -+ read_reg32 (dev, Dma_Desc.dma_size), status.Status); -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ QueueHaltOperation (dev, 0, NULL, INT_DProcHalted, KillNegativeDma, NULL); -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ -+ return; -+ } -+ } -+ -+ status.Status = read_reg32 (dev, Exts.TProcStatus); -+ if (status.s.WakeupFunction == WakeupStopped) -+ { -+ E3_uint32 PC = read_reg32 (dev, ExecutePC); -+ -+ /* See if it's likely that the thread is really "stuck" on a waitevent/break -+ * instruction ......... */ -+ for (i = 0; i < 10; i++) -+ { -+ status.Status = read_reg32 (dev, Exts.TProcStatus); -+ insn1 = read_reg32 (dev, IBufferReg[1]); -+ insn3 = read_reg32 (dev, IBufferReg[3]); -+ -+ if (! (status.s.WakeupFunction == WakeupStopped && read_reg32 (dev, ExecutePC) == PC && /* stopping and it could be a break/waitevent */ -+ (insn1 == 0x81a00000 || insn3 == 0x81a00000 || /* break instruction */ -+ insn1 == 0x81b00000 || insn3 == 0x81b00000))) /* waitevent instruction */ -+ break; -+ } -+ -+ if (i == 10) -+ { -+ printk ("elan%d: forcing tproc trap from %s instruction at pc %x\n", dev->Instance, -+ (insn1 == 0x81a00000 || insn3 == 0x81a00000) ? "break" : "waitevent", PC); -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ QueueHaltOperation (dev, 0, NULL, INT_TProcHalted, ForceTProcTrap, NULL); -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ return; -+ } -+ } -+ -+ schedule_timer_fn (&dev->DmaPollTimeoutId, PollForDmaHungup, (void *) dev, 10); -+} -+ -+/*=======================================================================================*/ -+/* -+ * Interrupt handler. 
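-+ * Link and ECC error interrupts are rate limited below: once
-+ * CheckForExcessiveErrorRate() sees more than 100 errors within one
-+ * second it masks INT_ErrorInterrupts, and ReEnableErrorInterrupts()
-+ * unmasks them again from a timer one second later.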
-+ */
-+static void
-+ReEnableErrorInterrupts (void *arg)
-+{
-+ ELAN3_DEV *dev = (ELAN3_DEV *) arg;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave (&dev->IntrLock, flags);
-+
-+ if ((dev->SchCntReg & LinkBoundaryScan) == 0)
-+ ENABLE_INT_MASK (dev, INT_ErrorInterrupts);
-+
-+ PRINTF1 (DBG_DEVICE, DBG_INTR, "ReEnableErrorInterrupts: IntMask=%x\n", read_reg32 (dev, Exts.InterruptMask));
-+
-+ spin_unlock_irqrestore (&dev->IntrLock, flags);
-+}
-+
-+void
-+CheckForExcessiveErrorRate (ELAN3_DEV *dev)
-+{
-+ if (dev->ErrorTime == (lbolt/hz))
-+ {
-+ if (dev->ErrorsPerTick++ > 100)
-+ {
-+ PRINTF0 (DBG_DEVICE, DBG_INTR, "CheckForExcessiveErrorRate: too many link errors, disabling interrupt\n");
-+
-+ DISABLE_INT_MASK (dev, INT_ErrorInterrupts);
-+
-+ schedule_timer_fn (&dev->ErrorTimeoutId, ReEnableErrorInterrupts, (void *) dev, hz);
-+ }
-+ }
-+ else
-+ {
-+ dev->ErrorTime = (lbolt/hz);
-+ dev->ErrorsPerTick = 0;
-+ }
-+}
-+/*=======================================================================================*/
-+/*
-+ * Interrupt handler.
-+ */
-+static void
-+HandlePciMemErr (ELAN3_DEV *dev)
-+{
-+ PRINTF0 (DBG_DEVICE, DBG_INTR, "HandlePciMemErr : masking out interrupt\n");
-+
-+ ElanBusError (dev);
-+ panic ("elan pci memory error\n");
-+}
-+
-+static void
-+HandleSDRamInterrupt (ELAN3_DEV *dev)
-+{
-+ E3_uint32 EccStatus0 = read_reg32 (dev, ECC_STATUS0);
-+ E3_uint32 EccStatus1 = read_reg32 (dev, ECC_STATUS1);
-+ unsigned long flags;
-+
-+ PRINTF5 (DBG_DEVICE, DBG_INTR, "elan: ECC error - Addr=%x UE=%x CE=%x ME=%x Syn=%x\n",
-+ EccStatus0 & ECC_ADDR_MASK, EccStatus0 & ECC_UE_MASK,
-+ EccStatus0 & ECC_CE_MASK, EccStatus0 & ECC_ME_MASK,
-+ EccStatus1 & ECC_SYN_MASK);
-+
-+ if (EccStatus0 & (ECC_UE_MASK|ECC_CE_MASK))
-+ {
-+ printk ("elan%d: ECC memory error (Address=%08x Syndrome=%02x %s%s%s)\n",
-+ dev->Instance,
-+ (EccStatus0 & ECC_ADDR_MASK), (EccStatus1 & ECC_SYN_MASK),
-+ (EccStatus0 & ECC_UE_MASK) ? "Uncorrectable " : "",
-+ (EccStatus0 & ECC_CE_MASK) ? "Correctable " : "",
-+ (EccStatus0 & ECC_ME_MASK) ? "Multiple Errors " : "");
-+ }
-+
-+ if (EccStatus0 & ECC_UE_MASK)
-+ panic ("elan: Uncorrectable ECC memory error");
-+ if (EccStatus0 & ECC_CE_MASK)
-+ BumpStat (dev, CorrectableErrors);
-+ if (EccStatus0 & ECC_ME_MASK)
-+ BumpStat (dev, MultipleErrors);
-+
-+ /*
-+ * Clear the interrupt and reset the error flags.
-+ * Note: might lose a UE or CE if it occurs between reading the status and
-+ * clearing the interrupt. I don't think this matters very much as the
-+ * status reg will only be used to identify a bad simm.
-+ */ -+ -+ spin_lock_irqsave (&dev->TlbLock, flags); -+ write_reg32 (dev, Cache_Control_Reg.ContReg, dev->Cache_Control_Reg | CLEAR_SDRAM_ERROR); -+ mmiob(); -+ spin_unlock_irqrestore (&dev->TlbLock, flags); -+ -+ CheckForExcessiveErrorRate (dev); -+} -+ -+static int -+HandleEventInterrupt (ELAN3_DEV *dev, int nticks, unsigned long *flags) -+{ -+ E3_uint32 Fptr = dev->Event_Int_Queue_FPtr; -+ E3_uint32 Bptr = read_reg32 (dev, Event_Int_Queue_BPtr); /* PCI read */ -+ long tlim = lbolt + nticks; -+ long count = 0; -+ ELAN3_CTXT *ctxt; -+ -+ ASSERT (SPINLOCK_HELD (&dev->IntrLock)); -+ ASSERT ((dev->InterruptMask & INT_EventInterrupt) == 0); -+ -+ while (Fptr != Bptr) -+ { -+ while (Fptr != Bptr) -+ { -+ E3_EventInt_BE EvInt; -+ E3_uint32 Context; -+ -+ /* If we're running in the interrupt handler and have seen a high -+ * rate of event interrupts then punt to the thread - however on -+ * Linux the elan interrupt handler can block the timer interrupt, -+ * and so lbolt (jiffies) is not incremented, hence we punt after -+ a number of loops instead */ -+#if defined(LINUX) -+ if (in_interrupt() && ++count > eventint_punt_loops) -+ return (EAGAIN); -+#endif -+ -+ if (nticks && ((int) (lbolt - tlim)) > 0) -+ { -+ PRINTF2 (DBG_DEVICE, DBG_INTR, "HandleEventInterrupt: Fptr %x Bptr %x punting to thread\n", Fptr, Bptr); -+ return (EAGAIN); -+ } -+ -+ elan3_sdram_copyq_from_sdram (dev, Fptr, (void *) &EvInt, 8); /* PCI read */ -+ -+ /* The context number is held in the top 16 bits of the EventContext */ -+ Context = (EvInt.s.EventContext >> 16) & MAX_ROOT_CONTEXT_MASK; -+ -+ PRINTF2 (DBG_DEVICE, DBG_INTR, "HandleEventInterrupt: Context %d : Cookie %x\n", Context, EvInt.s.IntCookie); -+ -+ ctxt = ELAN3_DEV_CTX_TABLE(dev, Context); -+ -+ /* Work out new fptr, and store it in the device, since we'll be dropping the IntrLock */ -+ Fptr = E3_EVENT_INTQ_NEXT(Fptr); -+ dev->Event_Int_Queue_FPtr = Fptr; -+ -+ if (ctxt == NULL) -+ { -+ PRINTF3 (DBG_DEVICE, DBG_INTR, "HandleEventInterrupt: Fptr %x Bptr %x context %d invalid\n", -+ Fptr, Bptr, Context); -+ BumpStat (dev, InvalidContext); -+ } -+ else -+ { -+ BumpStat (dev, EventInterrupts); -+ -+ spin_unlock_irqrestore (&dev->IntrLock, *flags); -+ QueueEventInterrupt (ctxt, EvInt.s.IntCookie); -+ spin_lock_irqsave (&dev->IntrLock, *flags); -+ } -+ -+ /* Re-read the FPtr, since we've dropped the IntrLock */ -+ Fptr = dev->Event_Int_Queue_FPtr; -+ -+ /* Store the new FPtr to the elan, this also clears the interrupt. */ -+ write_reg32 (dev, Event_Int_Queue_FPtr, Fptr); /* PCI write */ -+ -+ mmiob(); -+ } -+ -+ mb(); -+ Bptr = read_reg32 (dev, Event_Int_Queue_BPtr); /* PCI read */ -+ } -+ -+ return (ESUCCESS); -+} -+ -+int -+SetLinkBoundaryScan (ELAN3_DEV *dev) -+{ -+ int res = ESUCCESS; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ if ((dev->SchCntReg & LinkBoundaryScan) != 0) -+ res = EAGAIN; -+ else -+ { -+ PRINTF0 (DBG_DEVICE, DBG_BSCAN, "SetLinkBoundaryScan: setting link into boundary scan mode\n"); -+ -+ /* -+ * We're going to set the link into boundary scan mode, so firstly -+ * set the inputters to discard everything. -+ */ -+ if (dev->DiscardAllCount++ == 0) -+ SetSchedStatusRegister (dev, read_reg32 (dev, Exts.InterruptReg), NULL); -+ -+ /* -+ * Now disable the error interrupts -+ */ -+ DISABLE_INT_MASK (dev, INT_ErrorInterrupts); -+ -+ /* -+ * And set the link into boundary scan mode, and drive -+ * a reset token onto the link. 
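-+ * (The expected usage is SetLinkBoundaryScan(), then any number of
-+ * WriteBoundaryScanValue()/ReadBoundaryScanValue() calls, then
-+ * ClearLinkBoundaryScan() to take the link out of scan mode again.)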
-+ */ -+ SET_SCHED_LINK_VALUE (dev, 1, LinkResetToken); -+ } -+ -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ -+ return (res); -+} -+ -+void -+ClearLinkBoundaryScan (ELAN3_DEV *dev) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ if ((dev->SchCntReg & LinkBoundaryScan) != 0) -+ { -+ PRINTF0 (DBG_DEVICE, DBG_BSCAN, "ClearLinkBoundaryScan: taking link out of boundary scan mode\n"); -+ -+ /* -+ * Take the link out of boundary scan -+ */ -+ SET_SCHED_LINK_VALUE (dev, 0, 0); -+ -+ /* -+ * Clear any link errors. -+ */ -+ PULSE_SCHED_STATUS (dev, ClearLinkErrorInt); -+ -+ /* -+ * Re-enable the error interrupts. -+ */ -+ if (! timer_fn_queued(&dev->ErrorTimeoutId)) -+ ENABLE_INT_MASK (dev, INT_ErrorInterrupts); -+ -+ /* -+ * And stop the inputter from discarding all packets. -+ */ -+ if (--dev->DiscardAllCount == 0) -+ SetSchedStatusRegister (dev, 0, NULL); -+ } -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+} -+ -+int -+WriteBoundaryScanValue (ELAN3_DEV *dev, int value) -+{ -+ int res = 0; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ if ((dev->SchCntReg & LinkBoundaryScan) != 0) -+ { -+ PRINTF1 (DBG_DEVICE, DBG_BSCAN, "WriteBoundaryScanValue: driving value 0x%x onto link\n", value); -+ SET_SCHED_LINK_VALUE (dev, 1, value); -+ -+ res = read_reg32 (dev, Exts.LinkState); -+ -+ PRINTF1 (DBG_DEVICE, DBG_BSCAN, "WriteBoundaryScanValue: return 0x%x\n", res); -+ } -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ -+ return (res); -+} -+ -+int -+ReadBoundaryScanValue(ELAN3_DEV *dev, int link) -+{ -+ int res; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ if ((dev->SchCntReg & LinkBoundaryScan) == 0) -+ { -+ PRINTF1 (DBG_DEVICE, DBG_BSCAN, "ReadBoundaryScanValue: set linkval 0x%x\n", link); -+ SET_SCHED_LINK_VALUE (dev, 0, link); -+ } -+ res = read_reg32 (dev, Exts.LinkState); -+ PRINTF1 (DBG_DEVICE, DBG_BSCAN, "ReadBoundaryScanValue: return 0x%x\n", res); -+ -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ -+ return (res); -+} -+ -+static int -+ReadLinkVal (ELAN3_DEV *dev, int link) -+{ -+ if ((dev->SchCntReg & LinkBoundaryScan) == 0) -+ SET_SCHED_LINK_VALUE (dev, 0, link); -+ -+ return (read_reg32 (dev, Exts.LinkState)); -+} -+ -+static void -+HandleLinkError (ELAN3_DEV *dev) -+{ -+ E3_uint32 value = read_reg32 (dev, Exts.LinkErrorTypes); -+ -+ PRINTF1 (DBG_DEVICE, DBG_LINKERR, "HandleLinkError: LinkErrorTypes %08x - clearing\n", value); -+ -+ if (value & LS_LockError) BumpStat (dev, LockError); -+ if (value & LS_DeskewError) BumpStat (dev, DeskewError); -+ if (value & LS_PhaseError) BumpStat (dev, PhaseError); -+ if (value & LS_DataError) BumpStat (dev, DataError); -+ if (value & LS_FifoOvFlow0) BumpStat (dev, FifoOvFlow0); -+ if (value & LS_FifoOvFlow1) BumpStat (dev, FifoOvFlow1); -+ -+ if (value & LS_DataError) -+ dev->Stats.LinkErrorValue = ReadLinkVal (dev, 12) | (ReadLinkVal (dev, 13) << 9); -+ -+ PULSE_SCHED_STATUS (dev, ClearLinkErrorInt); -+ -+ CheckForExcessiveErrorRate (dev); -+} -+ -+static void -+HandleErrorInterrupt (ELAN3_DEV *dev, E3_uint32 Pend) -+{ -+ if (Pend & INT_PciMemErr) -+ HandlePciMemErr (dev); -+ -+ if (Pend & INT_SDRamInt) -+ HandleSDRamInterrupt (dev); -+ -+ if (Pend & INT_LinkError) -+ HandleLinkError (dev); -+} -+ -+static void -+HandleAnyIProcTraps (ELAN3_DEV *dev, E3_uint32 Pend) -+{ -+ E3_uint32 RestartBits = 0; -+ -+ if (Pend & INT_IProcCh0SysCntx) -+ { -+ HandleIProcTrap (dev, 0, Pend, -+ dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcSysCntx), -+ 
dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_C0_TrHead[0]), -+ dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_C0_TrData[0])); -+ -+ RestartBits |= RestartCh0SysCntx; -+ } -+ -+ if (Pend & INT_IProcCh1SysCntx) -+ { -+ HandleIProcTrap (dev, 1, Pend, -+ dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcSysCntx), -+ dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_C0_TrHead[0]), -+ dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_C0_TrData[0])); -+ -+ RestartBits |= RestartCh1SysCntx; -+ } -+ -+ if (Pend & INT_IProcCh0NonSysCntx) -+ { -+ HandleIProcTrap (dev, 0, Pend, -+ dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcNonSysCntx), -+ dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_NonC0_TrHead[0]), -+ dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_NonC0_TrData[0])); -+ -+ RestartBits |= RestartCh0NonSysCntx; -+ } -+ -+ -+ if (Pend & INT_IProcCh1NonSysCntx) -+ { -+ HandleIProcTrap (dev, 1, Pend, -+ dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcNonSysCntx), -+ dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_NonC0_TrHead[0]), -+ dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_NonC0_TrData[0])); -+ RestartBits |= RestartCh1NonSysCntx; -+ } -+ -+ PULSE_SCHED_STATUS (dev, RestartBits); -+} -+ -+static void -+elan3_event_interrupt (ELAN3_DEV *dev) -+{ -+ unsigned long flags; -+ -+ kernel_thread_init("elan3_event_int"); -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ for (;;) -+ { -+ /* Make sure we never sleep with the EventInterrupt disabled */ -+ if (! (dev->InterruptMask & INT_EventInterrupt)) -+ { -+ if (HandleEventInterrupt (dev, eventint_resched_ticks, &flags) != ESUCCESS) -+ BumpStat (dev, EventRescheds); -+ -+ ENABLE_INT_MASK (dev, INT_EventInterrupt); -+ } -+ -+ if (dev->ThreadsShouldStop) -+ break; -+ -+ kcondvar_wait (&dev->IntrWait, &dev->IntrLock, &flags); -+ } -+ -+ dev->EventInterruptThreadStopped = 1; -+ kcondvar_wakeupall (&dev->IntrWait, &dev->IntrLock); -+ -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ -+ kernel_thread_exit (); -+} -+ -+int -+InterruptHandler (ELAN3_DEV *dev) -+{ -+ E3_uint32 Mask; -+ E3_uint32 Pend; -+ E3_uint32 RestartBits; -+ int deliverDProcTrap; -+ int deliverTProcTrap; -+ static long lboltsave; -+ int loop_count = 0; -+ unsigned long flags; -+ int tproc_delivered; -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ -+ BumpStat (dev, Interrupts); -+ -+ Mask = dev->InterruptMask; -+ Pend = read_reg32 (dev, Exts.InterruptReg); /* PCI read */ -+ -+ /* Save the lbolt so we know how long in do loop or in event handling */ -+ lboltsave = lbolt; -+ -+ if ((Pend & Mask) == INT_EventInterrupt) -+ { -+ DISABLE_INT_MASK (dev, INT_EventInterrupt); -+ -+ if (HandleEventInterrupt (dev, eventint_punt_ticks, &flags) == ESUCCESS) -+ ENABLE_INT_MASK (dev, INT_EventInterrupt); -+ else -+ { -+ BumpStat (dev, EventPunts); -+ -+ kcondvar_wakeupone (&dev->IntrWait, &dev->IntrLock); -+ } -+ -+ if ((lbolt - lboltsave) > dev->Stats.LongestInterrupt) -+ dev->Stats.LongestInterrupt = (lbolt - lboltsave); -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ return (ESUCCESS); -+ } -+ -+ if ((Pend & Mask) == 0) -+ { -+ PRINTF3 (DBG_DEVICE, DBG_INTR, "InterruptHandler: Spurious Pend %x Mask %x SchedStatus %x\n", -+ Pend, Mask, read_reg32 (dev, Exts.SchCntReg)); -+ -+ if ((lbolt - lboltsave) > dev->Stats.LongestInterrupt) -+ dev->Stats.LongestInterrupt = (lbolt - lboltsave); -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ return (EFAIL); -+ } -+ -+ PRINTF3 (DBG_DEVICE, DBG_INTR, "InterruptHandler: Pend %x Mask %08x SchedStatus %x\n", -+ Pend, Mask, read_reg32 
(dev, Exts.SchCntReg)); -+ -+ do { -+ loop_count++; -+ RestartBits = 0; -+ -+ if (Pend & Mask & (INT_CProc | INT_ComQueue)) -+ HandleCProcTrap (dev, Pend, &Mask); -+ -+ tproc_delivered = 0; -+ -+ if (Pend & Mask & INT_TProc) { -+ ELAN_REG_REC(Pend); -+ tproc_delivered = 1; -+ deliverTProcTrap = HandleTProcTrap (dev, &RestartBits); -+ } -+ else -+ deliverTProcTrap = 0; -+ -+ if (Pend & Mask & INT_DProc) -+ deliverDProcTrap = HandleDProcTrap (dev, &RestartBits); -+ else -+ deliverDProcTrap = 0; -+ -+ ASSERT ((RestartBits & RestartDProc) == 0 || (read_reg32 (dev, Exts.DProcStatus.Status) >> 29) == 4); -+ ASSERT ((RestartBits & RestartDProc) == 0 || elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc.s.FSR.Status)) == 0); -+ ASSERT ((RestartBits & RestartDProc) == 0 || elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0.s.FSR.Status)) == 0); -+ ASSERT ((RestartBits & RestartDProc) == 0 || elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData1.s.FSR.Status)) == 0); -+ ASSERT ((RestartBits & RestartDProc) == 0 || elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData2.s.FSR.Status)) == 0); -+ ASSERT ((RestartBits & RestartDProc) == 0 || elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData3.s.FSR.Status)) == 0); -+ -+ PULSE_SCHED_STATUS (dev, RestartBits); /* Restart any processors which had trapped. */ -+ SET_INT_MASK (dev, Mask); /* And install the new interrupt mask */ -+ -+ if ((Pend & Mask & INT_TProc) && deliverTProcTrap) -+ DeliverTProcTrap (dev, dev->ThreadTrap, Pend); -+ -+ if ((Pend & Mask & INT_DProc) && deliverDProcTrap) -+ DeliverDProcTrap (dev, dev->DmaTrap, Pend); -+ -+ if (Pend & Mask & INT_Inputters) -+ HandleAnyIProcTraps (dev, Pend); -+ -+ if (Pend & Mask & INT_EventInterrupt) -+ { -+ DISABLE_INT_MASK (dev, INT_EventInterrupt); -+ -+ if (loop_count == 1 && HandleEventInterrupt (dev, eventint_punt_ticks, &flags) == ESUCCESS) /* always punt to the thread if we've */ -+ ENABLE_INT_MASK (dev, INT_EventInterrupt); /* been round the loop once */ -+ else -+ { -+ BumpStat (dev, EventPunts); -+ -+ kcondvar_wakeupone (&dev->IntrWait, &dev->IntrLock); -+ } -+ } -+ -+ if (Pend & (INT_Halted | INT_Discarding)) -+ ProcessHaltOperations (dev, Pend); -+ -+ if (Pend & Mask & INT_ErrorInterrupts) -+ HandleErrorInterrupt (dev, Pend); -+ -+ Mask = dev->InterruptMask; -+ Pend = read_reg32 (dev, Exts.InterruptReg); /* PCI read */ -+ -+ if (tproc_delivered) -+ ELAN_REG_REC(Pend); -+ -+ PRINTF3 (DBG_DEVICE, DBG_INTR, "InterruptHandler: Pend %x Mask %08x SchedStatus %x\n", -+ Pend, Mask, read_reg32 (dev, Exts.SchCntReg)); -+ } while ((Pend & Mask) != 0); -+ -+ if ((lbolt - lboltsave) > dev->Stats.LongestInterrupt) -+ dev->Stats.LongestInterrupt = (lbolt - lboltsave); -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ -+ PRINTF2 (DBG_DEVICE, DBG_INTR, "InterruptHandler: lbolt is %lx; start lbolt is %lx\n", -+ lbolt, lboltsave); -+ -+ return (ESUCCESS); -+} -+ -+void -+SetSchedStatusRegister (ELAN3_DEV *dev, E3_uint32 Pend, volatile E3_uint32 *Maskp) -+{ -+ E3_uint32 HaltMask = dev->HaltOperationsMask; -+ E3_uint32 Mask = Maskp ? 
*Maskp : dev->InterruptMask; -+ E3_uint32 ClearBits = 0; -+ E3_uint32 SetBits = 0; -+ -+ PRINTF5 (DBG_DEVICE, DBG_INTR, "SetSchedStatusRegister: HaltOperationsMask=%x HaltAll=%d HaltDmaDequeue=%d HaltThread=%d DiscardAll=%d\n", -+ HaltMask, dev->HaltAllCount, dev->HaltDmaDequeueCount, dev->HaltThreadCount, dev->DiscardAllCount); -+ -+ if (dev->FlushCommandCount) -+ SetBits |= FlushCommandQueues; -+ -+ if ((HaltMask & INT_DProcHalted) || dev->HaltAllCount) -+ { -+ SetBits |= HaltDmas | HaltDmaDequeue; -+ if (Pend & INT_DProcHalted) -+ Mask &= ~INT_DProcHalted; -+ else -+ Mask |= INT_DProcHalted; -+ } -+ -+ if (dev->HaltDmaDequeueCount) -+ { -+ SetBits |= HaltDmaDequeue; -+ if (Pend & INT_DProcHalted) -+ Mask &= ~INT_DProcHalted; -+ else -+ Mask |= INT_DProcHalted; -+ } -+ -+ if ((HaltMask & INT_TProcHalted) || dev->HaltAllCount || dev->HaltThreadCount) -+ { -+ SetBits |= HaltThread; -+ if (Pend & INT_TProcHalted) -+ Mask &= ~INT_TProcHalted; -+ else -+ Mask |= INT_TProcHalted; -+ } -+ -+ if ((HaltMask & INT_DiscardingSysCntx) || dev->DiscardAllCount) -+ { -+ SetBits |= DiscardSysCntxIn; -+ if (Pend & INT_DiscardingSysCntx) -+ Mask &= ~INT_DiscardingSysCntx; -+ else -+ Mask |= INT_DiscardingSysCntx; -+ } -+ -+ if ((HaltMask & INT_DiscardingNonSysCntx) || dev->DiscardNonContext0Count || dev->DiscardAllCount) -+ { -+ SetBits |= DiscardNonSysCntxIn; -+ if (Pend & INT_DiscardingNonSysCntx) -+ Mask &= ~INT_DiscardingNonSysCntx; -+ else -+ Mask |= INT_DiscardingNonSysCntx; -+ } -+ -+ if (dev->HaltNonContext0Count) -+ SetBits |= StopNonSysCntxs; -+ -+ ClearBits = SetBits ^ (FlushCommandQueues | HaltDmas | HaltDmaDequeue | HaltThread | -+ DiscardSysCntxIn | DiscardNonSysCntxIn | StopNonSysCntxs); -+ -+ PRINTF4 (DBG_DEVICE, DBG_INTR, "SetSchedStatusRegister: SetBits=%x InterruptMask=%x InterruptReg=%x Mask=%x\n", -+ SetBits, dev->InterruptMask, read_reg32 (dev, Exts.InterruptReg), Mask); -+ -+ MODIFY_SCHED_STATUS (dev, SetBits, ClearBits); -+ -+ if (Maskp) -+ *Maskp = Mask; /* copyback new interrupt mask */ -+ else -+ SET_INT_MASK(dev, Mask); -+} -+ -+void -+FreeHaltOperation (ELAN3_DEV *dev, ELAN3_HALTOP *op) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->FreeHaltLock, flags); -+ op->Next = dev->FreeHaltOperations; -+ dev->FreeHaltOperations = op; -+ spin_unlock_irqrestore (&dev->FreeHaltLock, flags); -+} -+ -+int -+ReserveHaltOperations (ELAN3_DEV *dev, int count, int cansleep) -+{ -+ ELAN3_HALTOP *op; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->FreeHaltLock, flags); -+ while ((dev->NumHaltOperations - dev->ReservedHaltOperations) < count) -+ { -+ spin_unlock_irqrestore (&dev->FreeHaltLock, flags); -+ -+ KMEM_ZALLOC (op, ELAN3_HALTOP *, sizeof (ELAN3_HALTOP), cansleep); -+ -+ if (op == NULL) -+ return (FALSE); -+ -+ spin_lock_irqsave (&dev->FreeHaltLock, flags); -+ -+ dev->NumHaltOperations++; -+ -+ op->Next = dev->FreeHaltOperations; -+ dev->FreeHaltOperations = op; -+ } -+ -+ dev->ReservedHaltOperations += count; -+ -+ spin_unlock_irqrestore (&dev->FreeHaltLock, flags); -+ -+ return (TRUE); -+} -+ -+void -+ReleaseHaltOperations (ELAN3_DEV *dev, int count) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->FreeHaltLock, flags); -+ dev->ReservedHaltOperations -= count; -+ spin_unlock_irqrestore (&dev->FreeHaltLock, flags); -+} -+ -+void -+QueueHaltOperation (ELAN3_DEV *dev, E3_uint32 Pend, volatile E3_uint32 *Maskp, -+ E3_uint32 ReqMask, void (*Function)(ELAN3_DEV *, void *), void *Arguement) -+{ -+ ELAN3_HALTOP *op; -+ -+ ASSERT (SPINLOCK_HELD (&dev->IntrLock)); -+ 
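-+ /* Usage sketch (illustrative - see PollForDmaHungup/KillNegativeDma above
-+ * for a real caller); the op must have been reserved beforehand and the
-+ * caller must hold IntrLock, as asserted above:
-+ *
-+ * ReserveHaltOperations (dev, 1, TRUE);
-+ * spin_lock_irqsave (&dev->IntrLock, flags);
-+ * QueueHaltOperation (dev, 0, NULL, INT_DProcHalted, MyCallback, arg);
-+ * spin_unlock_irqrestore (&dev->IntrLock, flags);
-+ *
-+ * MyCallback and arg are placeholder names; the callback runs from
-+ * ProcessHaltOperations() once the requested units have halted, after
-+ * which ReleaseHaltOperations (dev, 1) returns the reservation.
-+ */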
-+ spin_lock (&dev->FreeHaltLock); -+ op = dev->FreeHaltOperations; -+ -+ ASSERT (op != NULL); -+ -+ dev->FreeHaltOperations = op->Next; -+ spin_unlock (&dev->FreeHaltLock); -+ -+ op->Mask = ReqMask; -+ op->Function = (void (*)(void *, void *))Function; -+ op->Arguement = Arguement; -+ -+ dev->HaltOperationsMask |= ReqMask; /* Add our bits to the global bits needed. */ -+ SetSchedStatusRegister (dev, Pend, Maskp); /* Set the control register and the interrupt mask */ -+ -+ /* -+ * If the condition is already satisfied, then SetSchedStatusRegister will -+ * have masked out the interrupt, so re-enable it now to take it straight -+ * away -+ */ -+ if (Maskp == NULL) -+ { -+ if ((read_reg32 (dev, Exts.InterruptReg) & ReqMask) == ReqMask) -+ ENABLE_INT_MASK (dev, ReqMask); -+ } -+ else -+ { -+ if ((Pend & ReqMask) == ReqMask) -+ *Maskp |= ReqMask; -+ } -+ -+ *dev->HaltOperationsTailpp = op; /* Queue at end of list, since ProcessHaltOperations */ -+ dev->HaltOperationsTailpp = &op->Next; /* drops the IntrLock while running down the list */ -+ op->Next = NULL; -+} -+ -+void -+ProcessHaltOperations (ELAN3_DEV *dev, E3_uint32 Pend) -+{ -+ E3_uint32 Mask; -+ ELAN3_HALTOP *op; -+ ELAN3_HALTOP **prevp; -+ E3_uint32 haltMask; -+ ELAN3_HALTOP *next; -+ -+ PRINTF1 (DBG_DEVICE, DBG_INTR, "ProcessHaltOperations: Pend %x\n", Pend); -+ -+ for (;;) -+ { -+ ELAN3_HALTOP *head = NULL; -+ ELAN3_HALTOP **tailp = &head; -+ -+ /* -+ * Generate a list of halt operations which can be called now. -+ */ -+ for (haltMask = 0, prevp = &dev->HaltOperations; (op = *prevp) != NULL; ) -+ { -+ if ((Pend & op->Mask) != op->Mask) -+ { -+ haltMask |= op->Mask; -+ prevp = &op->Next; -+ } -+ else -+ { -+ *prevp = op->Next; /* remove from list */ -+ if (op->Next == NULL) -+ dev->HaltOperationsTailpp = prevp; -+ -+ *tailp = op; /* add to local list */ -+ op->Next = NULL; -+ tailp = &op->Next; -+ } -+ } -+ -+ if (head == NULL) /* nothing to do, so update */ -+ { /* the schedule status register */ -+ dev->HaltOperationsMask = haltMask; /* and the interrupt mask */ -+ SetSchedStatusRegister (dev, Pend, NULL); -+ return; -+ } -+ -+ /* -+ * flush the command queues, before calling any operations -+ */ -+ Mask = dev->InterruptMask; -+ -+ if (dev->FlushCommandCount++ == 0) -+ SetSchedStatusRegister (dev, Pend, &Mask); -+ -+ if ((read_reg32 (dev, ComQueueStatus) & ComQueueNotEmpty) != 0) -+ { -+ if (dev->HaltThreadCount++ == 0) -+ SetSchedStatusRegister (dev, Pend, &Mask); -+ -+ CAPTURE_CPUS(); -+ -+ while ((read_reg32 (dev, ComQueueStatus) & ComQueueNotEmpty) != 0) -+ mb(); -+ -+ RELEASE_CPUS(); -+ -+ if (--dev->HaltThreadCount == 0) -+ SetSchedStatusRegister (dev, Pend, &Mask); -+ } -+ -+ if (read_reg32 (dev, Exts.InterruptReg) & INT_CProc) -+ { -+ PRINTF0 (DBG_DEVICE, DBG_INTR, "ProcessHaltOperations: command processor has trapped\n"); -+ HandleCProcTrap (dev, Pend, &Mask); -+ } -+ -+ if (--dev->FlushCommandCount == 0) -+ SetSchedStatusRegister (dev, Pend, &Mask); -+ -+ PRINTF2 (DBG_DEVICE, DBG_INTR, "ProcessHaltOperations: interrupt mask %08x -> %08x\n", -+ dev->InterruptMask, Mask); -+ -+ SET_INT_MASK (dev, Mask); -+ spin_unlock (&dev->IntrLock); -+ -+ /* -+ * now process the list of operations -+ * we have -+ */ -+ for (op = head; op != NULL; op = next) -+ { -+ next = op->Next; -+ -+ op->Function (dev, op->Arguement); -+ -+ FreeHaltOperation (dev, op); -+ } -+ -+ spin_lock (&dev->IntrLock); -+ } -+} -+ -+int -+ComputePosition (ELAN_POSITION *pos, unsigned nodeId, unsigned numNodes, unsigned numDownLinksVal) -+{ -+ int i, lvl, n; -+ 
char numDownLinks[ELAN_MAX_LEVELS]; -+ -+ if (nodeId >= numNodes) -+ return (EINVAL); -+ -+ for (i = 0; i < ELAN_MAX_LEVELS; i++, numDownLinksVal >>= 4) -+ numDownLinks[i] = numDownLinksVal & 7; -+ -+ for (lvl = 0, n = numNodes; n > ((lvl % 3) == 2 ? 8 : 4) && lvl < ELAN_MAX_LEVELS; lvl++) -+ { -+ if (numDownLinks[lvl] == 0) -+ numDownLinks[lvl] = 4; -+ -+ if ((n % numDownLinks[lvl]) != 0) -+ return (EINVAL); -+ -+ n /= numDownLinks[lvl]; -+ } -+ -+ if (numDownLinks[lvl] == 0) -+ numDownLinks[lvl] = n; -+ -+ if (numDownLinks[lvl] != n) -+ return (EINVAL); -+ -+ for (i = 0; i <= lvl; i++) -+ pos->pos_arity[i] = numDownLinks[lvl - i]; -+ -+ pos->pos_nodes = numNodes; -+ pos->pos_levels = lvl + 1; -+ pos->pos_nodeid = nodeId; -+ pos->pos_mode = ELAN_POS_MODE_SWITCHED; -+ -+ return (0); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan3/elandev_linux.c linux-2.6.9/drivers/net/qsnet/elan3/elandev_linux.c ---- clean/drivers/net/qsnet/elan3/elandev_linux.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan3/elandev_linux.c 2005-09-07 10:35:03.000000000 -0400 -@@ -0,0 +1,2395 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "$Id: elandev_linux.c,v 1.112.2.7 2005/09/07 14:35:03 mike Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/os/elandev_linux.c,v $*/ -+ -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,2,0) -+#error please use a 2.2 series kernel or newer -+#endif -+ -+/* Minor numbers encoded as : -+ * [5:0] device number -+ * [15:6] function number -+ */ -+#define ELAN3_DEVICE_MASK 0x3F -+ -+#define ELAN3_MINOR_CONTROL 0 -+#define ELAN3_MINOR_MEM 1 -+#define ELAN3_MINOR_USER 2 -+#define ELAN3_MINOR_SHIFT 6 -+ -+#define ELAN3_DEVICE(inode) (MINOR(inode->i_rdev) & ELAN3_DEVICE_MASK) -+#define ELAN3_MINOR(inode) (MINOR(inode->i_rdev) >> ELAN3_MINOR_SHIFT) -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) -+# define SetPageReserved(page) set_bit(PG_reserved, &(page)->flags) -+# define ClearPageReserved(page) clear_bit(PG_reserved, &(page)->flags) -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) -+typedef void irqreturn_t; -+#endif -+# define IRQ_NONE -+# define IRQ_HANDLED -+# define IRQ_RETVAL(x) -+#endif -+ -+#if defined(LINUX_SPARC) || defined(LINUX_PPC64) -+#define __io_remap_page_range(from,offset,size,prot) remap_page_range(from,offset,size,prot) -+#define __remap_page_range(from,offset,size,prot) remap_page_range(from,offset,size,prot) -+#elif defined(NO_RMAP) -+#define __io_remap_page_range(from,offset,size,prot) io_remap_page_range(from,offset,size,prot) -+#define __remap_page_range(from,offset,size,prot) remap_page_range(from,offset,size,prot) -+#else -+#define __io_remap_page_range(from,offset,size,prot) io_remap_page_range(vma,from,offset,size,prot) -+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) -+#define __remap_page_range(from,offset,size,prot) remap_pfn_range(vma,from,(offset)>>PAGE_SHIFT,size,prot) -+#else -+#define __remap_page_range(from,offset,size,prot) remap_page_range(vma,from,offset,size,prot) -+#endif -+#endif -+ -+/* -+ * Function prototypes. 
-+ */ -+static int elanattach(int instance, struct pci_dev *pcidev); -+static int elandetach(int instance); -+ -+static int elan3_open (struct inode *inode, struct file *file); -+static int elan3_ioctl (struct inode *inode, struct file *file, -+ unsigned int cmd, unsigned long arg); -+static int elan3_mmap (struct file *file, struct vm_area_struct *vm_area); -+static int elan3_release (struct inode *inode, struct file *file); -+ -+static int elan3_reboot_event (struct notifier_block *self, unsigned long event, void *buffer); -+static int elan3_panic_event (struct notifier_block *self, unsigned long event, void *buffer); -+ -+static irqreturn_t InterruptHandlerWrapper(int irq, void *dev_id, struct pt_regs *regs); -+ -+static int ConfigurePci(ELAN3_DEV *dev); -+static int ResetElan(ELAN3_DEV *dev, ioaddr_t intPalAddr); -+ -+static void elan3_shutdown_devices(int panicing); -+ -+/* -+ * Globals. -+ */ -+static ELAN3_DEV *elan3_devices[ELAN3_MAX_CONTROLLER]; -+static int NodeId = ELAN3_INVALID_NODE; -+static int NumNodes; -+static int DownLinks; -+static int RandomRoutingDisabled; -+int BackToBackMaster; -+int BackToBackSlave; -+int enable_sdram_writecombining; -+int sdram_bank_limit; -+extern int LwpNice; -+ -+char * elan_reg_rec_file [ELAN_REG_REC_MAX]; -+int elan_reg_rec_line [ELAN_REG_REC_MAX]; -+long elan_reg_rec_lbolt[ELAN_REG_REC_MAX]; -+int elan_reg_rec_cpu [ELAN_REG_REC_MAX]; -+E3_uint32 elan_reg_rec_reg [ELAN_REG_REC_MAX]; -+int elan_reg_rec_index; -+ -+MODULE_AUTHOR("Quadrics Ltd."); -+MODULE_DESCRIPTION("Elan3 Device Driver"); -+ -+MODULE_LICENSE("GPL"); -+ -+module_param(NodeId,uint, 0); -+module_param(NumNodes,uint, 0); -+module_param(RandomRoutingDisabled,uint, 0); -+module_param(DownLinks,uint, 0); -+module_param(BackToBackMaster,uint, 0); -+module_param(BackToBackSlave,uint, 0); -+module_param(LwpNice, uint, 0); -+module_param(elan3_debug, uint, 0); -+module_param(elan3_debug_console, uint, 0); -+module_param(elan3_debug_buffer, uint, 0); -+module_param(elan3mmu_debug, uint, 0); -+module_param(sdram_bank_limit, uint, 0); -+ -+/* elan3/os/context.c */ -+EXPORT_SYMBOL(elan3_alloc); -+EXPORT_SYMBOL(elan3_attach); -+EXPORT_SYMBOL(elan3_doattach); -+EXPORT_SYMBOL(elan3_free); -+EXPORT_SYMBOL(elan3_detach); -+EXPORT_SYMBOL(elan3_dodetach); -+EXPORT_SYMBOL(elan3_block_inputter); -+EXPORT_SYMBOL(CheckCommandQueueFlushed); -+ -+/* elan3/os/sdram.c */ -+EXPORT_SYMBOL(elan3_sdram_alloc); -+EXPORT_SYMBOL(elan3_sdram_free); -+EXPORT_SYMBOL(elan3_sdram_to_phys); -+EXPORT_SYMBOL(elan3_sdram_writeb); -+EXPORT_SYMBOL(elan3_sdram_writew); -+EXPORT_SYMBOL(elan3_sdram_writel); -+EXPORT_SYMBOL(elan3_sdram_writeq); -+EXPORT_SYMBOL(elan3_sdram_readb); -+EXPORT_SYMBOL(elan3_sdram_readw); -+EXPORT_SYMBOL(elan3_sdram_readl); -+EXPORT_SYMBOL(elan3_sdram_readq); -+EXPORT_SYMBOL(elan3_sdram_zerob_sdram); -+EXPORT_SYMBOL(elan3_sdram_zerow_sdram); -+EXPORT_SYMBOL(elan3_sdram_zerol_sdram); -+EXPORT_SYMBOL(elan3_sdram_zeroq_sdram); -+EXPORT_SYMBOL(elan3_sdram_copyb_to_sdram); -+EXPORT_SYMBOL(elan3_sdram_copyw_to_sdram); -+EXPORT_SYMBOL(elan3_sdram_copyl_to_sdram); -+EXPORT_SYMBOL(elan3_sdram_copyq_to_sdram); -+EXPORT_SYMBOL(elan3_sdram_copyb_from_sdram); -+EXPORT_SYMBOL(elan3_sdram_copyw_from_sdram); -+EXPORT_SYMBOL(elan3_sdram_copyl_from_sdram); -+EXPORT_SYMBOL(elan3_sdram_copyq_from_sdram); -+ -+/* elan3/os/tproc.c */ -+EXPORT_SYMBOL(DeliverTProcTrap); -+EXPORT_SYMBOL(HandleTProcTrap); -+EXPORT_SYMBOL(SaveThreadToStack); -+ -+/* elan3/os/tprocinsts.c */ -+EXPORT_SYMBOL(RollThreadToClose); -+ -+/* 
elan3/os/iproc.c */ -+EXPORT_SYMBOL(InspectIProcTrap); -+EXPORT_SYMBOL(IProcTrapString); -+EXPORT_SYMBOL(SimulateUnlockQueue); -+ -+/* elan3/os/cproc.c */ -+EXPORT_SYMBOL(HandleCProcTrap); -+ -+/* elan3/os/route_table.c */ -+EXPORT_SYMBOL(GenerateRoute); -+EXPORT_SYMBOL(LoadRoute); -+EXPORT_SYMBOL(InvalidateRoute); -+EXPORT_SYMBOL(ValidateRoute); -+EXPORT_SYMBOL(ClearRoute); -+EXPORT_SYMBOL(GenerateProbeRoute); -+EXPORT_SYMBOL(GenerateCheckRoute); -+ -+/* elan3/os/elandev_generic.c */ -+EXPORT_SYMBOL(elan3_debug); -+EXPORT_SYMBOL(QueueHaltOperation); -+EXPORT_SYMBOL(ReleaseHaltOperations); -+EXPORT_SYMBOL(ReserveHaltOperations); -+ -+/* elan3/vm/elan3mmu_generic.c */ -+EXPORT_SYMBOL(elan3mmu_pteload); -+EXPORT_SYMBOL(elan3mmu_unload); -+EXPORT_SYMBOL(elan3mmu_set_context_filter); -+EXPORT_SYMBOL(elan3mmu_reserve); -+EXPORT_SYMBOL(elan3mmu_attach); -+EXPORT_SYMBOL(elan3mmu_detach); -+EXPORT_SYMBOL(elan3mmu_release); -+/* elan3/vm/elan3mmu_linux.c */ -+EXPORT_SYMBOL(elan3mmu_phys_to_pte); -+EXPORT_SYMBOL(elan3mmu_kernel_invalid_pte); -+ -+/* elan3/os/elan3_debug.c */ -+EXPORT_SYMBOL(elan3_debugf); -+ -+/* elan3/os/minames.c */ -+EXPORT_SYMBOL(MiToName); -+ -+/* elan3/os/elandev_generic.c */ -+EXPORT_SYMBOL(MapDeviceRegister); -+EXPORT_SYMBOL(UnmapDeviceRegister); -+ -+EXPORT_SYMBOL(elan_reg_rec_lbolt); -+EXPORT_SYMBOL(elan_reg_rec_file); -+EXPORT_SYMBOL(elan_reg_rec_index); -+EXPORT_SYMBOL(elan_reg_rec_cpu); -+EXPORT_SYMBOL(elan_reg_rec_reg); -+EXPORT_SYMBOL(elan_reg_rec_line); -+ -+/* -+ * Standard device entry points. -+ */ -+#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE) -+ -+#include -+ -+static int elan3_dump_event (struct notifier_block *self, unsigned long event, void *buffer); -+ -+static struct notifier_block elan3_dump_notifier = -+{ -+ notifier_call: elan3_dump_event, -+ priority: 0, -+}; -+ -+static int -+elan3_dump_event (struct notifier_block *self, unsigned long event, void *buffer) -+{ -+ if ( event == DUMP_BEGIN ) -+ elan3_shutdown_devices (FALSE); -+ -+ return (NOTIFY_DONE); -+} -+ -+#endif -+ -+static struct file_operations elan3_fops = { -+ ioctl: elan3_ioctl, /* ioctl */ -+ mmap: elan3_mmap, /* mmap */ -+ open: elan3_open, /* open */ -+ release: elan3_release, /* release */ -+}; -+ -+static struct notifier_block elan3_reboot_notifier = -+{ -+ notifier_call: elan3_reboot_event, -+ priority: 0, -+}; -+ -+#if !defined(NO_PANIC_NOTIFIER) -+ -+static int elan3_panic_event (struct notifier_block *self, unsigned long event, void *buffer); -+ -+static struct notifier_block elan3_panic_notifier = -+{ -+ notifier_call: elan3_panic_event, -+ priority: 0, -+}; -+ -+static int -+elan3_panic_event (struct notifier_block *self, unsigned long event, void *buffer) -+{ -+ elan3_shutdown_devices (TRUE); -+ -+ return (NOTIFY_DONE); -+} -+ -+#endif /* !defined(NO_PANIC_NOTIFIER) */ -+ -+ELAN3_DEV * -+elan3_device (int instance) -+{ -+ if (instance < 0 || instance >= ELAN3_MAX_CONTROLLER) -+ return ((ELAN3_DEV *) NULL); -+ return elan3_devices[instance]; -+} -+EXPORT_SYMBOL(elan3_device); -+ -+/* -+ * Called at rmmod time. elandetach() for each card + general cleanup. 
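-+ *
-+ * Teardown is roughly the reverse of initialisation: the dump, reboot
-+ * and panic notifiers are unregistered first so that no shutdown
-+ * callback can run while devices are being detached, and the character
-+ * device and procfs entries are only removed once every instance has
-+ * been detached.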
-+ */ -+#ifdef MODULE -+static void __exit elan3_exit(void) -+{ -+ int i; -+ -+ printk("elan: preparing to remove module\n"); -+ -+#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE) -+ unregister_dump_notifier (&elan3_dump_notifier); -+#endif -+ unregister_reboot_notifier (&elan3_reboot_notifier); -+#if !defined(NO_PANIC_NOTIFIER) -+ notifier_chain_unregister (&panic_notifier_list, &elan3_panic_notifier); -+#endif -+ -+ /* call elandetach() for each device configured. */ -+ for (i = 0; i < ELAN3_MAX_CONTROLLER; i++) -+ if (elan3_devices[i] != NULL) -+ elandetach(i); -+ -+ FinaliseNetworkErrorResolver(); -+ elan3mmu_fini(); -+ -+ cookie_fini(); -+ unregister_chrdev(ELAN3_MAJOR, ELAN3_NAME); -+ -+ elan3_procfs_fini(); -+ -+ printk("elan: module removed\n"); -+} -+ -+/* -+ * Called at insmod time. First we perform general driver initialization, -+ * then call elanattach() for each card. -+ */ -+#ifdef MODULE -+static int __init elan3_init(void) -+#else -+__initfunc(int elan3_init(void)) -+#endif -+{ -+ int e; -+ int boards; -+ struct pci_dev *dev; -+ char revid; -+ -+ elan_reg_rec_index=0; -+ { -+ int i; -+ for(i=0;islot_name); -+#else -+ printk ("elan at pci %s - RevA device not supported\n", pci_name(dev)); -+#endif -+ else -+ { -+ if (boards < ELAN3_MAX_CONTROLLER) -+ /* Count successfully attached devices */ -+ boards += ((elanattach(boards, dev) == 0) ? 1 : 0); -+ else -+ { -+ printk ("elan: max controllers = %d\n", ELAN3_MAX_CONTROLLER); -+ break; -+ } -+ } -+ } -+#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE) -+ register_dump_notifier (&elan3_dump_notifier); -+#endif -+ register_reboot_notifier (&elan3_reboot_notifier); -+#if !defined(NO_PANIC_NOTIFIER) -+ notifier_chain_register (&panic_notifier_list, &elan3_panic_notifier); -+#endif -+ -+ return 0; -+} -+ -+/* Declare the module init and exit functions */ -+module_init(elan3_init); -+module_exit(elan3_exit); -+ -+#endif -+ -+static void -+elan3_shutdown_devices(int panicing) -+{ -+ ELAN3_DEV *dev; -+ unsigned long flags; -+ register int i; -+ -+ local_irq_save (flags); -+ for (i = 0; i < ELAN3_MAX_CONTROLLER; i++) -+ { -+ if ((dev = elan3_devices[i]) != NULL) -+ { -+ if (! panicing) spin_lock (&dev->IntrLock); -+ -+ printk(KERN_INFO "elan%d: forcing link into reset\n", dev->Instance); -+ -+ /* -+ * We're going to set the link into boundary scan mode, so firstly -+ * set the inputters to discard everything. -+ */ -+ if (dev->DiscardAllCount++ == 0) -+ SetSchedStatusRegister (dev, read_reg32 (dev, Exts.InterruptReg), NULL); -+ -+ dev->LinkShutdown = 1; -+ -+ /* -+ * Now disable the error interrupts -+ */ -+ DISABLE_INT_MASK (dev, INT_ErrorInterrupts); -+ -+ /* -+ * And set the link into boundary scan mode, and drive -+ * a reset token onto the link. -+ */ -+ SET_SCHED_LINK_VALUE (dev, 1, LinkResetToken); -+ -+ if (! panicing) spin_unlock (&dev->IntrLock); -+ } -+ } -+ local_irq_restore (flags); -+} -+ -+static int -+elan3_reboot_event (struct notifier_block *self, unsigned long event, void *buffer) -+{ -+ if (! (event == SYS_RESTART || event == SYS_HALT || event == SYS_POWER_OFF)) -+ return (NOTIFY_DONE); -+ -+ elan3_shutdown_devices (FALSE); -+ -+ return (NOTIFY_DONE); -+} -+ -+#include -+/* -+ * Called by init_module() for each card discovered on PCI. 
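-+ *
-+ * The attach sequence is: allocate and initialise the ELAN3_DEV,
-+ * configure PCI, size and map SDRAM, map the chip registers, reset
-+ * the chip, hook the interrupt, then initialise the device and its
-+ * network position; each failure point unwinds via fail0..fail4 below.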
-+ */
-+static int
-+elanattach(int instance, struct pci_dev *pcidev)
-+{
-+ ELAN3_DEV *dev;
-+ int ramSize;
-+ int level;
-+ ioaddr_t sdramAddr, cmdPortAddr, intPalAddr;
-+ DeviceMappingHandle handle;
-+
-+ printk("elan%d: attach, irq=%d\n", instance, pcidev->irq);
-+
-+ /*
-+ * Allocate the ELAN3_DEV structure.
-+ */
-+ KMEM_ZALLOC(dev, ELAN3_DEV *, sizeof(ELAN3_DEV), TRUE);
-+ if (dev == NULL) {
-+ printk ("elan%d: KMEM_ALLOC failed\n", instance);
-+ return (-ENOMEM);
-+ }
-+ elan3_devices[instance] = dev;
-+ dev->Osdep.pci = pcidev;
-+
-+ dev->Instance = instance;
-+
-+ /* Initialise the device information */
-+ pci_read_config_word (pcidev, PCI_VENDOR_ID, &dev->Devinfo.dev_vendor_id);
-+ pci_read_config_word (pcidev, PCI_DEVICE_ID, &dev->Devinfo.dev_device_id);
-+ pci_read_config_byte (pcidev, PCI_REVISION_ID, &dev->Devinfo.dev_revision_id);
-+
-+ dev->Devinfo.dev_instance = instance;
-+ dev->Devinfo.dev_rail = instance;
-+ dev->Devinfo.dev_driver_version = 0;
-+ dev->Devinfo.dev_num_down_links_value = DownLinks;
-+
-+ dev->Position.pos_mode = ELAN_POS_UNKNOWN;
-+ dev->Position.pos_random_disabled = RandomRoutingDisabled;
-+
-+ /*
-+ * Set up PCI config regs.
-+ */
-+ if (ConfigurePci(dev) != ESUCCESS)
-+ goto fail0;
-+
-+ /*
-+ * Determine the PFnums of the SDRAM and command port
-+ */
-+ if (MapDeviceRegister(dev, ELAN3_BAR_SDRAM, &sdramAddr, 0, PAGESIZE, &handle) != ESUCCESS)
-+ goto fail1;
-+
-+ DeviceRegisterSize(dev, ELAN3_BAR_SDRAM, &ramSize);
-+
-+ dev->SdramPhysMask = ~((physaddr_t) ramSize - 1);
-+ dev->SdramPhysBase = kmem_to_phys((void *) sdramAddr);
-+
-+ UnmapDeviceRegister (dev, &handle);
-+
-+#if defined(LINUX_ALPHA)
-+ /*
-+ * consider a physical address to be on the same pci bus
-+ * as us if its physical address is "close" to our sdram
-+ * physical address.
-+ * this is almost certainly incorrect for large memory (> 2Gb)
-+ * i386 machines - and is only correct for alpha for 32 bit
-+ * base address registers.
-+ *
-+ * Modified this to match the Tru64 driver value;
-+ * i.e. PciPhysMask = 0xfffffffffffc0000
-+ */
-+# define PCI_ADDR_MASK (0x7FFFFFFFl)
-+
-+ dev->PciPhysMask = ~PCI_ADDR_MASK;
-+ dev->PciPhysBase = dev->SdramPhysBase & dev->PciPhysMask;
-+#endif
-+ /*
-+ * Now reset the elan chip.
-+ */
-+ if (MapDeviceRegister(dev, ELAN3_BAR_REGISTERS, &dev->RegPtr, 0, 0, &dev->RegHandle) != ESUCCESS)
-+ goto fail1;
-+
-+ if (MapDeviceRegister(dev, ELAN3_BAR_EBUS, &intPalAddr, ELAN3_EBUS_INTPAL_OFFSET, PAGESIZE,
-+ &handle) != ESUCCESS)
-+ goto fail2;
-+
-+ ResetElan(dev, intPalAddr);
-+
-+ UnmapDeviceRegister (dev, &handle);
-+
-+ /*
-+ * Initialise the device mutexes which must be accessible from the
-+ * interrupt handler.
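-+ * They are set up before request_irq() is called below, so the
-+ * handler can never observe an uninitialised lock.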
-+ */ -+ kcondvar_init (&dev->IntrWait); -+ spin_lock_init (&dev->IntrLock); -+ spin_lock_init (&dev->TlbLock); -+ spin_lock_init (&dev->CProcLock); -+ spin_lock_init (&dev->FreeHaltLock); -+ for(level=0; level<4; level++) -+ spin_lock_init (&dev->Level[level].PtblLock); -+ spin_lock_init (&dev->PtblGroupLock); -+ -+ /* -+ * Add the interrupt handler, -+ */ -+ if (request_irq(dev->Osdep.pci->irq, InterruptHandlerWrapper, -+ SA_SHIRQ, "elan3", dev) != 0) { -+ printk ("elan%d: request_irq failed\n", instance); -+ goto fail3; -+ } -+ -+ if (MapDeviceRegister(dev, ELAN3_BAR_COMMAND_PORT, &cmdPortAddr, 0, PAGESIZE, &handle) != ESUCCESS) -+ goto fail4; -+ -+ if (InitialiseElan(dev, cmdPortAddr) == EFAIL) { -+ printk ("elan%d: InitialiseElan failed\n", instance); -+ UnmapDeviceRegister (dev, &handle); -+ goto fail4; -+ } -+ UnmapDeviceRegister (dev, &handle); -+ -+ /* If our nodeid is defined, then set it now */ -+ if (NodeId != ELAN3_INVALID_NODE && ComputePosition (&dev->Position, NodeId, NumNodes, DownLinks) == 0) -+ { -+ if (RandomRoutingDisabled & ((1 << (dev->Position.pos_levels-1))-1)) -+ printk ("elan%d: NodeId=%d NodeLevel=%d NumNodes=%d (random routing disabled 0x%x)\n", -+ dev->Instance, dev->Position.pos_nodeid, dev->Position.pos_levels, dev->Position.pos_nodes, RandomRoutingDisabled); -+ else -+ printk ("elan%d: NodeId=%d NodeLevel=%d NumNodes=%d (random routing ok)\n", -+ dev->Instance, dev->Position.pos_nodeid, dev->Position.pos_levels, dev->Position.pos_nodes); -+ } -+ -+ if (BackToBackMaster || BackToBackSlave) -+ { -+ dev->Position.pos_mode = ELAN_POS_MODE_BACKTOBACK; -+ dev->Position.pos_nodeid = (BackToBackMaster == 0); -+ dev->Position.pos_nodes = 2; -+ dev->Position.pos_levels = 1; -+ dev->Position.pos_arity[0] = 2; -+ -+ printk ("elan%d: back-to-back %s - elan node %d\n", dev->Instance, -+ BackToBackMaster ? "master" : "slave", dev->Position.pos_nodeid); -+ } -+ -+ elan3_procfs_device_init (dev); -+ -+ /* Success */ -+ return (0); -+ -+fail4: -+ free_irq(dev->Osdep.pci->irq, dev); -+ -+fail3: -+ kcondvar_destroy (&dev->IntrWait); -+ spin_lock_destroy (&dev->IntrLock); -+ spin_lock_destroy (&dev->InfoLock); -+ spin_lock_destroy (&dev->TlbLock); -+ spin_lock_destroy (&dev->CProcLock); -+ spin_lock_destroy (&dev->FreeHaltLock); -+ spin_lock_destroy (&dev->Level1PtblLock); -+ spin_lock_destroy (&dev->Level2PtblLock); -+ spin_lock_destroy (&dev->Level3PtblLock); -+ spin_lock_destroy (&dev->PtblGroupLock); -+ -+fail2: -+ UnmapDeviceRegister (dev, &dev->RegHandle); -+ -+fail1: -+ pci_disable_device (dev->Osdep.pci); -+fail0: -+ KMEM_FREE(dev, sizeof(ELAN3_DEV)); -+ -+ elan3_devices[instance] = NULL; -+ -+ /* Failure */ -+ return (-ENODEV); -+} -+ -+/* -+ * Called by elan3_exit() for each board found on PCI. 
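-+ *
-+ * This undoes elanattach() step by step: procfs entries first, then
-+ * the device is finalised, the registers are unmapped, the interrupt
-+ * is freed and the PCI device disabled, before the locks and the
-+ * ELAN3_DEV structure itself are destroyed.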
-+ */ -+static int -+elandetach(int instance) -+{ -+ ELAN3_DEV *dev = elan3_devices[instance]; -+ -+ printk("elan%d: detach\n", instance); -+ -+ elan3_procfs_device_fini (dev); -+ -+ FinaliseElan (dev); -+ -+ UnmapDeviceRegister (dev, &dev->RegHandle); -+ -+ free_irq(dev->Osdep.pci->irq, dev); -+ -+ pci_disable_device(dev->Osdep.pci); -+ -+ kcondvar_destroy (&dev->IntrWait); -+ spin_lock_destroy (&dev->IntrLock); -+ spin_lock_destroy (&dev->InfoLock); -+ spin_lock_destroy (&dev->TlbLock); -+ spin_lock_destroy (&dev->CProcLock); -+ spin_lock_destroy (&dev->FreeHaltLock); -+ spin_lock_destroy (&dev->Level1PtblLock); -+ spin_lock_destroy (&dev->Level2PtblLock); -+ spin_lock_destroy (&dev->Level3PtblLock); -+ spin_lock_destroy (&dev->PtblGroupLock); -+ -+ KMEM_FREE(dev, sizeof(ELAN3_DEV)); -+ elan3_devices[instance] = NULL; -+ -+ return 0; -+} -+ -+/* -+ * generic ioctls - available on control and user devices. -+ */ -+ -+static int -+device_stats_ioctl (ELAN3_DEV *dev, unsigned long arg) -+{ -+ ELAN3IO_STATS_STRUCT *args; -+ -+ KMEM_ALLOC(args, ELAN3IO_STATS_STRUCT *, sizeof(ELAN3IO_STATS_STRUCT), TRUE); -+ -+ if (args == NULL) -+ return (-ENOMEM); -+ -+ if (copy_from_user (args, (void *) arg, sizeof (ELAN3IO_STATS_STRUCT))) -+ { -+ KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT)); -+ return (-EFAULT); -+ } -+ -+ switch (args->which) -+ { -+ case ELAN3_SYS_STATS_DEVICE: -+ if (copy_to_user (args->ptr, &dev->Stats, sizeof (ELAN3_STATS))) -+ { -+ KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT)); -+ return (-EFAULT); -+ } -+ KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT)); -+ return (0); -+ -+ case ELAN3_SYS_STATS_MMU: -+ if (copy_to_user (args->ptr, &elan3mmu_global_stats, sizeof (ELAN3MMU_GLOBAL_STATS))) -+ { -+ KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT)); -+ return (-EFAULT); -+ } -+ KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT)); -+ return (0); -+ -+ default: -+ KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT)); -+ return (-EINVAL); -+ } -+} -+ -+/* -+ * /dev/elan3/controlX - control device -+ * -+ */ -+ -+typedef struct control_private -+{ -+ u_int pr_boundary_scan; -+} CONTROL_PRIVATE; -+ -+static int -+control_open (struct inode *inode, struct file *file) -+{ -+ CONTROL_PRIVATE *pr; -+ -+ KMEM_ALLOC(pr, CONTROL_PRIVATE *, sizeof (CONTROL_PRIVATE), TRUE); -+ -+ if (pr == NULL) -+ return (-ENOMEM); -+ -+ pr->pr_boundary_scan = 0; -+ -+ file->private_data = (void *) pr; -+ -+ MOD_INC_USE_COUNT; -+ -+ return (0); -+} -+ -+static int -+control_release (struct inode *inode, struct file *file) -+{ -+ ELAN3_DEV *dev = elan3_devices[ELAN3_DEVICE(inode)]; -+ CONTROL_PRIVATE *pr = (CONTROL_PRIVATE *) file->private_data; -+ -+ if (pr->pr_boundary_scan) -+ ClearLinkBoundaryScan(dev); -+ -+ KMEM_FREE (pr, sizeof(CONTROL_PRIVATE)); -+ -+ MOD_DEC_USE_COUNT; -+ return (0); -+} -+ -+static int -+control_ioctl (struct inode *inode, struct file *file, -+ unsigned int cmd, unsigned long arg) -+{ -+ ELAN3_DEV *dev = elan3_devices[ELAN3_DEVICE(inode)]; -+ CONTROL_PRIVATE *pr = (CONTROL_PRIVATE *) file->private_data; -+ int res; -+ -+ switch (cmd) -+ { -+ case ELAN3IO_SET_BOUNDARY_SCAN: -+ if (SetLinkBoundaryScan (dev) == 0) -+ pr->pr_boundary_scan = 1; -+ return (0); -+ -+ case ELAN3IO_CLEAR_BOUNDARY_SCAN: -+ if (pr->pr_boundary_scan == 0) -+ return (-EINVAL); -+ -+ pr->pr_boundary_scan = 0; -+ -+ ClearLinkBoundaryScan (dev); -+ return (0); -+ -+ case ELAN3IO_READ_LINKVAL: -+ { -+ E3_uint32 val; -+ -+ if (pr->pr_boundary_scan == 0) -+ return (-EINVAL); -+ -+ if (copy_from_user(&val, (E3_uint32 *)arg, sizeof(E3_uint32))) 
-+ return (-EFAULT); -+ -+ val = ReadBoundaryScanValue (dev, val); -+ -+ if (copy_to_user((E3_uint32 *)arg, &val, sizeof(E3_uint32))) -+ return (-EFAULT); -+ return (0); -+ } -+ -+ case ELAN3IO_WRITE_LINKVAL: -+ { -+ E3_uint32 val; -+ -+ if (pr->pr_boundary_scan == 0) -+ return (-EINVAL); -+ -+ if (copy_from_user(&val, (E3_uint32 *)arg, sizeof(E3_uint32))) -+ return (-EFAULT); -+ -+ val = WriteBoundaryScanValue (dev, val); -+ -+ if (copy_to_user((E3_uint32 *)arg, &val, sizeof(E3_uint32))) -+ return (-EFAULT); -+ -+ return (0); -+ } -+ -+ case ELAN3IO_SET_POSITION: -+ { -+ ELAN3IO_SET_POSITION_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_SET_POSITION_STRUCT))) -+ return (-EFAULT); -+ -+ if (ComputePosition (&dev->Position, args.nodeId, args.numNodes, dev->Devinfo.dev_num_down_links_value) != 0) -+ return (-EINVAL); -+ -+ return (0); -+ } -+ -+ case ELAN3IO_SET_DEBUG: -+ { -+ ELAN3IO_SET_DEBUG_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_SET_DEBUG_STRUCT))) -+ return (-EFAULT); -+ -+ if (! strcmp (args.what, "elan3_debug")) -+ elan3_debug = args.value; -+ else if (! strcmp (args.what, "elan3_debug_console")) -+ elan3_debug_console = args.value; -+ else if (! strcmp (args.what, "elan3_debug_buffer")) -+ elan3_debug_buffer = args.value; -+ else if (! strcmp (args.what, "elan3_debug_ignore_dev")) -+ elan3_debug_ignore_dev = args.value; -+ else if (! strcmp (args.what, "elan3_debug_ignore_ctxt")) -+ elan3_debug_ignore_ctxt = args.value; -+ else if (! strcmp (args.what, "elan3mmu_debug")) -+ elan3mmu_debug = args.value; -+ -+ return (0); -+ } -+ -+ case ELAN3IO_NETERR_SERVER: -+ { -+ ELAN3IO_NETERR_SERVER_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_NETERR_SERVER_STRUCT))) -+ return (-EFAULT); -+ -+ res = AddNeterrServerSyscall (args.elanid, args.addr, args.name, NULL); -+ return (set_errno (res)); -+ } -+ -+ case ELAN3IO_NETERR_FIXUP: -+ { -+ NETERR_MSG *msg; -+ -+ KMEM_ALLOC(msg, NETERR_MSG *, sizeof (NETERR_MSG), TRUE); -+ -+ if (msg == NULL) -+ return (set_errno (ENOMEM)); -+ -+ if (copy_from_user (msg, (void *) arg, sizeof (NETERR_MSG))) -+ res = EFAULT; -+ else -+ res = ExecuteNetworkErrorFixup (msg); -+ -+ KMEM_FREE (msg, sizeof (NETERR_MSG)); -+ return (set_errno (res)); -+ } -+ -+ case ELAN3IO_STATS: -+ return (device_stats_ioctl (dev, arg)); -+ -+ case ELAN3IO_GET_DEVINFO: -+ { -+ if (copy_to_user ((void *) arg, &dev->Devinfo, sizeof (ELAN_DEVINFO))) -+ return (-EFAULT); -+ return (0); -+ } -+ -+ case ELAN3IO_GET_POSITION: -+ { -+ if (copy_to_user ((void *) arg, &dev->Position, sizeof (ELAN_POSITION))) -+ return (-EFAULT); -+ return (0); -+ } -+ default: -+ return (-EINVAL); -+ } -+} -+ -+static int -+control_mmap (struct file *file, struct vm_area_struct *vma) -+{ -+ ELAN3_DEV *dev = elan3_devices[ELAN3_DEVICE(file->f_dentry->d_inode)]; -+ int space = OFF_TO_SPACE(vma->vm_pgoff << PAGE_SHIFT); -+ int off = OFF_TO_OFFSET(vma->vm_pgoff << PAGE_SHIFT); -+ int size; -+ ioaddr_t addr; -+ DeviceMappingHandle handle; -+ physaddr_t phys; -+ -+ if (space < ELAN3_BAR_SDRAM || space > ELAN3_BAR_EBUS) -+ return (-EINVAL); -+ -+ if (off < 0 || DeviceRegisterSize (dev, space, &size) != ESUCCESS || off > size) -+ return (-EINVAL); -+ -+ if (MapDeviceRegister(dev, space, &addr, off, PAGESIZE, &handle) != ESUCCESS) -+ return (-EINVAL); -+ -+ phys = kmem_to_phys((caddr_t) addr); -+ UnmapDeviceRegister(dev, &handle); -+ -+ if (__remap_page_range(vma->vm_start, phys, vma->vm_end - vma->vm_start, 
vma->vm_page_prot))
-+ return (-EAGAIN);
-+
-+ return (0);
-+}
-+
-+/*
-+ * /dev/elan3/sdramX - sdram access device
-+ */
-+typedef struct mem_page
-+{
-+ struct mem_page *pg_next;
-+ sdramaddr_t pg_addr;
-+ u_long pg_pgoff;
-+ u_int pg_ref;
-+} MEM_PAGE;
-+
-+#define MEM_HASH_SIZE 32
-+#define MEM_HASH(pgoff) ((pgoff) & (MEM_HASH_SIZE-1))
-+
-+typedef struct mem_private
-+{
-+ ELAN3_DEV *pr_dev;
-+ MEM_PAGE *pr_pages[MEM_HASH_SIZE];
-+ spinlock_t pr_lock;
-+} MEM_PRIVATE;
-+
-+static void
-+mem_freepage (MEM_PRIVATE *pr, MEM_PAGE *pg)
-+{
-+ PRINTF (DBG_DEVICE, DBG_SEG, "mem_freepage: pr=%p pgoff=%lx pg=%p ref=%d\n", pr, pg->pg_pgoff, pg, pg->pg_ref);
-+
-+ elan3_sdram_free (pr->pr_dev, pg->pg_addr, PAGE_SIZE);
-+ KMEM_FREE (pg, sizeof(MEM_PAGE));
-+}
-+
-+static MEM_PAGE *
-+mem_getpage (MEM_PRIVATE *pr, u_long pgoff, virtaddr_t addr)
-+{
-+ int hashval = MEM_HASH (pgoff);
-+ MEM_PAGE *npg = NULL;
-+ MEM_PAGE *pg;
-+
-+ PRINTF (DBG_DEVICE, DBG_SEG, "mem_getpage: pr=%p pgoff=%lx addr=%lx\n", pr, pgoff, addr);
-+
-+ again:
-+ spin_lock (&pr->pr_lock);
-+ for (pg = pr->pr_pages[hashval]; pg; pg = pg->pg_next)
-+ if (pg->pg_pgoff == pgoff)
-+ break;
-+
-+ if (pg != NULL)
-+ {
-+ PRINTF (DBG_DEVICE, DBG_SEG, "mem_getpage: pr=%p pgoff=%lx addr=%lx -> found %p addr=%lx\n", pr, pgoff, addr, pg, pg->pg_addr);
-+
-+ pg->pg_ref++;
-+ spin_unlock (&pr->pr_lock);
-+
-+ if (npg != NULL) /* we'd raced and someone else had created */
-+ mem_freepage (pr, npg); /* this page - so free off our new one*/
-+ return (pg);
-+ }
-+
-+ if (npg != NULL) /* didn't find the page, so insert the */
-+ { /* new one we've just created */
-+ npg->pg_next = pr->pr_pages[hashval];
-+ pr->pr_pages[hashval] = npg;
-+
-+ spin_unlock (&pr->pr_lock);
-+ return (npg);
-+ }
-+
-+ spin_unlock (&pr->pr_lock); /* drop spinlock before creating a new page */
-+
-+ KMEM_ALLOC(npg, MEM_PAGE *, sizeof (MEM_PAGE), TRUE);
-+
-+ if (npg == NULL)
-+ return (NULL);
-+
-+ if ((npg->pg_addr = elan3_sdram_alloc (pr->pr_dev, PAGE_SIZE)) == 0)
-+ {
-+ KMEM_FREE (npg, sizeof (MEM_PAGE));
-+ return (NULL);
-+ }
-+
-+ /* zero the page before returning it to the user */
-+ elan3_sdram_zeroq_sdram (pr->pr_dev, npg->pg_addr, PAGE_SIZE);
-+
-+ npg->pg_pgoff = pgoff;
-+ npg->pg_ref = 1;
-+
-+ /* created a new page - so have to rescan before inserting it */
-+ goto again;
-+}
-+
-+static void
-+mem_droppage (MEM_PRIVATE *pr, u_long pgoff, int dontfree)
-+{
-+ MEM_PAGE **ppg;
-+ MEM_PAGE *pg;
-+
-+ spin_lock (&pr->pr_lock);
-+ for (ppg = &pr->pr_pages[MEM_HASH(pgoff)]; *ppg; ppg = &(*ppg)->pg_next)
-+ if ((*ppg)->pg_pgoff == pgoff)
-+ break;
-+
-+ pg = *ppg;
-+
-+ ASSERT (*ppg != NULL);
-+
-+ PRINTF (DBG_DEVICE, DBG_SEG, "mem_droppage: pr=%p pgoff=%lx pg=%p ref=%d dontfree=%d\n", pr, pgoff, (*ppg), (*ppg)->pg_ref, dontfree);
-+
-+ if (--pg->pg_ref == 0 && !dontfree)
-+ {
-+ *ppg = pg->pg_next;
-+
-+ mem_freepage (pr, pg);
-+ }
-+
-+ spin_unlock (&pr->pr_lock);
-+}
-+
-+static int
-+mem_open (struct inode *inode, struct file *file)
-+{
-+ ELAN3_DEV *dev = elan3_devices[ELAN3_DEVICE(inode)];
-+ MEM_PRIVATE *pr;
-+ register int i;
-+
-+ KMEM_ALLOC(pr, MEM_PRIVATE *, sizeof (MEM_PRIVATE), TRUE);
-+
-+ if (pr == NULL)
-+ return (-ENOMEM);
-+
-+ spin_lock_init (&pr->pr_lock);
-+ pr->pr_dev = dev;
-+ for (i = 0; i < MEM_HASH_SIZE; i++)
-+ pr->pr_pages[i] = NULL;
-+
-+ file->private_data = (void *) pr;
-+
-+ MOD_INC_USE_COUNT;
-+ return (0);
-+}
-+
-+static int
-+mem_release (struct inode *node, struct file *file)
-+{
-+ MEM_PRIVATE *pr = (MEM_PRIVATE *) 
file->private_data;
-+ MEM_PAGE *pg, *next;
-+ int i;
-+
-+ /* free off any pages that we'd allocated */
-+ spin_lock (&pr->pr_lock);
-+ for (i = 0; i < MEM_HASH_SIZE; i++)
-+ {
-+ for (pg = pr->pr_pages[i]; pg; pg = next)
-+ {
-+ next = pg->pg_next;
-+ mem_freepage (pr, pg);
-+ }
-+ }
-+ spin_unlock (&pr->pr_lock);
-+
-+ KMEM_FREE (pr, sizeof (MEM_PRIVATE));
-+
-+ MOD_DEC_USE_COUNT;
-+ return (0);
-+}
-+
-+static int
-+mem_ioctl (struct inode *inode, struct file *file,
-+ unsigned int cmd, unsigned long arg)
-+{
-+ return (-EINVAL);
-+}
-+
-+static void mem_vma_open(struct vm_area_struct *vma)
-+{
-+ MEM_PRIVATE *pr = (MEM_PRIVATE *) vma->vm_private_data;
-+ unsigned long addr;
-+ unsigned long pgoff;
-+
-+ PRINTF (DBG_DEVICE, DBG_SEG, "mem_vma_open: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
-+ vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
-+
-+ preemptable_start {
-+ for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++) {
-+ mem_getpage (pr, pgoff, addr);
-+ preemptable_check();
-+ }
-+ } preemptable_end;
-+}
-+
-+static void mem_vma_close(struct vm_area_struct *vma)
-+{
-+ MEM_PRIVATE *pr = (MEM_PRIVATE *) vma->vm_private_data;
-+ unsigned long addr;
-+ unsigned long pgoff;
-+
-+ PRINTF (DBG_DEVICE, DBG_SEG, "mem_vma_close: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
-+ vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
-+
-+ /* NOTE: the call to close may not have the same vm_start/vm_end values as
-+ * were passed into mmap()/open() - since if a partial unmap has occurred
-+ * then the vma could have been shrunk or even split.
-+ *
-+ * if the vma is split then a vma_open() will be called for the top
-+ * portion - thus causing the reference counts to become incorrect.
-+ *
-+ * We drop the reference to any pages we're notified about - so they get freed
-+ * earlier than when the device is finally released.
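-+ *
-+ * (For example, unmapping a single page from the middle of a mapping
-+ * splits the vma, and the vma_open() call on the surviving portion
-+ * takes page references that no close will exactly pair with; any
-+ * pages still referenced at the end are reclaimed in mem_release().)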
-+ */
-+ for (pgoff = vma->vm_pgoff, addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
-+ mem_droppage (pr, pgoff, 0);
-+}
-+
-+static struct vm_operations_struct mem_vm_ops = {
-+ open: mem_vma_open,
-+ close: mem_vma_close,
-+};
-+
-+static int
-+mem_mmap (struct file *file, struct vm_area_struct *vma)
-+{
-+ MEM_PRIVATE *pr = (MEM_PRIVATE *) file->private_data;
-+ MEM_PAGE *pg;
-+ unsigned long addr;
-+ unsigned long pgoff;
-+
-+ PRINTF (DBG_DEVICE, DBG_SEG, "mem_mmap: vm_mm=%p start=%lx end=%lx pgoff=%lx prot=%llx file=%p\n",
-+ vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, (long long)vma->vm_page_prot.pgprot , file);
-+
-+ preemptable_start {
-+ for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
-+ {
-+ if ((pg = mem_getpage (pr, pgoff, addr)) == NULL)
-+ goto failed;
-+
-+#ifdef LINUX_SPARC
-+ pgprot_val(vma->vm_page_prot) &= ~(_PAGE_CACHE);
-+ pgprot_val(vma->vm_page_prot) |= _PAGE_IE;
-+#elif defined(pgprot_noncached)
-+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-+#endif
-+
-+#if defined(__ia64__)
-+ if (enable_sdram_writecombining)
-+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-+#endif
-+ PRINTF (DBG_DEVICE, DBG_SEG, "mem_mmap: addr %lx -> pg=%p addr=%lx phys=%llx flags=%lx prot=%llx\n",
-+ addr, pg, pg->pg_addr, (long long) elan3_sdram_to_phys (pr->pr_dev, pg->pg_addr), vma->vm_flags, (long long)vma->vm_page_prot.pgprot);
-+
-+ if (__remap_page_range(addr, elan3_sdram_to_phys (pr->pr_dev, pg->pg_addr), PAGE_SIZE, vma->vm_page_prot))
-+ {
-+ mem_droppage (pr, pgoff, 0); /* drop our reference to this page */
-+ goto failed;
-+ }
-+
-+ preemptable_check();
-+ }
-+ } preemptable_end;
-+
-+ /* Don't try to swap out Elan SDRAM pages.. */
-+ vma->vm_flags |= VM_RESERVED;
-+
-+ /*
-+ * Don't dump SDRAM pages to a core file
-+ * (Pity I would really like to do this but it crashes in elf_core_dump() as
-+ * it can only handle pages that are in the mem_map area (addy 11/01/2002))
-+ */
-+ vma->vm_flags |= VM_IO;
-+
-+ vma->vm_ops = &mem_vm_ops;
-+ vma->vm_file = file;
-+ vma->vm_private_data = (void *) pr;
-+
-+ return (0);
-+
-+ failed:
-+ PRINTF (DBG_DEVICE, DBG_SEG, "mem_mmap: failed\n");
-+
-+ /* free off any pages we've already allocated/referenced */
-+ while (pgoff-- > vma->vm_pgoff)
-+ mem_droppage (pr, pgoff, 0);
-+
-+ return (-ENOMEM);
-+}
-+
-+#if !defined(NO_PTRACK) && defined(IOPROC_PATCH_APPLIED)
-+
-+#include 
-+
-+/*
-+ * /dev/elan3/userX - user device
-+ *
-+ * "user_private" can be referenced from a number of places
-+ * 1) the "file" structure.
-+ * 2) the "mm" ioproc ops
-+ * 3) the "mmap" of the command port.
-+ *
-+ */
-+typedef struct user_private
-+{
-+ spinlock_t pr_lock;
-+ atomic_t pr_mappings;
-+ atomic_t pr_ref;
-+ ELAN3_CTXT *pr_ctxt;
-+ struct mm_struct *pr_mm;
-+ struct ioproc_ops pr_ioproc;
-+} USER_PRIVATE;
-+
-+static void
-+user_free (USER_PRIVATE *pr)
-+{
-+ /* Have to unreserve the FlagPage or else we leak memory like a sieve! 
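-+ * (The flag page was marked reserved in user_mmap(); while PG_reserved
-+ * is set the VM will never reclaim the page, so it must be cleared
-+ * here before the context is freed.)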
*/
-+ ClearPageReserved(pte_page(*find_pte_kernel((unsigned long) pr->pr_ctxt->FlagPage)));
-+
-+ elan3_detach(pr->pr_ctxt);
-+ elan3_free (pr->pr_ctxt);
-+
-+ KMEM_FREE (pr, sizeof(USER_PRIVATE));
-+
-+ MOD_DEC_USE_COUNT;
-+}
-+
-+static void
-+user_ioproc_release (void *arg, struct mm_struct *mm)
-+{
-+ USER_PRIVATE *pr = (USER_PRIVATE *) arg;
-+
-+ PRINTF3 (pr->pr_ctxt, DBG_SEG, "user_ioproc_release: ctxt=%p pr=%p ref=%d\n",
-+ pr->pr_ctxt, pr, atomic_read (&pr->pr_ref));
-+
-+ elan3mmu_pte_ctxt_unload (pr->pr_ctxt->Elan3mmu);
-+
-+ pr->pr_mm = NULL;
-+
-+ if (atomic_dec_and_test (&pr->pr_ref))
-+ user_free (pr);
-+}
-+
-+/*
-+ * On 2.4 kernels we get passed a mm_struct, whereas on 2.6 kernels
-+ * we get the vma which is more useful
-+ */
-+#if defined(IOPROC_MM_STRUCT_ARG)
-+static void
-+user_ioproc_sync_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end)
-+{
-+ USER_PRIVATE *pr = (USER_PRIVATE *) arg;
-+
-+ PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_ioproc_sync_range: start=%lx end=%lx\n", start, end);
-+
-+ ASSERT(start <= end);
-+
-+ elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, mm, (caddr_t) start, end-start);
-+}
-+
-+static void
-+user_ioproc_invalidate_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end)
-+{
-+ USER_PRIVATE *pr = (USER_PRIVATE *) arg;
-+
-+ PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_ioproc_invalidate_range: start=%lx end=%lx\n", start, end);
-+
-+ ASSERT(start <= end);
-+
-+ elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, mm, (caddr_t) start, end-start);
-+}
-+
-+static void
-+user_ioproc_update_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end)
-+{
-+ USER_PRIVATE *pr = (USER_PRIVATE *) arg;
-+
-+ ASSERT(start <= end && ((start & PAGEOFFSET) == 0) && ((end & PAGEOFFSET) == 0));
-+
-+ PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_ioproc_update_range: start=%lx end=%lx\n", start, end);
-+
-+ elan3mmu_pte_range_update (pr->pr_ctxt->Elan3mmu, mm,(caddr_t) start, end-start);
-+}
-+
-+static void
-+user_ioproc_change_protection (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end, pgprot_t newprot)
-+{
-+ USER_PRIVATE *pr = (USER_PRIVATE *) arg;
-+
-+ PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_ioproc_change_protection: start=%lx end=%lx\n", start, end);
-+
-+ ASSERT(start <= end);
-+
-+ elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, mm, (caddr_t) start, end-start);
-+}
-+
-+#else
-+
-+static void
-+user_ioproc_sync_range (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end)
-+{
-+ USER_PRIVATE *pr = (USER_PRIVATE *) arg;
-+
-+ PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_ioproc_sync_range: start=%lx end=%lx\n", start, end);
-+
-+ ASSERT(start <= end);
-+
-+ elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, vma->vm_mm, (caddr_t) start, end-start);
-+}
-+
-+static void
-+user_ioproc_invalidate_range (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end)
-+{
-+ USER_PRIVATE *pr = (USER_PRIVATE *) arg;
-+
-+ PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_ioproc_invalidate_range: start=%lx end=%lx\n", start, end);
-+
-+ ASSERT(start <= end);
-+
-+ elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, vma->vm_mm, (caddr_t) start, end-start);
-+}
-+
-+static void
-+user_ioproc_update_range (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end)
-+{
-+ USER_PRIVATE *pr = (USER_PRIVATE *) arg;
-+
-+ ASSERT(start <= end && ((start & PAGEOFFSET) == 0) && ((end & PAGEOFFSET) == 0));
-+
-+ PRINTF2 (pr->pr_ctxt, DBG_SEG, 
"user_ioproc_update_range: start=%lx end=%lx\n", start, end); -+ -+ elan3mmu_pte_range_update (pr->pr_ctxt->Elan3mmu, vma->vm_mm, (caddr_t) start, end-start); -+} -+ -+static void -+user_ioproc_change_protection (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot) -+{ -+ USER_PRIVATE *pr = (USER_PRIVATE *) arg; -+ -+ PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_ioproc_change_protection: start=%lx end=%lx\n", start, end); -+ -+ ASSERT(start <= end); -+ -+ elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, vma->vm_mm, (caddr_t) start, end-start); -+} -+#endif /* defined(IOPROC_NO_VMA_RANGE) */ -+ -+static void -+user_ioproc_sync_page (void *arg, struct vm_area_struct *vma, unsigned long addr) -+{ -+ USER_PRIVATE *pr = (USER_PRIVATE *) arg; -+ -+ PRINTF1 (pr->pr_ctxt, DBG_SEG, "user_ioproc_sync_page: addr=%lx\n", addr); -+ -+ elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, vma->vm_mm, (caddr_t) (addr & PAGE_MASK), PAGE_SIZE); -+} -+ -+static void -+user_ioproc_invalidate_page (void *arg, struct vm_area_struct *vma, unsigned long addr) -+{ -+ USER_PRIVATE *pr = (USER_PRIVATE *) arg; -+ -+ PRINTF1 (pr->pr_ctxt, DBG_SEG, "user_ioproc_invalidate_page: addr=%lx\n", addr); -+ -+ elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, vma->vm_mm, (caddr_t) (addr & PAGE_MASK), PAGE_SIZE); -+} -+ -+static void -+user_ioproc_update_page (void *arg, struct vm_area_struct *vma, unsigned long addr) -+{ -+ USER_PRIVATE *pr = (USER_PRIVATE *) arg; -+ -+ PRINTF1 (pr->pr_ctxt, DBG_SEG, "user_ioproc_update_page: addr=%lx\n", addr); -+ -+ elan3mmu_pte_range_update (pr->pr_ctxt->Elan3mmu,vma->vm_mm, (caddr_t) (addr & PAGE_MASK), PAGE_SIZE); -+} -+ -+int -+user_ptrack_handler (void *arg, int phase, struct task_struct *child) -+{ -+ USER_PRIVATE *pr = (USER_PRIVATE *) arg; -+ ELAN3_CTXT *ctxt = pr->pr_ctxt; -+ -+ PRINTF5 (pr->pr_ctxt, DBG_FN, "user_ptrack_handler: ctxt=%p pr=%p ref=%d phase %d mm->ref %d\n", -+ pr->pr_ctxt, pr, atomic_read (&pr->pr_ref), phase, atomic_read (¤t->mm->mm_count)); -+ -+ if (phase == PTRACK_PHASE_EXIT) -+ { -+ /* this will force the helper thread to exit */ -+ elan3_swapout (ctxt, CTXT_EXITING); -+ -+ if (atomic_dec_and_test (&pr->pr_ref)) -+ user_free (pr); -+ } -+ return PTRACK_FINISHED; -+} -+ -+static int -+user_open (struct inode *inode, struct file *file) -+{ -+ ELAN3_DEV *dev = elan3_devices[ELAN3_DEVICE(inode)]; -+ USER_PRIVATE *pr; -+ ELAN3_CTXT *ctxt; -+ -+ if (dev == NULL) -+ return (-ENXIO); -+ -+ KMEM_ALLOC(pr, USER_PRIVATE *, sizeof (USER_PRIVATE), TRUE); -+ -+ if (pr == NULL) -+ return (-ENOMEM); -+ -+ if ((ctxt = elan3_alloc (dev, 0)) == NULL) -+ { -+ KMEM_FREE (pr, sizeof (USER_PRIVATE)); -+ return (-ENOMEM); -+ } -+ -+ if (sys_init (ctxt) == NULL) -+ { -+ elan3_detach(ctxt); -+ elan3_free (ctxt); -+ KMEM_FREE (pr, sizeof (USER_PRIVATE)); -+ return (-ENOMEM); -+ } -+ -+ /* initialise refcnt to 3 - one for "file", one for XA handler, one for the ioproc ops */ -+ atomic_set (&pr->pr_ref, 3); -+ -+ atomic_set (&pr->pr_mappings, 0); -+ spin_lock_init (&pr->pr_lock); -+ -+ pr->pr_ctxt = ctxt; -+ pr->pr_mm = current->mm; -+ -+ /* register an ptrack handler to force the helper thread to exit when we do */ -+ if (ptrack_register (user_ptrack_handler, pr) < 0) -+ { -+ elan3_detach(ctxt); -+ elan3_free (ctxt); -+ KMEM_FREE (pr, sizeof (USER_PRIVATE)); -+ return (-ENOMEM); -+ } -+ -+ /* register a ioproc callback to notify us of translation changes */ -+ -+ pr->pr_ioproc.arg = (void *) pr; -+ pr->pr_ioproc.release = user_ioproc_release; -+ 
pr->pr_ioproc.sync_range = user_ioproc_sync_range;
-+ pr->pr_ioproc.invalidate_range = user_ioproc_invalidate_range;
-+ pr->pr_ioproc.update_range = user_ioproc_update_range;
-+ pr->pr_ioproc.change_protection = user_ioproc_change_protection;
-+ pr->pr_ioproc.sync_page = user_ioproc_sync_page;
-+ pr->pr_ioproc.invalidate_page = user_ioproc_invalidate_page;
-+ pr->pr_ioproc.update_page = user_ioproc_update_page;
-+
-+ spin_lock (&current->mm->page_table_lock);
-+ ioproc_register_ops (current->mm, &pr->pr_ioproc);
-+ spin_unlock (&current->mm->page_table_lock);
-+
-+ file->private_data = (void *) pr;
-+
-+ PRINTF2 (pr->pr_ctxt, DBG_FN, "user_open: done ctxt=%p pr=%p\n", ctxt, pr);
-+
-+ MOD_INC_USE_COUNT;
-+ return (0);
-+}
-+
-+static int
-+user_release (struct inode *inode, struct file *file)
-+{
-+ USER_PRIVATE *pr = (USER_PRIVATE *) file->private_data;
-+
-+ PRINTF3 (pr->pr_ctxt, DBG_FN, "user_release: ctxt=%p pr=%p ref=%d\n", pr->pr_ctxt, pr,
-+ atomic_read (&pr->pr_ref));
-+
-+ if (atomic_dec_and_test (&pr->pr_ref))
-+ user_free (pr);
-+
-+ return (0);
-+}
-+
-+static int
-+user_ioctl (struct inode *inode, struct file *file,
-+ unsigned int cmd, unsigned long arg)
-+{
-+ USER_PRIVATE *pr = (USER_PRIVATE *) file->private_data;
-+ ELAN3_CTXT *ctxt = pr->pr_ctxt;
-+ SYS_CTXT *sctx = (SYS_CTXT *) ctxt->Private;
-+ int res = 0;
-+
-+ if (current->mm != pr->pr_mm)
-+ return (-EINVAL);
-+
-+ PRINTF4 (ctxt, DBG_FN, "user_ioctl: ctxt=%p cmd=%x(%d) arg=%lx\n", ctxt, cmd, _IOC_NR(cmd), arg);
-+
-+ switch (cmd)
-+ {
-+ case ELAN3IO_FREE:
-+ if (atomic_read (&pr->pr_mappings) > 0)
-+ return (-EINVAL);
-+
-+ spin_lock (&current->mm->page_table_lock);
-+ if (pr->pr_mm != current->mm)
-+ spin_unlock (&current->mm->page_table_lock);
-+ else
-+ {
-+ ioproc_unregister_ops (current->mm, &pr->pr_ioproc);
-+ spin_unlock (&current->mm->page_table_lock);
-+
-+ user_ioproc_release (pr, current->mm);
-+ }
-+
-+ if (ptrack_registered (user_ptrack_handler, pr))
-+ {
-+ ptrack_deregister (user_ptrack_handler, pr);
-+ user_ptrack_handler (pr, PTRACK_PHASE_EXIT, NULL);
-+ }
-+ break;
-+
-+ case ELAN3IO_ATTACH:
-+ {
-+ ELAN_CAPABILITY *cap;
-+
-+ KMEM_ALLOC(cap, ELAN_CAPABILITY *, sizeof (ELAN_CAPABILITY), TRUE);
-+
-+ if (cap == NULL)
-+ return (set_errno (EFAULT));
-+
-+ if (copy_from_user (cap, (void *) arg, sizeof (ELAN_CAPABILITY)))
-+ res = EFAULT;
-+ else
-+ {
-+ if ((res = elan3_attach (ctxt, cap)) == 0)
-+ {
-+ if (copy_to_user ((void *) arg, cap, sizeof (ELAN_CAPABILITY)))
-+ {
-+ elan3_detach (ctxt);
-+ res = EFAULT;
-+ }
-+ }
-+ }
-+ KMEM_FREE (cap, sizeof(ELAN_CAPABILITY));
-+ break;
-+ }
-+
-+ case ELAN3IO_DETACH:
-+ spin_lock (&pr->pr_lock);
-+ if (atomic_read (&pr->pr_mappings) > 0)
-+ res = EINVAL;
-+ else
-+ elan3_detach (ctxt);
-+ spin_unlock (&pr->pr_lock);
-+ break;
-+
-+ case ELAN3IO_ADDVP:
-+ {
-+ ELAN3IO_ADDVP_STRUCT *args;
-+
-+ KMEM_ALLOC(args, ELAN3IO_ADDVP_STRUCT *, sizeof (ELAN3IO_ADDVP_STRUCT), TRUE);
-+
-+ if (args == NULL)
-+ return (set_errno (ENOMEM));
-+
-+ if (copy_from_user (args, (void *) arg, sizeof (ELAN3IO_ADDVP_STRUCT)))
-+ res = EFAULT;
-+ else
-+ {
-+ if ( (res=elan3_addvp (ctxt, args->process, &args->capability)) != 0)
-+ PRINTF0 (ctxt, DBG_FN, "ELAN3IO_ADDVP elan3_addvp failed \n");
-+ }
-+
-+ KMEM_FREE (args, sizeof (ELAN3IO_ADDVP_STRUCT));
-+ break;
-+ }
-+
-+ case ELAN3IO_REMOVEVP:
-+ res = elan3_removevp (ctxt, arg);
-+ break;
-+
-+ case ELAN3IO_BCASTVP:
-+ {
-+ ELAN3IO_BCASTVP_STRUCT args;
-+
-+ if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_BCASTVP_STRUCT)))
-+ return (-EFAULT);
-+
-+ 
res = elan3_addbcastvp (ctxt, args.process, args.lowvp, args.highvp); -+ break; -+ } -+ -+ case ELAN3IO_LOAD_ROUTE: -+ { -+ ELAN3IO_LOAD_ROUTE_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_LOAD_ROUTE_STRUCT))) -+ return (-EFAULT); -+ -+ res = elan3_load_route (ctxt, args.process, args.flits); -+ break; -+ } -+ -+ case ELAN3IO_CHECK_ROUTE: -+ { -+ ELAN3IO_CHECK_ROUTE_STRUCT args; -+ -+ args.routeError = 0; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_LOAD_ROUTE_STRUCT))) -+ return (-EFAULT); -+ -+ if ((res = elan3_check_route (ctxt, args.process, args.flits, & args.routeError)) == ESUCCESS) -+ { -+ if (copy_to_user ( (void *) arg, &args,sizeof (ELAN3IO_LOAD_ROUTE_STRUCT))) -+ return (-EFAULT); -+ } -+ break; -+ } -+ -+ case ELAN3IO_PROCESS_2_LOCATION: -+ { -+ ELAN3IO_PROCESS_2_LOCATION_STRUCT args; -+ ELAN_LOCATION loc; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_PROCESS_2_LOCATION_STRUCT))) -+ return (-EFAULT); -+ -+ krwlock_write (&ctxt->VpLock); -+ loc = ProcessToLocation (ctxt, NULL, args.process , NULL); -+ krwlock_done (&ctxt->VpLock); -+ -+ args.loc = loc; -+ -+ if (copy_to_user ( (void *) arg, &args,sizeof (ELAN3IO_PROCESS_2_LOCATION_STRUCT))) -+ return (-EFAULT); -+ -+ break; -+ } -+ -+ case ELAN3IO_GET_ROUTE: -+ { -+ ELAN3IO_GET_ROUTE_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_GET_ROUTE_STRUCT))) -+ return (-EFAULT); -+ -+ if ((res = elan3_get_route (ctxt, args.process, args.flits)) == ESUCCESS) -+ { -+ if (copy_to_user ( (void *) arg, &args,sizeof (ELAN3IO_GET_ROUTE_STRUCT))) -+ return (-EFAULT); -+ } -+ break; -+ } -+ -+ case ELAN3IO_RESET_ROUTE: -+ { -+ ELAN3IO_RESET_ROUTE_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_RESET_ROUTE_STRUCT))) -+ return (-EFAULT); -+ -+ res = elan3_reset_route (ctxt, args.process); -+ break; -+ } -+ -+ case ELAN3IO_VP2NODEID: -+ { -+ ELAN3IO_VP2NODEID_STRUCT *vp2nodeId; -+ ELAN_LOCATION location; -+ -+ KMEM_ALLOC (vp2nodeId, ELAN3IO_VP2NODEID_STRUCT *, sizeof(ELAN3IO_VP2NODEID_STRUCT), TRUE); -+ if (vp2nodeId == NULL) -+ return (set_errno (ENOMEM)); -+ -+ if (copy_from_user (vp2nodeId, (void *) arg, sizeof (ELAN3IO_VP2NODEID_STRUCT))) { -+ KMEM_FREE (vp2nodeId, sizeof(ELAN3IO_VP2NODEID_STRUCT)); -+ return (-EFAULT); -+ } -+ -+ krwlock_write (&ctxt->VpLock); -+ location = ProcessToLocation (ctxt, NULL, vp2nodeId->process , NULL); -+ krwlock_done (&ctxt->VpLock); -+ -+ vp2nodeId->nodeId = location.loc_node; -+ if (copy_to_user ( (void *) arg, vp2nodeId, sizeof (ELAN3IO_VP2NODEID_STRUCT))) { -+ KMEM_FREE (vp2nodeId, sizeof(ELAN3IO_VP2NODEID_STRUCT)); -+ return (-EFAULT); -+ } -+ -+ KMEM_FREE (vp2nodeId, sizeof(ELAN3IO_VP2NODEID_STRUCT)); -+ -+ break; -+ } -+ -+ case ELAN3IO_PROCESS: -+ return (elan3_process (ctxt)); -+ -+ case ELAN3IO_SETPERM: -+ { -+ ELAN3IO_SETPERM_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_SETPERM_STRUCT))) -+ return (-EFAULT); -+ -+ res = elan3mmu_setperm (ctxt->Elan3mmu, args.maddr, args.eaddr, args.len, args.perm); -+ break; -+ } -+ -+ case ELAN3IO_CLEARPERM: -+ { -+ ELAN3IO_CLEARPERM_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_CLEARPERM_STRUCT))) -+ return (-EFAULT); -+ -+ elan3mmu_clrperm (ctxt->Elan3mmu, args.eaddr, args.len); -+ break; -+ } -+ -+ case ELAN3IO_CHANGEPERM: -+ { -+ ELAN3IO_CHANGEPERM_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_CHANGEPERM_STRUCT))) -+ return (-EFAULT); -+ -+ res = EINVAL; 
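-+ /* changeperm is not implemented here - the arguments are
-+ * copied in but EINVAL is always returned */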
-+ break; -+ } -+ -+ case ELAN3IO_HELPER_THREAD: -+ res = elan3_lwp (ctxt); -+ break; -+ -+ case ELAN3IO_WAITCOMMAND: -+ res = WaitForCommandPort (ctxt); -+ break; -+ -+ case ELAN3IO_BLOCK_INPUTTER: -+ elan3_block_inputter (ctxt, arg); -+ break; -+ -+ case ELAN3IO_SET_FLAGS: -+ sctx->Flags = arg; -+ break; -+ -+ case ELAN3IO_SET_SIGNAL: -+ sctx->signal = arg; -+ break; -+ -+ case ELAN3IO_WAITEVENT: -+ res = sys_waitevent (ctxt, (E3_Event *) arg); -+ break; -+ -+ case ELAN3IO_ALLOC_EVENTCOOKIE: -+ res = cookie_alloc_cookie (sctx->Table, arg); -+ break; -+ -+ case ELAN3IO_FREE_EVENTCOOKIE: -+ res = cookie_free_cookie (sctx->Table, arg); -+ break; -+ -+ case ELAN3IO_ARM_EVENTCOOKIE: -+ res = cookie_arm_cookie (sctx->Table, arg); -+ break; -+ -+ case ELAN3IO_WAIT_EVENTCOOKIE: -+ res = cookie_wait_cookie (sctx->Table, arg); -+ break; -+ -+ case ELAN3IO_SWAPSPACE: -+ if (fuword (&((SYS_SWAP_SPACE *) arg)->Magic) != SYS_SWAP_MAGIC) -+ return (set_errno (EINVAL)); -+ -+ ((SYS_CTXT *) ctxt->Private)->Swap = (SYS_SWAP_SPACE *) arg; -+ break; -+ -+ case ELAN3IO_EXCEPTION_SPACE: -+ if (fuword (&((SYS_EXCEPTION_SPACE *) arg)->Magic) != SYS_EXCEPTION_MAGIC) -+ return (set_errno (EINVAL)); -+ -+ ((SYS_CTXT *) ctxt->Private)->Exceptions = (SYS_EXCEPTION_SPACE *) arg; -+ break; -+ -+ case ELAN3IO_GET_EXCEPTION: -+ { -+ SYS_EXCEPTION *exception; -+ -+ if (((SYS_CTXT *) ctxt->Private)->Exceptions == NULL) -+ return (set_errno (EINVAL)); -+ -+ KMEM_ALLOC(exception, SYS_EXCEPTION *, sizeof (SYS_EXCEPTION), TRUE); -+ -+ if (exception == NULL) -+ return (set_errno (ENOMEM)); -+ -+ if ((res = sys_getException (((SYS_CTXT *) ctxt->Private), exception)) == 0 && -+ copy_to_user ((void *) arg, exception, sizeof (SYS_EXCEPTION))) -+ res = EFAULT; -+ -+ KMEM_FREE (exception, sizeof (SYS_EXCEPTION)); -+ break; -+ } -+ -+ case ELAN3IO_UNLOAD: -+ { -+ ELAN3MMU *elan3mmu = ctxt->Elan3mmu; -+ ELAN3IO_UNLOAD_STRUCT args; -+ int span; -+ unsigned long flags; -+ E3_Addr eaddr; -+ caddr_t addr; -+ size_t len; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_UNLOAD_STRUCT))) -+ return (-EFAULT); -+ -+ addr = (caddr_t) args.addr; -+ len = args.len; -+ -+ if (((unsigned long) addr & PAGEMASK) || (len & PAGEMASK) || (len < 0)) -+ return -EINVAL; -+ -+ spin_lock_irqsave (&elan3mmu->elan3mmu_lock, flags); -+ for (; len; len -= span, addr += span) -+ { -+ ELAN3MMU_RGN *rgn = elan3mmu_findrgn_main (elan3mmu, addr, 0); -+ -+ if (rgn == NULL || (rgn->rgn_mbase + rgn->rgn_len) < addr) -+ span = len; -+ else if (rgn->rgn_mbase > addr) -+ span = MIN(len, rgn->rgn_mbase - addr); -+ else -+ { -+ span = MIN(len, (rgn->rgn_mbase + rgn->rgn_len) - addr); -+ eaddr = rgn->rgn_ebase + (addr - rgn->rgn_mbase); -+ -+ elan3mmu_unload (elan3mmu, eaddr, span, PTE_UNLOAD); -+ } -+ } -+ spin_unlock_irqrestore (&elan3mmu->elan3mmu_lock, flags); -+ -+ return 0; -+ } -+ -+ case ELAN3IO_GET_DEVINFO: -+ { -+ ELAN3IO_GET_DEVINFO_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_GET_DEVINFO_STRUCT))) -+ return (-EFAULT); -+ -+ if (copy_to_user ((void *) args.devinfo, &ctxt->Device->Devinfo, sizeof (ELAN_DEVINFO))) -+ res = EFAULT; -+ break; -+ } -+ -+ case ELAN3IO_GET_POSITION: -+ { -+ ELAN3IO_GET_POSITION_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_GET_POSITION_STRUCT))) -+ return (-EFAULT); -+ -+ if (copy_to_user ((void *) args.position, &ctxt->Device->Position, sizeof (ELAN_POSITION))) -+ res = EFAULT; -+ break; -+ } -+ -+ default: -+ return (-EINVAL); -+ } -+ -+ return (res ? 
set_errno (res) : 0);
-+}
-+
-+static void user_vma_open(struct vm_area_struct *vma)
-+{
-+ USER_PRIVATE *pr = (USER_PRIVATE *) vma->vm_private_data;
-+
-+ PRINTF (DBG_DEVICE, DBG_SEG, "user_vma_open: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
-+ vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
-+
-+ if (vma->vm_pgoff == ELAN3IO_OFF_COMMAND_PAGE)
-+ atomic_inc (&pr->pr_mappings);
-+}
-+
-+static void user_vma_close(struct vm_area_struct *vma)
-+{
-+ USER_PRIVATE *pr = (USER_PRIVATE *) vma->vm_private_data;
-+
-+ PRINTF (DBG_DEVICE, DBG_SEG, "user_vma_close: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
-+ vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
-+
-+ if (vma->vm_pgoff == ELAN3IO_OFF_COMMAND_PAGE)
-+ if (atomic_dec_and_test (&pr->pr_mappings))
-+ pr->pr_ctxt->CommandPageMapping = NULL;
-+}
-+
-+static struct vm_operations_struct user_vm_ops = {
-+ open: user_vma_open,
-+ close: user_vma_close,
-+};
-+
-+static int
-+user_mmap (struct file *file, struct vm_area_struct *vma)
-+{
-+ USER_PRIVATE *pr = (USER_PRIVATE *) file->private_data;
-+ ELAN3_CTXT *ctxt = pr->pr_ctxt;
-+ ioaddr_t ioaddr;
-+
-+ /*
-+ * NOTE - since we need to maintain the reference count on
-+ * the user_private we only permit single page
-+ * mmaps - this means that we will certainly see
-+ * the correct number of closes to maintain the
-+ * reference count correctly.
-+ */
-+
-+ if ((vma->vm_end - vma->vm_start) != PAGE_SIZE)
-+ return (-EINVAL);
-+
-+ PRINTF (DBG_DEVICE, DBG_SEG, "user_mmap: vm_mm=%p start=%lx end=%lx pgoff=%lx flags=%lx prot=%llx file=%p\n",
-+ vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_flags, (long long)vma->vm_page_prot.pgprot, vma->vm_file);
-+
-+ switch (vma->vm_pgoff)
-+ {
-+ default:
-+ return (-EINVAL);
-+
-+ case ELAN3IO_OFF_COMMAND_PAGE:
-+ spin_lock (&pr->pr_lock);
-+ if (ctxt->CommandPage == (ioaddr_t) 0 || atomic_read (&pr->pr_mappings) != 0)
-+ {
-+ PRINTF (DBG_DEVICE, DBG_SEG, "user_mmap: command port - %s\n", ctxt->CommandPort ? "already mapped" : "not attached");
-+ spin_unlock (&pr->pr_lock);
-+ return (-EINVAL);
-+ }
-+#ifdef LINUX_SPARC
-+ pgprot_val(vma->vm_page_prot) &= ~(_PAGE_CACHE);
-+ pgprot_val(vma->vm_page_prot) |= _PAGE_IE;
-+#elif defined(pgprot_noncached)
-+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-+#endif
-+
-+ PRINTF (DBG_DEVICE, DBG_SEG, "user_mmap: commandport at %lx phys %llx prot %llx\n",
-+ vma->vm_start, (unsigned long long) kmem_to_phys ((void *) ctxt->CommandPort), (long long)vma->vm_page_prot.pgprot);
-+
-+ /* Don't try to swap out physical pages.. */
-+ vma->vm_flags |= VM_RESERVED;
-+
-+ /*
-+ * Don't dump addresses that are not real memory to a core file.
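-+ * (VM_IO also keeps the core dumper and the swap code away from this
-+ * range, which maps a device register rather than RAM.)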
-+ */ -+ vma->vm_flags |= VM_IO; -+ -+ if (__remap_page_range(vma->vm_start, kmem_to_phys ((void *) ctxt->CommandPage), vma->vm_end - vma->vm_start, vma->vm_page_prot)) -+ { -+ spin_unlock (&pr->pr_lock); -+ return (-EAGAIN); -+ } -+ ctxt->CommandPageMapping = (void *) vma->vm_start; -+ -+ atomic_inc (&pr->pr_mappings); -+ -+ spin_unlock (&pr->pr_lock); -+ break; -+ -+ case ELAN3IO_OFF_UREG_PAGE: -+#ifdef LINUX_SPARC -+ pgprot_val(vma->vm_page_prot) &= ~(_PAGE_CACHE); -+ pgprot_val(vma->vm_page_prot) |= _PAGE_IE; -+#elif defined(pgprot_noncached) -+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); -+#endif -+ ioaddr = ctxt->Device->RegPtr + (offsetof (E3_Regs, URegs) & PAGEMASK); -+ -+ PRINTF (DBG_DEVICE, DBG_SEG, "user_mmap: user_regs at %lx phys %llx prot %llx\n", vma->vm_start, -+ (unsigned long long) kmem_to_phys ((void *) ioaddr), (long long)vma->vm_page_prot.pgprot); -+ -+ /* Don't try to swap out physical pages.. */ -+ vma->vm_flags |= VM_RESERVED; -+ -+ /* -+ * Don't dump addresses that are not real memory to a core file. -+ */ -+ vma->vm_flags |= VM_IO; -+ if (__remap_page_range (vma->vm_start, kmem_to_phys ((void *) ioaddr), -+ vma->vm_end - vma->vm_start, vma->vm_page_prot)) -+ return (-EAGAIN); -+ break; -+ -+ case ELAN3IO_OFF_FLAG_PAGE: -+ PRINTF (DBG_DEVICE, DBG_SEG, "user_mmap: flag page at %lx phys %llx\n", vma->vm_start, -+ (unsigned long long) kmem_to_phys ((void *) ctxt->FlagPage)); -+ -+ /* we do not want to have this area swapped out, lock it */ -+ vma->vm_flags |= VM_LOCKED; -+ -+ /* Mark the page as reserved or else the remap_page_range() doesn't remap it */ -+ SetPageReserved(pte_page(*find_pte_kernel((unsigned long) ctxt->FlagPage))); -+ -+ if (__remap_page_range (vma->vm_start, kmem_to_phys ((void *) ctxt->FlagPage), -+ vma->vm_end - vma->vm_start, vma->vm_page_prot)) -+ return (-EAGAIN); -+ break; -+ } -+ -+ ASSERT (vma->vm_ops == NULL); -+ -+ vma->vm_ops = &user_vm_ops; -+ vma->vm_file = file; -+ vma->vm_private_data = (void *) pr; -+ -+ return (0); -+} -+ -+#else /* defined(NO_PTRACK) || !defined(IOPROC_PATCH_APPLIED) */ -+ -+static int -+user_open (struct inode *inode, struct file *file) -+{ -+ return -ENXIO; -+} -+ -+static int -+user_release (struct inode *inode, struct file *file) -+{ -+ return 0; -+} -+ -+static int -+user_ioctl (struct inode *inode, struct file *file, -+ unsigned int cmd, unsigned long arg) -+{ -+ return 0; -+} -+ -+static int -+user_mmap (struct file *file, struct vm_area_struct *vma) -+{ -+ return 0; -+} -+#endif -+ -+/* driver entry points */ -+static int -+elan3_open (struct inode *inode, struct file *file) -+{ -+ if (elan3_devices[ELAN3_DEVICE(inode)] == NULL) -+ return (-ENXIO); -+ -+ PRINTF (DBG_DEVICE, DBG_FN, "elan3_open: device %d minor %d file=%p\n", ELAN3_DEVICE(inode), ELAN3_MINOR(inode), file); -+ -+ switch (ELAN3_MINOR (inode)) -+ { -+ case ELAN3_MINOR_CONTROL: -+ return (control_open (inode, file)); -+ case ELAN3_MINOR_MEM: -+ return (mem_open (inode, file)); -+ case ELAN3_MINOR_USER: -+ return (user_open (inode, file)); -+ default: -+ return (-ENXIO); -+ } -+} -+ -+static int -+elan3_release (struct inode *inode, struct file *file) -+{ -+ PRINTF (DBG_DEVICE, DBG_FN, "elan3_release: device %d minor %d file=%p\n", ELAN3_DEVICE(inode), ELAN3_MINOR(inode), file); -+ -+ switch (ELAN3_MINOR (inode)) -+ { -+ case ELAN3_MINOR_CONTROL: -+ return (control_release (inode, file)); -+ case ELAN3_MINOR_MEM: -+ return (mem_release (inode, file)); -+ case ELAN3_MINOR_USER: -+ return (user_release (inode, file)); -+ default: -+ 
return (-ENXIO); -+ } -+} -+ -+static int -+elan3_ioctl (struct inode *inode, struct file *file, -+ unsigned int cmd, unsigned long arg) -+{ -+ switch (ELAN3_MINOR (inode)) -+ { -+ case ELAN3_MINOR_CONTROL: -+ return (control_ioctl (inode, file, cmd, arg)); -+ case ELAN3_MINOR_MEM: -+ return (mem_ioctl (inode, file, cmd, arg)); -+ case ELAN3_MINOR_USER: -+ return (user_ioctl (inode, file, cmd, arg)); -+ default: -+ return (-ENXIO); -+ } -+} -+ -+ -+static int -+elan3_mmap (struct file *file, struct vm_area_struct *vma) -+{ -+ PRINTF (DBG_DEVICE, DBG_SEG, "elan3_mmap: instance %d minor %d start=%lx end=%lx pgoff=%lx flags=%lx prot=%llx\n", -+ ELAN3_DEVICE (file->f_dentry->d_inode), ELAN3_MINOR (file->f_dentry->d_inode), -+ vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_flags, (long long)vma->vm_page_prot.pgprot); -+ -+ switch (ELAN3_MINOR (file->f_dentry->d_inode)) -+ { -+ case ELAN3_MINOR_CONTROL: -+ return (control_mmap (file, vma)); -+ case ELAN3_MINOR_MEM: -+ return (mem_mmap (file, vma)); -+ case ELAN3_MINOR_USER: -+ return (user_mmap (file, vma)); -+ default: -+ return (-ENXIO); -+ } -+} -+ -+static irqreturn_t -+InterruptHandlerWrapper(int irq, void *dev_id, struct pt_regs *regs) -+{ -+ if (InterruptHandler ((ELAN3_DEV *)dev_id) == 0) -+ return IRQ_HANDLED; -+ else -+ return IRQ_NONE; -+} -+ -+ -+/* -+ * Elan specific PCI configuration registers. -+ */ -+ -+#define PCI_CONF_PARITY_PHYS_LO 0x40 -+#define PCI_CONF_PARITY_PHYS_HI 0x44 -+#define PCI_CONF_PARITY_PHASE_ADDR 0x46 -+#define PCI_CONF_PARITY_MASTER_TYPE 0x47 -+#define PCI_CONF_ELAN3_CTRL 0x48 -+ -+#define ECTRL_EXTEND_LATENCY (1 << 0) -+#define ECTRL_ENABLE_PREFETCH (1 << 1) -+#define ECTRL_SOFTWARE_INTERNAL_RESET (1 << 2) -+#define ECTRL_REDUCED_RETRY_RATE (1 << 3) -+#define ECTRL_CLOCK_DIVIDE_RATE_SHIFT 4 -+#define ECTRL_COMMS_DIVIDE_RATE_SHIFT 10 -+#define ECTRL_FORCE_COMMSCLK_LOCAL (1 << 14) -+ -+/* -+ * Configure PCI. -+ */ -+static int -+ConfigurePci(ELAN3_DEV *dev) -+{ -+ struct pci_dev *pci = dev->Osdep.pci; -+ u32 rom_address; -+ -+ if (pci_enable_device(pci)) -+ return (ENXIO); -+ -+ /* disable ROM */ -+ pci_read_config_dword(pci, PCI_ROM_ADDRESS, &rom_address); -+ rom_address &= ~PCI_ROM_ADDRESS_ENABLE; -+ pci_write_config_dword(pci, PCI_ROM_ADDRESS, rom_address); -+ mb(); -+ -+ /* this is in 32-bit WORDS */ -+ pci_write_config_byte(pci, PCI_CACHE_LINE_SIZE, (64 >> 2)); -+ mb(); -+ -+ /* allow 40 ticks to respond, 16 data phases */ -+ pci_write_config_byte(pci, PCI_LATENCY_TIMER, 255); -+ mb(); -+ -+ /* don't enable PCI_COMMAND_SERR--see note in elandev_dunix.c */ -+ pci_write_config_word(pci, PCI_COMMAND, PCI_COMMAND_MEMORY -+ | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE | PCI_COMMAND_PARITY); -+ mb(); -+ -+ return ESUCCESS; -+} -+ -+/* -+ * Reset chip to a known state. 
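-+ *
-+ * The sequence below is roughly: program the Elan control word with
-+ * the latency and clock divide ratios, assert
-+ * ECTRL_SOFTWARE_INTERNAL_RESET, read the CAS latency from the Vital
-+ * Product Data, drop the reset bit again, enable SDRAM, and finally
-+ * initialise the schedule status register and the interrupt PAL.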
-+ */
-+static int
-+ResetElan(ELAN3_DEV *dev, ioaddr_t intPalAddr)
-+{
-+ struct pci_dev *pci = dev->Osdep.pci;
-+ int instance = dev->Instance;
-+ u32 val;
-+ u8 revid;
-+ int CasLatency;
-+ int res;
-+
-+ /* determine rev of board */
-+ pci_read_config_byte(pci, PCI_REVISION_ID, &revid);
-+
-+ /* GNAT 2328 - don't set ECTRL_ENABLE_PREFETCH on Elan rev A */
-+ val = ECTRL_EXTEND_LATENCY | (39 << ECTRL_CLOCK_DIVIDE_RATE_SHIFT)
-+ | (6 << ECTRL_COMMS_DIVIDE_RATE_SHIFT);
-+ switch (revid)
-+ {
-+ case PCI_REVISION_ID_ELAN3_REVA:
-+ printk("elan%d: is an elan3 (revision a) - not supported\n", instance);
-+ return (EFAIL);
-+
-+ case PCI_REVISION_ID_ELAN3_REVB:
-+ val |= ECTRL_ENABLE_PREFETCH;
-+ if (BackToBackMaster)
-+ val |= ECTRL_FORCE_COMMSCLK_LOCAL;
-+ printk("elan%d: is an elan3 (revision b)\n", instance);
-+ break;
-+ default:
-+ printk("elan%d: unsupported elan3 revision %d\n",
-+ instance, revid);
-+ return EFAIL;
-+ }
-+ pci_write_config_dword(pci, PCI_CONF_ELAN3_CTRL, val);
-+ mb();
-+
-+ /*
-+ * GNAT: 2474
-+ * Hit reset on the Elan, then we MUST initialise the schedule status
-+ * register to drive reset on the link before the link can come out
-+ * of reset (15 uS). We need to keep it like this until we've
-+ * initialised SDRAM
-+ */
-+ pci_read_config_dword(pci, PCI_CONF_ELAN3_CTRL, &val);
-+ pci_write_config_dword(pci, PCI_CONF_ELAN3_CTRL,
-+ val | ECTRL_SOFTWARE_INTERNAL_RESET);
-+ mb();
-+
-+ /* Read the Vital Product Data to determine the cas latency */
-+ if ((res = ReadVitalProductData (dev, &CasLatency)) != ESUCCESS)
-+ return (res);
-+
-+ /*
-+ * Now clear the Software internal reset bit, and start the sdram
-+ */
-+ pci_write_config_dword(pci, PCI_CONF_ELAN3_CTRL, val);
-+ mb();
-+
-+ /*
-+ * Enable SDRAM before sizing and initialising it for ECC.
-+ * NOTE: don't enable all sets of the cache (yet), nor ECC
-+ */
-+ dev->Cache_Control_Reg = (CasLatency | REFRESH_RATE_16US);
-+
-+ write_reg32 (dev, Cache_Control_Reg.ContReg, (dev->Cache_Control_Reg | SETUP_SDRAM));
-+ mb();
-+
-+ INIT_SCHED_STATUS(dev, Sched_Initial_Value);
-+
-+ /*
-+ * Set the interrupt mask to 0 and enable the interrupt PAL
-+ * by writing any value to it.
-+ */
-+ SET_INT_MASK (dev, 0);
-+ writeb (0, (void *) intPalAddr);
-+
-+ return ESUCCESS;
-+}
-+
-+/*
-+ * Determine the size of elan PCI address spaces. EFAIL is returned if
-+ * unused or invalid BAR is specified, or if board reports I/O mapped space.
-+ */
-+int
-+DeviceRegisterSize(ELAN3_DEV *dev, int rnumber, int *sizep)
-+{
-+ struct pci_dev *pdev = dev->Osdep.pci;
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
-+ *sizep = pci_resource_size(pdev, rnumber);
-+#else
-+ *sizep = pci_resource_end(pdev, rnumber) - pci_resource_start(pdev, rnumber) + 1;
-+#endif
-+ return ESUCCESS;
-+}
-+
-+/*
-+ * Map PCI memory into kernel virtual address space. On the alpha,
-+ * we just return appropriate kseg address, and Unmap is a no-op.
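-+ * On 2.4 and later kernels the mapping is made with ioremap_nocache()
-+ * and UnmapDeviceRegister() really does iounmap() it; a len of 0 is
-+ * taken to mean "map the whole BAR".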
-+ */
-+int
-+MapDeviceRegister(ELAN3_DEV *dev, int rnumber, ioaddr_t *addrp,
-+                  int off, int len, DeviceMappingHandle *handlep)
-+{
-+    struct pci_dev *pdev = dev->Osdep.pci;
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
-+    u64 base = pci_get_base_address(pdev, rnumber);
-+    *addrp = (ioaddr_t) pci_base_to_kseg(base + off, pdev->bus->number);
-+
-+#else
-+    if (len == 0)
-+        len = pci_resource_end(pdev, rnumber) - pci_resource_start(pdev, rnumber) + 1;
-+
-+    if (len == 0)
-+        return (EINVAL);
-+
-+    *addrp = (ioaddr_t) ioremap_nocache (pci_resource_start(pdev, rnumber) + off, len);
-+#endif
-+
-+    *handlep = (void *) *addrp;
-+
-+    return (*addrp ? ESUCCESS : ENOMEM);
-+}
-+void
-+UnmapDeviceRegister(ELAN3_DEV *dev, DeviceMappingHandle *handlep)
-+{
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
-+    iounmap (*handlep);
-+#endif
-+}
-+
-+void
-+ElanBusError (ELAN3_DEV *dev)
-+{
-+    struct pci_dev *pci = dev->Osdep.pci;
-+    u8  phaseaddr, type;
-+    u16 status, cmd, physhi;
-+    u32 physlo;
-+
-+    printk("elan%d: bus error occurred\n", dev->Instance);
-+
-+    pci_read_config_word (pci, PCI_STATUS, &status);
-+    pci_read_config_word (pci, PCI_COMMAND, &cmd);
-+    pci_read_config_dword(pci, PCI_CONF_PARITY_PHYS_LO, &physlo);
-+    pci_read_config_word (pci, PCI_CONF_PARITY_PHYS_HI, &physhi);
-+    pci_read_config_byte (pci, PCI_CONF_PARITY_PHASE_ADDR, &phaseaddr);
-+    pci_read_config_byte (pci, PCI_CONF_PARITY_MASTER_TYPE, &type);
-+
-+#define PCI_CONF_STAT_FORMAT "\20" \
-+    "\6SIXTY_SIX_MHZ\7UDF\10FAST_BACK\11PARITY" \
-+    "\14SIG_TARGET_ABORT\15REC_TARGET_ABORT\16REC_MASTER_ABORT" \
-+    "\17SIG_SYSTEM_ERROR\20DETECTED_PARITY"
-+
-+    printk ("elan%d: status %x cmd %4x physaddr %04x%08x phase %x type %x\n",
-+            dev->Instance, status, cmd, physhi, physlo, phaseaddr, type);
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "stroustrup"
-+ * End:
-+ */
-diff -urN clean/drivers/net/qsnet/elan3/elansyscall.c linux-2.6.9/drivers/net/qsnet/elan3/elansyscall.c
---- clean/drivers/net/qsnet/elan3/elansyscall.c 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/elan3/elansyscall.c 2004-11-01 13:01:51.000000000 -0500
-@@ -0,0 +1,1230 @@
-+/*
-+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: elansyscall.c,v 1.100 2004/11/01 18:01:51 robin Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/os/elansyscall.c,v $*/ -+ -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static int sys_exception (ELAN3_CTXT *ctxt, int type, int proc, void *trap, va_list ap); -+static int sys_getWordItem (ELAN3_CTXT *ctxt, int list, void **itemp, E3_uint32 *valuep); -+static int sys_getBlockItem (ELAN3_CTXT *ctxt, int list, void **itemp, E3_Addr *valuep); -+static void sys_putWordItem (ELAN3_CTXT *ctxt, int list, E3_uint32 value); -+static void sys_putBlockItem (ELAN3_CTXT *ctxt, int list, E3_uint32 *ptr); -+static void sys_putbackItem (ELAN3_CTXT *ctxt, int list, void *item); -+static void sys_freeWordItem (ELAN3_CTXT *ctxt, void *item); -+static void sys_freeBlockItem (ELAN3_CTXT *ctxt, void *item); -+static int sys_countItems (ELAN3_CTXT *ctxt, int list); -+static int sys_event (ELAN3_CTXT *ctxt, E3_uint32 cookie, int flag); -+static void sys_swapin (ELAN3_CTXT *ctxt); -+static void sys_swapout (ELAN3_CTXT *ctxt); -+static void sys_freePrivate (ELAN3_CTXT *ctxt); -+static int sys_fixupNetworkError (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef); -+static int sys_startFaultCheck (ELAN3_CTXT *ctxt); -+static void sys_endFaultCheck (ELAN3_CTXT *ctxt); -+static E3_uint8 sys_load8 (ELAN3_CTXT *ctxt, E3_Addr addr); -+static void sys_store8 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint8 val); -+static E3_uint16 sys_load16 (ELAN3_CTXT *ctxt, E3_Addr addr); -+static void sys_store16 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint16 val); -+static E3_uint32 sys_load32 (ELAN3_CTXT *ctxt, E3_Addr addr); -+static void sys_store32 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint32 val); -+static E3_uint64 sys_load64 (ELAN3_CTXT *ctxt, E3_Addr addr); -+static void sys_store64 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint64 val); -+ -+static ELAN3_OPS elan3_sys_ops = { -+ ELAN3_OPS_VERSION, /* Version */ -+ -+ sys_exception, /* Exception */ -+ sys_getWordItem, /* GetWordItem */ -+ sys_getBlockItem, /* GetBlockItem */ -+ sys_putWordItem, /* PutWordItem */ -+ sys_putBlockItem, /* PutBlockItem */ -+ sys_putbackItem, /* PutbackItem */ -+ sys_freeWordItem, /* FreeWordItem */ -+ sys_freeBlockItem, /* FreeBlockItem */ -+ sys_countItems, /* CountItems */ -+ sys_event, /* Event */ -+ sys_swapin, /* Swapin */ -+ sys_swapout, /* Swapout */ -+ sys_freePrivate, /* FreePrivate */ -+ sys_fixupNetworkError, /* FixupNetworkError */ -+ NULL, /* DProcTrap */ -+ NULL, /* TProcTrap */ -+ NULL, /* IProcTrap */ -+ NULL, /* CProcTrap */ -+ NULL, /* CProcReissue */ -+ sys_startFaultCheck, /* StartFaultCheck */ -+ sys_endFaultCheck, /* EndFaultCheck */ -+ sys_load8, /* Load8 */ -+ sys_store8, /* Store8 */ -+ sys_load16, /* Load16 */ -+ sys_store16, /* Store16 */ -+ sys_load32, /* Load32 */ -+ sys_store32, /* Store32 */ -+ sys_load64, /* Load64 */ -+ sys_store64 /* Store64 */ -+}; -+ -+va_list null_valist; -+ -+SYS_CTXT * -+sys_init (ELAN3_CTXT *ctxt) -+{ -+ SYS_CTXT *sctx; -+ -+ /* Allocate and initialise the context private data */ -+ KMEM_ZALLOC (sctx, SYS_CTXT *, sizeof (SYS_CTXT), TRUE); -+ -+ if (sctx == NULL) -+ return ((SYS_CTXT *) NULL); -+ -+ sctx->Swap = NULL; -+ sctx->Armed = 0; -+ sctx->Backoff = 1; -+ sctx->Table = cookie_alloc_table ((unsigned long) ELAN3_MY_TASK_HANDLE(), 0); -+ sctx->signal = SIGSEGV; -+ -+ if (sctx->Table == NULL) -+ { -+ KMEM_FREE (sctx, sizeof 
(SYS_CTXT));
-+        return ((SYS_CTXT *) NULL);
-+    }
-+
-+    kmutex_init (&sctx->Lock);
-+    spin_lock_init (&sctx->WaitLock);
-+    kcondvar_init (&sctx->NetworkErrorWait);
-+
-+    /* Install my context operations and private data */
-+    ctxt->Operations = &elan3_sys_ops;
-+    ctxt->Private    = (void *) sctx;
-+
-+    return (sctx);
-+}
-+
-+/* returns -ve on error or ELAN_CAP_OK or ELAN_CAP_RMS */
-+/* use = ELAN_USER_ATTACH, ELAN_USER_P2P, ELAN_USER_BROADCAST */
-+int
-+elan3_validate_cap(ELAN3_DEV *dev, ELAN_CAPABILITY *cap, int use)
-+{
-+    /* Don't allow a user process to attach to system context */
-+    if (ELAN3_SYSTEM_CONTEXT (cap->cap_lowcontext) || ELAN3_SYSTEM_CONTEXT (cap->cap_highcontext)
-+        || cap->cap_lowcontext <= ELAN_USER_BASE_CONTEXT_NUM || cap->cap_highcontext <= ELAN_USER_BASE_CONTEXT_NUM)
-+    {
-+        PRINTF2 (DBG_DEVICE, DBG_VP, "elan3_validate_cap: lctx %x hctx %x\n", cap->cap_lowcontext, cap->cap_highcontext);
-+        PRINTF3 (DBG_DEVICE, DBG_VP, "elan3_validate_cap: bit %x low %x high %x\n", ((cap->cap_lowcontext) & SYS_CONTEXT_BIT),
-+                 E3_NUM_CONTEXT_0, ELAN3_KCOMM_BASE_CONTEXT_NUM);
-+
-+        PRINTF0 (DBG_DEVICE, DBG_VP, "elan3_validate_cap: user process can't attach to system cap\n");
-+        return (-EINVAL);
-+    }
-+
-+    if (cap->cap_type & ELAN_CAP_TYPE_HWTEST)
-+    {
-+        if (!(cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP)) /* can't have a bit map */
-+        {
-+            PRINTF0 (DBG_DEVICE, DBG_VP, "elanmod_classify_cap: ELAN_CAP_TYPE_HWTEST must have ELAN_CAP_TYPE_NO_BITMAP\n");
-+            return (-EINVAL);
-+        }
-+
-+        if (cap->cap_lowcontext != cap->cap_highcontext)
-+        {
-+            PRINTF2 (DBG_DEVICE, DBG_VP, "elanmod_classify_cap: ELAN_CAP_TYPE_HWTEST (cap->cap_lowcontext != cap->cap_highcontext) %d %d\n", cap->cap_lowcontext, cap->cap_highcontext);
-+            return (-EINVAL);
-+        }
-+
-+        if (! (ELAN3_HWTEST_CONTEXT(cap->cap_lowcontext) && ELAN3_HWTEST_CONTEXT(cap->cap_highcontext)))
-+        {
-+            PRINTF3 (DBG_DEVICE, DBG_VP, "elanmod_classify_cap: ELAN_CAP_TYPE_HWTEST HWTEST_BASE_CONTEXT %d %d %d\n", ELAN3_HWTEST_BASE_CONTEXT_NUM, cap->cap_lowcontext, ELAN3_HWTEST_TOP_CONTEXT_NUM);
-+            return (-EINVAL);
-+        }
-+
-+        if (cap->cap_lownode != ELAN_CAP_UNINITIALISED || cap->cap_highnode != ELAN_CAP_UNINITIALISED)
-+        {
-+            PRINTF0 (DBG_DEVICE, DBG_VP, "elanmod_classify_cap: ELAN_CAP_TYPE_HWTEST nodes != ELAN_CAP_UNINITIALISED\n");
-+            return (-EINVAL);
-+        }
-+
-+        return ELAN_CAP_OK;
-+    }
-+
-+    return elanmod_classify_cap(&dev->Position, cap, use);
-+}
-+
-+int
-+sys_waitevent (ELAN3_CTXT *ctxt, E3_Event *event)
-+{
-+    SYS_CTXT    *sctx = (SYS_CTXT *) ctxt->Private;
-+    EVENT_COOKIE cookie;
-+
-+    if (ctxt->Device->Devinfo.dev_revision_id == PCI_REVISION_ID_ELAN3_REVA)
-+        return (EINVAL);
-+
-+    cookie = fuword ((int *) &event->ev_Type) & ~(EV_TYPE_MASK_EVIRQ | EV_TYPE_MASK_BCOPY);
-+
-+    if (cookie_alloc_cookie (sctx->Table, cookie) != ESUCCESS)
-+        return (EINVAL);
-+
-+    cookie_arm_cookie (sctx->Table, cookie);
-+
-+    if (fuword ((int *) &event->ev_Count) > 0)
-+        cookie_wait_cookie (sctx->Table, cookie);
-+
-+    cookie_free_cookie (sctx->Table, cookie);
-+
-+    return (ESUCCESS);
-+}
-+
-+static void *
-+sys_getItem (SYS_SWAP_SPACE *sp, int list)
-+{
-+    void *itemp = (void *) fuptr_noerr ((void **) &sp->ItemListsHead[list]);
-+    void *next;
-+
-+    PRINTF4 (DBG_DEVICE, DBG_SYSCALL, "sys_getItem: sp=%p list=%d head=%p itemp=%p\n",
-+             sp, list, &sp->ItemListsHead[list], itemp);
-+
-+    if (itemp == NULL)
-+        return (NULL);
-+
-+    next = (void *) fuptr_noerr ((void *) itemp);
-+
-+    suptr_noerr ((void *) &sp->ItemListsHead[list], (void *) next);
-+    if (next == NULL)
-+
suptr_noerr ((void *) &sp->ItemListsTailp[list], (void *)&sp->ItemListsHead[list]); -+ return (itemp); -+} -+ -+static void -+sys_putItemBack (SYS_SWAP_SPACE *sp, int list, void *itemp) -+{ -+ PRINTF4 (DBG_DEVICE, DBG_SYSCALL, "sys_putItemBack: sp=%p list=%d itemp=%p value=%08x\n", -+ sp, list, itemp, fuword_noerr ((int *) &((SYS_WORD_ITEM *) itemp)->Value)); -+ -+ suptr_noerr ((void **) itemp, NULL); /* item->Next = NULL */ -+ suptr_noerr ((void **) fuptr_noerr ((void **) &sp->ItemListsTailp[list]), (void *)itemp); /* *Tailp = item */ -+ suptr_noerr ((void **) &sp->ItemListsTailp[list], (void *) itemp); /* Tailp = &item->Next */ -+} -+ -+static void -+sys_putItemFront (SYS_SWAP_SPACE *sp, int list, void *itemp) -+{ -+ PRINTF4 (DBG_DEVICE, DBG_SYSCALL, "sys_putItemFront: sp=%p list=%d itemp=%p value=%08x\n", -+ sp, list, itemp, fuword_noerr ((int *) &((SYS_WORD_ITEM *) itemp)->Value)); -+ -+ suptr_noerr ((void **) itemp, fuptr_noerr ((void **) &sp->ItemListsHead[list])); /* item->Next = Head */ -+ suptr_noerr ((void **) &sp->ItemListsHead[list], (void *) itemp); /* Head = item */ -+ -+ if (fuptr_noerr ((void **) &sp->ItemListsTailp[list]) == (void *) &sp->ItemListsHead[list]) /* if (Tailp == &Head) */ -+ suptr_noerr ((void **) &sp->ItemListsTailp[list], (void *) itemp); /* Tailp = &Item->Next */ -+} -+ -+ -+static int -+sys_getWordItem (ELAN3_CTXT *ctxt, int list, void **itemp, E3_uint32 *valuep) -+{ -+ SYS_CTXT *sctx = (SYS_CTXT *) ctxt->Private; -+ SYS_SWAP_SPACE *sp = sctx->Swap; -+ SYS_WORD_ITEM *item; -+ int res; -+ label_t ljb; -+ -+ kmutex_lock (&sctx->Lock); -+ -+ if (on_fault (&ljb)) -+ { -+ no_fault(); -+ kmutex_unlock (&sctx->Lock); -+ sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist); -+ return (0); -+ } -+ -+ item = (SYS_WORD_ITEM *) sys_getItem (sp, list); -+ -+ if (item == NULL) -+ res = 0; -+ else -+ { -+ if (list == LIST_DMA_PTR) -+ sctx->Armed = TRUE; -+ -+ *itemp = (void *) item; -+ *valuep = (E3_Addr) fuword_noerr ((E3_int32 *) &item->Value); -+ -+ PRINTF3 (ctxt, DBG_SYSCALL, "sys_getWordItem: list=%d -> item=%p value=%08x\n", list, *itemp, *valuep); -+ -+ res = 1; -+ } -+ -+ no_fault(); -+ kmutex_unlock (&sctx->Lock); -+ -+ return (res); -+} -+ -+static int -+sys_getBlockItem (ELAN3_CTXT *ctxt, int list, void **itemp, E3_Addr *valuep) -+{ -+ SYS_CTXT *sctx = (SYS_CTXT *) ctxt->Private; -+ SYS_SWAP_SPACE *sp = sctx->Swap; -+ SYS_BLOCK_ITEM *item; -+ int res; -+ label_t ljb; -+ -+ kmutex_lock (&sctx->Lock); -+ -+ if (on_fault (&ljb)) -+ { -+ no_fault(); -+ kmutex_unlock (&sctx->Lock); -+ sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist); -+ return (0); -+ } -+ -+ item = sys_getItem (sp, list); -+ -+ if (item == NULL) -+ res = 0; -+ else -+ { -+ E3_uint32 *dest = fuptr_noerr ((void **) &item->Pointer); -+ -+ if (list == LIST_DMA_DESC) -+ sctx->Armed = TRUE; -+ -+ *itemp = (void *) item; -+ *valuep = elan3mmu_elanaddr (ctxt->Elan3mmu, (caddr_t) dest); -+ -+ PRINTF3 (ctxt, DBG_SYSCALL, "sys_getBlockItem: list=%d -> item=%p addr=%08x\n", list, *itemp, *valuep); -+ PRINTF4 (ctxt, DBG_SYSCALL, " %08x %08x %08x %08x\n", -+ fuword_noerr ((int *) &dest[0]), fuword_noerr ((int *) &dest[1]), -+ fuword_noerr ((int *) &dest[2]), fuword_noerr ((int *) &dest[3])); -+ PRINTF4 (ctxt, DBG_SYSCALL, " %08x %08x %08x %08x\n", -+ fuword_noerr ((int *) &dest[4]), fuword_noerr ((int *) &dest[5]), -+ fuword_noerr ((int *) &dest[6]), fuword_noerr ((int *) &dest[7])); -+ -+ -+ res = 1; -+ } -+ -+ no_fault(); -+ kmutex_unlock (&sctx->Lock); -+ 
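-+    /* res is 1 if a block item was dequeued, 0 if the list was empty */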
-+ return (res); -+} -+ -+static void -+sys_putWordItem (ELAN3_CTXT *ctxt, int list, E3_Addr value) -+{ -+ SYS_CTXT *sctx = (SYS_CTXT *) ctxt->Private; -+ SYS_SWAP_SPACE *sp = sctx->Swap; -+ SYS_WORD_ITEM *item; -+ label_t ljp; -+ -+ kmutex_lock (&sctx->Lock); -+ -+ PRINTF2 (ctxt,DBG_SYSCALL, "sys_putWordItem: list=%x value=%x\n", list, value); -+ -+ if (on_fault (&ljp)) -+ { -+ no_fault(); -+ kmutex_unlock (&sctx->Lock); -+ -+ sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist); -+ return; -+ } -+ -+ item = sys_getItem (sp, LIST_FREE_WORD); -+ -+ PRINTF1 (ctxt, DBG_SYSCALL, "sys_putWordItem: item=%p\n", item); -+ -+ if (item == NULL) -+ { -+ no_fault(); -+ kmutex_unlock (&sctx->Lock); -+ -+ sys_exception (ctxt, EXCEPTION_SWAP_FAILED, list, (void *) NULL, null_valist); -+ return; -+ } -+ -+ PRINTF2 (ctxt, DBG_SYSCALL, "sys_putWordItem: storing value=%08x at %p\n", value, &item->Value); -+ -+ PRINTF2 (ctxt, DBG_SYSCALL, "sys_putWordItem: item=%p value=%08x\n", item, value); -+ -+ suword_noerr ((E3_int32 *) &item->Value, value); /* write "value" into item */ -+ -+ sys_putItemBack (sp, list, item); -+ -+ no_fault(); -+ kmutex_unlock (&sctx->Lock); -+} -+ -+static void -+sys_putBlockItem (ELAN3_CTXT *ctxt, int list, E3_uint32 *ptr) -+{ -+ SYS_CTXT *sctx = (SYS_CTXT *) ctxt->Private; -+ SYS_SWAP_SPACE *sp = sctx->Swap; -+ SYS_BLOCK_ITEM *item; -+ label_t ljp; -+ E3_uint32 *source; -+ E3_uint32 *dest; -+ -+ PRINTF2 (ctxt, DBG_SYSCALL, "sys_putBlockItem: list=%x ptr=%p\n", list, ptr); -+ -+ kmutex_lock (&sctx->Lock); -+ -+ if (on_fault (&ljp)) -+ { -+ no_fault(); -+ kmutex_unlock (&sctx->Lock); -+ -+ sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist); -+ return; -+ } -+ -+ item = sys_getItem (sp, LIST_FREE_BLOCK); /* get an item from the freelist. */ -+ -+ if (item == NULL) -+ { -+ no_fault(); -+ kmutex_unlock (&sctx->Lock); -+ -+ sys_exception (ctxt, EXCEPTION_SWAP_FAILED, list, (void *) NULL, null_valist); -+ return; -+ } -+ -+ /* -+ * The block will have been read using 64 bit reads, since we have -+ * to write it to user memory using 32 bit writes, we need to perform -+ * an endian swap on the Ultrasparc. -+ */ -+ dest = (E3_uint32 *) fuptr_noerr ((void **) &item->Pointer); -+ source = (E3_uint32 *) ptr; -+ -+ PRINTF2 (ctxt, DBG_SYSCALL, "sys_putBlockItem: item=%p dest=%p\n",item, dest); -+ PRINTF4 (ctxt, DBG_SYSCALL, " %08x %08x %08x %08x\n", -+ source[0^WordEndianFlip], source[1^WordEndianFlip], source[2^WordEndianFlip], source[3^WordEndianFlip]); -+ PRINTF4 (ctxt, DBG_SYSCALL, " %08x %08x %08x %08x\n", -+ source[4^WordEndianFlip], source[5^WordEndianFlip], source[6^WordEndianFlip], source[7^WordEndianFlip]); -+ -+ suword_noerr ((E3_int32 *) &dest[7], (E3_int32) source[7^WordEndianFlip]); -+ suword_noerr ((E3_int32 *) &dest[6], (E3_int32) source[6^WordEndianFlip]); -+ suword_noerr ((E3_int32 *) &dest[5], (E3_int32) source[5^WordEndianFlip]); -+ suword_noerr ((E3_int32 *) &dest[4], (E3_int32) source[4^WordEndianFlip]); -+ suword_noerr ((E3_int32 *) &dest[3], (E3_int32) source[3^WordEndianFlip]); -+ suword_noerr ((E3_int32 *) &dest[2], (E3_int32) source[2^WordEndianFlip]); -+ suword_noerr ((E3_int32 *) &dest[1], (E3_int32) source[1^WordEndianFlip]); -+ suword_noerr ((E3_int32 *) &dest[0], (E3_int32) source[0^WordEndianFlip]); -+ -+ sys_putItemBack (sp, list, item); /* chain onto list of items. 
*/ -+ -+ no_fault(); -+ kmutex_unlock (&sctx->Lock); -+} -+ -+static void -+sys_freeWordItem (ELAN3_CTXT *ctxt, void *itemp) -+{ -+ SYS_CTXT *sctx = (SYS_CTXT *) ctxt->Private; -+ SYS_SWAP_SPACE *sp = sctx->Swap; -+ label_t ljp; -+ -+ kmutex_lock (&sctx->Lock); -+ -+ if (on_fault (&ljp)) -+ { -+ no_fault(); -+ kmutex_unlock (&sctx->Lock); -+ -+ sys_exception (ctxt, EXCEPTION_SWAP_FAULT, LIST_FREE_WORD, (void *) NULL, null_valist); -+ return; -+ } -+ -+ sys_putItemBack (sp, LIST_FREE_WORD, itemp); -+ -+ no_fault(); -+ kmutex_unlock (&sctx->Lock); -+} -+ -+static void -+sys_freeBlockItem (ELAN3_CTXT *ctxt, void *itemp) -+{ -+ SYS_CTXT *sctx = (SYS_CTXT *) ctxt->Private; -+ SYS_SWAP_SPACE *sp = sctx->Swap; -+ SYS_BLOCK_ITEM *item = (SYS_BLOCK_ITEM *)itemp; -+ E3_uint32 *dest; -+ label_t ljp; -+ -+ kmutex_lock (&sctx->Lock); -+ -+ if (on_fault (&ljp)) -+ { -+ no_fault(); -+ kmutex_unlock (&sctx->Lock); -+ -+ sys_exception (ctxt, EXCEPTION_SWAP_FAULT, LIST_FREE_BLOCK, (void *) NULL, null_valist); -+ return; -+ } -+#ifdef DEBUG_PRINTF -+ dest = (E3_uint32 *) fuptr_noerr ((void **) &item->Pointer); -+ -+ PRINTF2 (ctxt, DBG_SYSCALL, "sys_freeBlockItem: item=%p dest=%p\n", item, dest); -+ PRINTF4 (ctxt, DBG_SYSCALL, " %08x %08x %08x %08x\n", -+ fuword_noerr ((int *) &dest[0]), fuword_noerr ((int *) &dest[1]), -+ fuword_noerr ((int *) &dest[2]), fuword_noerr ((int *) &dest[3])); -+ PRINTF4 (ctxt, DBG_SYSCALL, " %08x %08x %08x %08x\n", -+ fuword_noerr ((int *) &dest[4]), fuword_noerr ((int *) &dest[5]), -+ fuword_noerr ((int *) &dest[6]), fuword_noerr ((int *) &dest[7])); -+#endif -+ -+ sys_putItemBack (sp, LIST_FREE_BLOCK, itemp); -+ -+ no_fault(); -+ kmutex_unlock (&sctx->Lock); -+} -+ -+static void -+sys_putbackItem (ELAN3_CTXT *ctxt, int list, void *itemp) -+{ -+ SYS_CTXT *sctx = (SYS_CTXT *) ctxt->Private; -+ SYS_SWAP_SPACE *sp = sctx->Swap; -+ label_t ljp; -+ -+ kmutex_lock (&sctx->Lock); -+ -+ if (on_fault (&ljp)) -+ { -+ no_fault(); -+ kmutex_unlock (&sctx->Lock); -+ -+ sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist); -+ return; -+ } -+ -+ sys_putItemFront (sp, list, itemp); -+ -+ no_fault(); -+ kmutex_unlock (&sctx->Lock); -+} -+ -+static int -+sys_countItems (ELAN3_CTXT *ctxt, int list) -+{ -+ SYS_CTXT *sctx = (SYS_CTXT *) ctxt->Private; -+ SYS_SWAP_SPACE *sp = sctx->Swap; -+ int count = 0; -+ void *item; -+ label_t ljb; -+ -+ kmutex_lock (&sctx->Lock); -+ -+ if (on_fault (&ljb)) -+ { -+ no_fault(); -+ kmutex_unlock (&sctx->Lock); -+ sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist); -+ return (0); -+ } -+ -+ for (item = (void *) fuptr_noerr ((void **) &sp->ItemListsHead[list]); -+ item != NULL; -+ item = (void *) fuptr_noerr ((void **) item)) -+ { -+ count++; -+ } -+ -+ no_fault(); -+ kmutex_unlock (&sctx->Lock); -+ -+ return (count); -+} -+ -+ -+long sys_longTime; -+long sys_shortTime; -+int sys_waitTicks; -+int sys_maxBackoff; -+ -+#define SYS_LONG_TIME MAX((hz * 5) / 1000, 1) /* 5 ms */ -+#define SYS_SHORT_TIME MAX((hz * 2) / 1000, 1) /* 2 ms */ -+#define SYS_WAIT_TICKS MAX((hz * 1) / 1000, 1) /* 1 ms - backoff granularity */ -+#define SYS_MAX_BACKOFF MAX((hz * 5) / 1000, 1) /* 5 ms - max backoff for "nacked" packets*/ -+#define SYS_TIMEOUT_BACKOFF MAX((hz * 10) / 1000, 1) /* 10 ms - backoff for output timeout (point to point) */ -+#define SYS_BCAST_BACKOFF MAX((hz * 50) / 1000, 1) /* 50 ms - backoff for output timeout (broadcast) */ -+#define SYS_NETERR_BACKOFF MAX((hz * 10) / 1000, 1) /* 10 ms - delay for network error in dma 
data */ -+ -+static void -+sys_backoffWait (ELAN3_CTXT *ctxt, int ticks) -+{ -+ SYS_CTXT *sctx = (SYS_CTXT *) ctxt->Private; -+ long t; -+ -+ spin_lock (&sctx->WaitLock); -+ -+ t = lbolt - sctx->Time; -+ -+ if (sys_longTime == 0) sys_longTime = SYS_LONG_TIME; -+ if (sys_shortTime == 0) sys_shortTime = SYS_SHORT_TIME; -+ if (sys_waitTicks == 0) sys_waitTicks = SYS_WAIT_TICKS; -+ if (sys_maxBackoff == 0) sys_maxBackoff = SYS_MAX_BACKOFF; -+ -+ if (t > sys_longTime) /* It's a long time since the last trap */ -+ sctx->Backoff = 0; /* so set the backoff back down to 0 */ -+ -+ if (ticks) -+ { -+ PRINTF2 (ctxt, DBG_DPROC, "sys_backoffWait : Waiting - %d ticks [%lx]\n", ticks, t); -+ kcondvar_timedwait (&sctx->NetworkErrorWait, &sctx->WaitLock, NULL, lbolt + ticks); -+ } -+ else if (sctx->Armed) -+ { -+ if (t < sys_shortTime) /* It's been a short time since the last */ -+ { /* trap, so increase the backoff */ -+ sctx->Backoff++; -+ -+ if (sctx->Backoff > sys_maxBackoff) -+ sctx->Backoff = sys_maxBackoff; -+ } -+ -+ PRINTF2 (ctxt, DBG_DPROC, "sys_backoffWait : Waiting - %d [%lx]\n", sctx->Backoff, t); -+ -+ if (sctx->Backoff) -+ kcondvar_timedwaitsig (&sctx->NetworkErrorWait, &sctx->WaitLock, NULL, lbolt + sctx->Backoff * sys_waitTicks); -+ -+ sctx->Armed = 0; -+ } -+ else -+ { -+ PRINTF1 (ctxt, DBG_DPROC, "sys_backoffWait : Not Waiting - %d\n", sctx->Backoff); -+ -+ } -+ sctx->Time = lbolt; -+ -+ spin_unlock (&sctx->WaitLock); -+} -+ -+static int -+trapSize (int proc) -+{ -+ switch (proc) -+ { -+ case DMA_PROC: return (sizeof (DMA_TRAP)); -+ case THREAD_PROC: return (sizeof (THREAD_TRAP)); -+ case COMMAND_PROC: return (sizeof (COMMAND_TRAP)); -+ case INPUT_PROC: return (sizeof (INPUT_TRAP)); -+ default: return (0); -+ } -+} -+ -+static int -+sys_exception (ELAN3_CTXT *ctxt, int type, int proc, void *trapp, va_list ap) -+{ -+ SYS_CTXT *sctx = (SYS_CTXT *) ctxt->Private; -+ int res; -+ -+ PRINTF2 (ctxt, DBG_SYSCALL, "sys_exception: type %d proc %d\n", type, proc); -+ -+ switch (type) -+ { -+ case EXCEPTION_INVALID_ADDR: -+ { -+ E3_FaultSave_BE *faultSave = va_arg (ap, E3_FaultSave_BE *); -+ int res = va_arg (ap, int); -+ -+ sys_addException (sctx, type, proc, trapp, trapSize(proc), faultSave, res, 0); -+ break; -+ } -+ -+ case EXCEPTION_UNIMP_INSTR: -+ { -+ E3_uint32 instr = va_arg (ap, E3_uint32); -+ -+ sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, 0, instr); -+ break; -+ } -+ -+ case EXCEPTION_INVALID_PROCESS: -+ { -+ E3_uint32 vproc = va_arg (ap, E3_uint32); -+ int res = va_arg (ap, int); -+ -+ switch (proc) -+ { -+ case DMA_PROC: -+ if (sctx->Flags & ELAN3_SYS_FLAG_DMA_BADVP) -+ { -+ DMA_TRAP *trap = (DMA_TRAP *) trapp; -+ -+ if (trap->Desc.s.dma_direction != DMA_WRITE) -+ trap->Desc.s.dma_srcEvent = trap->Desc.s.dma_destEvent; -+ -+ trap->Desc.s.dma_direction = DMA_WRITE; -+ trap->Desc.s.dma_size = 0; -+ trap->Desc.s.dma_source = (E3_Addr) 0; -+ trap->Desc.s.dma_dest = (E3_Addr) 0; -+ trap->Desc.s.dma_destEvent = (E3_Addr) 0; -+ trap->Desc.s.dma_destCookieVProc = 0; -+ trap->Desc.s.dma_srcCookieVProc = 0; -+ -+ return (OP_IGNORE); -+ } -+ break; -+ -+ case THREAD_PROC: -+ if (sctx->Flags & ELAN3_SYS_FLAG_THREAD_BADVP) -+ { -+ THREAD_TRAP *trap = (THREAD_TRAP *) trapp; -+ -+ trap->TrapBits.s.PacketAckValue = E3_PAckError; -+ -+ return (OP_IGNORE); -+ } -+ break; -+ } -+ -+ sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, res, vproc); -+ break; -+ } -+ -+ case EXCEPTION_FAULTED: -+ { -+ E3_Addr addr = va_arg (ap, E3_Addr); -+ -+ sys_addException (sctx, type, 
proc, trapp, trapSize(proc), NULL, 0, addr);
-+        break;
-+    }
-+
-+    case EXCEPTION_QUEUE_OVERFLOW:
-+    {
-+        E3_FaultSave_BE *faultSave = va_arg (ap, E3_FaultSave_BE *);
-+        int              trapType  = va_arg (ap, int);
-+
-+        sys_addException (sctx, type, proc, trapp, trapSize(proc), faultSave, 0, trapType);
-+        break;
-+    }
-+
-+    case EXCEPTION_COMMAND_OVERFLOW:
-+    {
-+        int count = va_arg (ap, int);
-+
-+        sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, 0, count);
-+        break;
-+    }
-+
-+    case EXCEPTION_CHAINED_EVENT:
-+    {
-+        E3_Addr addr = va_arg (ap, E3_Addr);
-+
-+        sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, 0, addr);
-+        break;
-+    }
-+
-+    case EXCEPTION_DMA_RETRY_FAIL:
-+    case EXCEPTION_PACKET_TIMEOUT:
-+        if (proc != DMA_PROC)
-+            sys_backoffWait (ctxt, SYS_TIMEOUT_BACKOFF);
-+        else
-+        {
-+            DMA_TRAP *trap = (DMA_TRAP *) trapp;
-+
-+            if (sctx->Flags & ELAN3_SYS_FLAG_DMAFAIL)
-+            {
-+                E3_BlockCopyEvent *event;
-+
-+                if (trap->Desc.s.dma_direction != DMA_WRITE)
-+                    trap->Desc.s.dma_srcEvent = trap->Desc.s.dma_destEvent;
-+
-+                /* change the source word to be E3_EVENT_FAILED */
-+                if ((event = (E3_BlockCopyEvent *) elan3mmu_mainaddr (ctxt->Elan3mmu, trap->Desc.s.dma_srcEvent)) == NULL)
-+                {
-+                    sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, 0, 0);
-+                    break;
-+                }
-+
-+                suword (&event->ev_Source, E3_EVENT_FAILED);
-+                wmb(); mmiob();
-+
-+                trap->Desc.s.dma_direction       = DMA_WRITE;
-+                trap->Desc.s.dma_size            = 0;
-+                trap->Desc.s.dma_source          = (E3_Addr) 0;
-+                trap->Desc.s.dma_dest            = (E3_Addr) 0;
-+                trap->Desc.s.dma_destEvent       = (E3_Addr) 0;
-+                trap->Desc.s.dma_destCookieVProc = 0;
-+                trap->Desc.s.dma_srcCookieVProc  = 0;
-+
-+                return (OP_IGNORE);
-+            }
-+
-+            if (type == EXCEPTION_DMA_RETRY_FAIL)
-+                sys_backoffWait (ctxt, 0);
-+            else
-+            {
-+                ELAN_LOCATION location;
-+
-+                krwlock_read (&ctxt->VpLock);
-+                location = ProcessToLocation (ctxt, NULL, trap->Desc.s.dma_direction == DMA_WRITE ?
-+                                              trap->Desc.s.dma_destVProc : trap->Desc.s.dma_srcVProc, NULL);
-+                krwlock_done (&ctxt->VpLock);
-+
-+                sys_backoffWait (ctxt, location.loc_node == ELAN3_INVALID_NODE ? SYS_BCAST_BACKOFF : SYS_TIMEOUT_BACKOFF);
-+            }
-+        }
-+        return (OP_IGNORE);
-+
-+    case EXCEPTION_NETWORK_ERROR:
-+    {
-+        INPUT_TRAP       *trap = (INPUT_TRAP *) trapp;
-+        NETERR_RESOLVER **rvpp = va_arg (ap, NETERR_RESOLVER **);
-+
-+        ASSERT (trap->State == CTXT_STATE_NETWORK_ERROR);
-+
-+        if (! (sctx->Flags & ELAN3_SYS_FLAG_NETERR) && (trap->DmaIdentifyTransaction || trap->ThreadIdentifyTransaction))
-+        {
-+            if ((*rvpp) != (NETERR_RESOLVER *) NULL)
-+                res = (*rvpp)->Status;
-+            else if ((res = QueueNetworkErrorResolver (ctxt, trap, rvpp)) == ESUCCESS)
-+            {
-+                /* Successfully queued the network error resolver */
-+                return (OP_HANDLED);
-+            }
-+
-+            /* network error resolution has failed - either a bad cookie or */
-+            /* an rpc error has occurred */
-+            sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, res, 0);
-+        }
-+        else
-+        {
-+            /* Must be an overlapped dma packet. Must wait long enough to
-+             * ensure that the sending dma'er has tried to send the next
-+             * packet and had it discarded. In the real world this should
-+             * be greater than an output timeout. (About 8mSec) */
-+
-+            sys_backoffWait (ctxt, SYS_NETERR_BACKOFF);
-+
-+            /* set this inputter state to be ok, since we've been called
-+             * by the lwp it will lower the context filter for us, so
-+             * re-enabling the inputter, note we don't need to execute
-+             * any of the packet since the dma process will re-transmit
-+             * it after receiving a nack for the next packet */
-+            trap->State = CTXT_STATE_OK;
-+
-+            return (OP_HANDLED);
-+        }
-+        break;
-+    }
-+
-+    default:
-+        sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, 0, 0);
-+        break;
-+    }
-+
-+    if (type != EXCEPTION_DEBUG)
-+#ifdef LINUX
-+#ifdef NO_NPTL
-+        psignal (CURPROC()->p_opptr, sctx->signal);
-+#else
-+        psignal (CURPROC()->parent, sctx->signal);
-+#endif
-+#else
-+        psignal (CURPROC(), sctx->signal);
-+#endif
-+    return (OP_HANDLED);
-+}
-+
-+static int
-+sys_event (ELAN3_CTXT *ctxt, E3_uint32 cookie, int flag)
-+{
-+    SYS_CTXT *sctx = (SYS_CTXT *) ctxt->Private;
-+
-+    cookie_fire_cookie (sctx->Table, cookie);
-+
-+    return (OP_HANDLED);
-+}
-+
-+static void
-+sys_swapin (ELAN3_CTXT *ctxt)
-+{
-+    PRINTF0 (ctxt, DBG_SYSCALL, "sys_swapin\n");
-+}
-+
-+static void
-+sys_swapout (ELAN3_CTXT *ctxt)
-+{
-+    PRINTF0 (ctxt, DBG_SYSCALL, "sys_swapout\n");
-+}
-+
-+static void
-+sys_freePrivate (ELAN3_CTXT *ctxt)
-+{
-+    SYS_CTXT *sctx = (SYS_CTXT *) ctxt->Private;
-+
-+    cookie_free_table (sctx->Table);
-+
-+    kmutex_destroy (&sctx->Lock);
-+    spin_lock_destroy (&sctx->WaitLock);
-+    kcondvar_destroy (&sctx->NetworkErrorWait);
-+
-+    KMEM_FREE (sctx, sizeof (SYS_CTXT));
-+    ctxt->Private = NULL;
-+}
-+
-+static int
-+sys_checkThisDma (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef, E3_DMA *dma)
-+{
-+    E3_DmaType type;
-+    E3_uint32  cookie;
-+    E3_uint32  cvproc;
-+    int        ignore;
-+    int        match;
-+
-+    type.type = fuword_noerr ((int *) &dma->dma_type);
-+
-+    if (type.s.direction == DMA_WRITE)
-+    {
-+        cookie = fuword_noerr ((int *) &dma->dma_srcCookieVProc);
-+        cvproc = fuword_noerr ((int *) &dma->dma_destCookieVProc);
-+    }
-+    else
-+    {
-+        cookie = fuword_noerr ((int *) &dma->dma_destCookieVProc);
-+        cvproc = fuword_noerr ((int *) &dma->dma_srcCookieVProc);
-+    }
-+
-+    PRINTF5 (ctxt, DBG_NETERR, "sys_checkThisDma: dir = %d cookie = %08x cvproc = %08x CookieVProc %08x DstProcess %04x\n",
-+             type.s.direction, cookie, cvproc, nef->Message.CookieVProc, nef->Message.DstProcess);
-+
-+    /* A DMA matches a network error fixup if it's going to the right place (or is a broadcast)
-+     * and the appropriate cookie matches, except that we ignore DMAs which don't have a destEvent
-+     * since they don't have any atomic behaviour (though they still send the identify) */
-+
-+    ignore = (type.s.direction == DMA_WRITE && cookie == 0 &&
-+              fuword_noerr ((int *) &dma->dma_destEvent) == 0);
-+    match  = (nef->Message.CookieVProc == cookie &&
-+              (nef->Message.DstProcess == (cvproc & DMA_PROCESS_MASK) || nef->Message.WaitForEop));
-+
-+    PRINTF2 (ctxt, DBG_NETERR, " -> %s %s\n", ignore ? "ignore" : match ? "matched" : "not-matched", nef->Message.WaitForEop ? "wait for eop" : "");
-+
-+    if (match && !ignore && !nef->Message.WaitForEop)
-+    {
-+        PRINTF0 (ctxt, DBG_NETERR, "sys_checkThisDma: nuking the dma\n");
-+
-+        /* NOTE - we access the dma descriptor backwards since it could exist in sdram */
-+        if (type.s.direction != DMA_WRITE)
-+            suword_noerr ((int *) &dma->dma_srcEvent, 0);
-+
-+        suword_noerr ((int *) &dma->dma_destEvent, 0);
-+        suword_noerr ((int *) &dma->dma_dest, 0);
-+        suword_noerr ((int *) &dma->dma_source, 0);
-+        suword_noerr ((int *) &dma->dma_size, 0);
-+
-+        if (type.s.direction != DMA_WRITE)
-+            suword_noerr ((int *) &dma->dma_type, fuword_noerr ((int *) &dma->dma_type) & E3_DMA_CONTEXT_MASK);
-+
-+        wmb(); mmiob();
-+    }
-+
-+    return (match && !ignore);
-+}
-+
-+static int
-+sys_fixupNetworkError (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef)
-+{
-+    SYS_CTXT       *sctx    = (SYS_CTXT *) ctxt->Private;
-+    SYS_SWAP_SPACE *sp      = sctx->Swap;
-+    int             matched = 0;
-+    SYS_WORD_ITEM  *wordp;
-+    SYS_BLOCK_ITEM *blockp;
-+    label_t         ljb;
-+    int             res;
-+
-+    PRINTF3 (ctxt, DBG_NETERR, "sys_fixupnetworkError %08x %08x %08x\n",
-+             nef->Message.CookieAddr, nef->Message.CookieVProc, nef->Message.NextCookie);
-+
-+    if (nef->Message.CookieAddr == (E3_Addr) 0) /* It's a DMA which requires fixing up */
-+    {
-+        kmutex_lock (&sctx->Lock);
-+
-+        if (on_fault (&ljb))
-+            res = EFAULT;
-+        else
-+        {
-+            /* scan the dma ptr list */
-+            for (wordp = (SYS_WORD_ITEM *) fuptr_noerr ((void **) &sp->ItemListsHead[LIST_DMA_PTR]);
-+                 wordp != NULL;
-+                 wordp = (SYS_WORD_ITEM *) fuptr_noerr ((void **) &wordp->Next))
-+            {
-+                E3_uint32 value = fuword_noerr ((int *) &wordp->Value);
-+                E3_DMA   *dma   = (E3_DMA *) elan3mmu_mainaddr (ctxt->Elan3mmu, value);
-+
-+                PRINTF3 (ctxt, DBG_NETERR, "sys_fixupnetworkError: check block item %p Value %08x dma %p\n", wordp, value, dma);
-+
-+                matched += sys_checkThisDma (ctxt, nef, dma);
-+            }
-+
-+            /* scan the dma desc list */
-+            for (blockp = (SYS_BLOCK_ITEM *) fuptr_noerr ((void **) &sp->ItemListsHead[LIST_DMA_DESC]);
-+                 blockp != NULL;
-+                 blockp = (SYS_BLOCK_ITEM *) fuptr_noerr ((void **) &blockp->Next))
-+            {
-+                E3_DMA *dma = (E3_DMA *) fuptr_noerr ((void *) &blockp->Pointer);
-+
-+                PRINTF2 (ctxt, DBG_NETERR, "sys_fixupnetworkError: check block item %p Pointer %p\n", blockp, dma);
-+
-+                matched += sys_checkThisDma (ctxt, nef, dma);
-+            }
-+
-+            /* If we've still not found it, then check the command port item */
-+            /* it MUST be present as a command waiting to be executed, as */
-+            /* otherwise it could have already happened and we will claim to */
-+            /* have found it, but not really */
-+            if (ctxt->CommandPortItem != NULL)
-+            {
-+                E3_DMA *dma = (E3_DMA *) fuptr_noerr ((void *) &((SYS_BLOCK_ITEM *) ctxt->CommandPortItem)->Pointer);
-+
-+                if (sys_checkThisDma (ctxt, nef, dma))
-+                {
-+                    printk ("!!! it's the command port item - need to ensure that the command exists\n");
-+                    matched++;
-+                }
-+            }
-+
-+            res = matched ? ESUCCESS : ESRCH;
-+        }
-+        no_fault();
-+        kmutex_unlock (&sctx->Lock);
-+
-+        if (matched > 1)
-+            ElanException (ctxt, EXCEPTION_COOKIE_ERROR, DMA_PROC, NULL, NULL, nef->Message.CookieVProc);
-+    }
-+    else /* It's a thread which requires fixing up */
-+    {
-+        E3_int32 *cookiePtr = (E3_int32 *) elan3mmu_mainaddr (ctxt->Elan3mmu, nef->Message.CookieAddr);
-+        E3_uint32 curval    = fuword_noerr (cookiePtr);
-+
-+        if (curval == nef->Message.CookieVProc) /* thread doesn't think it's been done */
-+        {
-+            if (!
nef->Message.WaitForEop) -+ { -+ suword_noerr (cookiePtr, nef->Message.NextCookie); -+ mb(); mmiob(); -+ } -+ -+ res = ESUCCESS; -+ } -+ else /* thread thinks that it's been executed */ -+ { -+ res = ESRCH; -+ } -+ } -+ -+ CompleteNetworkErrorFixup (ctxt, nef, res); -+ -+ return (OP_HANDLED); -+} -+ -+ -+static int -+sys_startFaultCheck (ELAN3_CTXT *ctxt) -+{ -+ return (0); -+} -+ -+static void -+sys_endFaultCheck (ELAN3_CTXT *ctxt) -+{ -+ wmb(); -+} -+ -+static E3_uint8 -+sys_load8 (ELAN3_CTXT *ctxt, E3_Addr addr) -+{ -+ E3_uint8 *maddr = (E3_uint8 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr); -+ -+ return (fubyte_noerr (maddr)); -+} -+ -+static void -+sys_store8 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint8 val) -+{ -+ E3_uint8 *maddr = (E3_uint8 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr); -+ -+ subyte_noerr (maddr, val); -+ wmb(); mmiob(); -+} -+ -+static E3_uint16 -+sys_load16 (ELAN3_CTXT *ctxt, E3_Addr addr) -+{ -+ E3_uint16 *maddr = (E3_uint16 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr); -+ -+ return (fusword_noerr (maddr)); -+} -+ -+static void -+sys_store16 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint16 val) -+{ -+ E3_uint16 *maddr = (E3_uint16 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr); -+ -+ susword_noerr (maddr, val); -+ wmb(); mmiob(); -+} -+ -+static E3_uint32 -+sys_load32 (ELAN3_CTXT *ctxt, E3_Addr addr) -+{ -+ E3_uint32 *maddr = (E3_uint32 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr); -+ -+ return (fuword_noerr (maddr)); -+} -+ -+static void -+sys_store32 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint32 val) -+{ -+ E3_uint32 *maddr = (E3_uint32 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr); -+ -+ suword_noerr (maddr, val); -+ wmb(); mmiob(); -+} -+ -+static E3_uint64 -+sys_load64 (ELAN3_CTXT *ctxt, E3_Addr addr) -+{ -+ E3_uint64 *maddr = (E3_uint64 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr); -+ -+ return (fulonglong_noerr ((long long *) maddr)); -+} -+ -+static void -+sys_store64 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint64 val) -+{ -+ E3_uint64 *maddr = (E3_uint64 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr); -+ -+ sulonglong_noerr ((long long *) maddr, val); -+ wmb(); mmiob(); -+} -+ -+ -+void -+sys_addException (SYS_CTXT *sctx, int type, int proc, caddr_t trapp, int size, -+ E3_FaultSave_BE *faultSave, u_long res, u_long value) -+{ -+ SYS_EXCEPTION *ex_ptr; -+ int front; -+ int back; -+ int count; -+ label_t ljp; -+ -+ PRINTF4 (DBG_DEVICE, DBG_FN, "sys_addException: type %d proc %d res %ld value %ld\n", -+ type, proc, res, value); -+ -+ KMEM_ZALLOC (ex_ptr, SYS_EXCEPTION *, sizeof (SYS_EXCEPTION), TRUE); -+ -+ if (ex_ptr != NULL) -+ { -+ bzero ((caddr_t) ex_ptr, sizeof (SYS_EXCEPTION)); -+ -+ ex_ptr->Type = type; -+ ex_ptr->Proc = proc; -+ ex_ptr->Res = res; -+ ex_ptr->Value = value; -+ -+ if (trapp && size) -+ bcopy (trapp, (caddr_t) &ex_ptr->Union, size); -+ if (faultSave) -+ bcopy ((caddr_t) faultSave, (caddr_t) &ex_ptr->FaultArea, sizeof (E3_FaultSave_BE)); -+ } -+ -+ kmutex_lock (&sctx->Lock); -+ if (! 
on_fault (&ljp)) -+ { -+ front = fuword_noerr (&sctx->Exceptions->Front); -+ back = fuword_noerr (&sctx->Exceptions->Back); -+ count = fuword_noerr (&sctx->Exceptions->Count); -+ -+ if (count <= 0 || front < 0 || back < 0 || front >= count || back >= count) -+ suword_noerr (&sctx->Exceptions->Overflow, fuword_noerr (&sctx->Exceptions->Overflow) + 1); -+ else if (((front+1) % count ) == back) -+ suword_noerr (&sctx->Exceptions->Overflow, fuword_noerr (&sctx->Exceptions->Overflow) + 1); -+ else -+ { -+ if (ex_ptr != NULL) -+ copyout_noerr ((caddr_t) ex_ptr, (caddr_t) &sctx->Exceptions->Exceptions[front], sizeof (SYS_EXCEPTION)); -+ else -+ { -+ suword_noerr (&sctx->Exceptions->Exceptions[front].Type, EXCEPTION_ENOMEM); -+ suword_noerr (&sctx->Exceptions->Exceptions[front].Proc, 0); -+ } -+ suword_noerr (&sctx->Exceptions->Front, (front + 1) % count); -+ } -+ -+ /* always reset the magic number in case it's been overwritten */ -+ /* so that 'edb' can find the exception page in the core file */ -+ suword_noerr (&sctx->Exceptions->Magic, SYS_EXCEPTION_MAGIC); -+ } -+ no_fault(); -+ kmutex_unlock (&sctx->Lock); -+ -+ if (ex_ptr != NULL) -+ KMEM_FREE (ex_ptr, sizeof (SYS_EXCEPTION)); -+} -+ -+int -+sys_getException (SYS_CTXT *sctx, SYS_EXCEPTION *ex) -+{ -+ int front; -+ int back; -+ int count; -+ int res; -+ label_t ljp; -+ -+ if (sctx->Exceptions == NULL) -+ return (EINVAL); -+ -+ kmutex_lock (&sctx->Lock); -+ if (on_fault (&ljp)) -+ { -+ no_fault(); -+ kmutex_unlock (&sctx->Lock); -+ return (EFAULT); -+ } -+ -+ front = fuword_noerr (&sctx->Exceptions->Front); -+ back = fuword_noerr (&sctx->Exceptions->Back); -+ count = fuword_noerr (&sctx->Exceptions->Count); -+ -+ if (count <= 0 || front < 0 || back < 0 || front >= count || back >= count || back == front) -+ res = EINVAL; -+ else -+ { -+ copyin_noerr ((caddr_t) &sctx->Exceptions->Exceptions[back], (caddr_t) ex, sizeof (SYS_EXCEPTION)); -+ suword_noerr (&sctx->Exceptions->Back, (back+1) % count); -+ -+ res = ESUCCESS; -+ } -+ no_fault(); -+ kmutex_unlock (&sctx->Lock); -+ -+ return (res); -+} -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan3/eventcookie.c linux-2.6.9/drivers/net/qsnet/elan3/eventcookie.c ---- clean/drivers/net/qsnet/elan3/eventcookie.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan3/eventcookie.c 2003-08-13 06:03:03.000000000 -0400 -@@ -0,0 +1,324 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. 
-+ *
-+ *    For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ident "@(#)$Id: eventcookie.c,v 1.7 2003/08/13 10:03:03 fabien Exp $"
-+/* $Source: /cvs/master/quadrics/elan3mod/elan3/os/eventcookie.c,v $*/
-+
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+
-+static EVENT_COOKIE_TABLE *cookie_tables;
-+static spinlock_t          cookie_table_lock;
-+
-+/*
-+ * cookie_drop_entry:
-+ *   drop the reference to a cookie held
-+ *   by the cookie table
-+ */
-+static void
-+cookie_drop_entry (EVENT_COOKIE_ENTRY *ent)
-+{
-+    unsigned long flags;
-+
-+    spin_lock_irqsave (&ent->ent_lock, flags);
-+    if (--ent->ent_ref != 0)
-+    {
-+        ent->ent_fired = ent->ent_cookie;
-+        kcondvar_wakeupall (&ent->ent_wait, &ent->ent_lock);
-+
-+        spin_unlock_irqrestore (&ent->ent_lock, flags);
-+    }
-+    else
-+    {
-+        spin_unlock_irqrestore (&ent->ent_lock, flags);
-+
-+        spin_lock_destroy (&ent->ent_lock);
-+        kcondvar_destroy (&ent->ent_wait);
-+
-+        KMEM_FREE (ent, sizeof (EVENT_COOKIE_ENTRY));
-+    }
-+}
-+
-+void
-+cookie_init()
-+{
-+    spin_lock_init (&cookie_table_lock);
-+}
-+
-+void
-+cookie_fini()
-+{
-+    spin_lock_destroy (&cookie_table_lock);
-+}
-+
-+EVENT_COOKIE_TABLE *
-+cookie_alloc_table (unsigned long task, unsigned long handle)
-+{
-+    EVENT_COOKIE_TABLE *tbl, *ntbl;
-+
-+    KMEM_ZALLOC (ntbl, EVENT_COOKIE_TABLE *, sizeof (EVENT_COOKIE_TABLE), TRUE);
-+
-+    if (ntbl == NULL)
-+        return (NULL);
-+
-+    spin_lock (&cookie_table_lock);
-+
-+    for (tbl = cookie_tables; tbl; tbl = tbl->tbl_next)
-+        if (tbl->tbl_task == task && tbl->tbl_handle == handle)
-+            break;
-+
-+    if (tbl != NULL)
-+        tbl->tbl_ref++;
-+    else
-+    {
-+        spin_lock_init (&ntbl->tbl_lock);
-+
-+        ntbl->tbl_task    = task;
-+        ntbl->tbl_handle  = handle;
-+        ntbl->tbl_ref     = 1;
-+        ntbl->tbl_entries = NULL;
-+
-+        if ((ntbl->tbl_next = cookie_tables) != NULL)
-+            cookie_tables->tbl_prev = ntbl;
-+        cookie_tables = ntbl;
-+        ntbl->tbl_prev = NULL;
-+    }
-+    spin_unlock (&cookie_table_lock);
-+
-+    if (tbl == NULL)
-+        return (ntbl);
-+    else
-+    {
-+        KMEM_FREE (ntbl, sizeof (EVENT_COOKIE_TABLE));
-+        return (tbl);
-+    }
-+}
-+
-+void
-+cookie_free_table (EVENT_COOKIE_TABLE *tbl)
-+{
-+    EVENT_COOKIE_ENTRY *ent;
-+
-+    spin_lock (&cookie_table_lock);
-+    if (tbl->tbl_ref > 1)
-+    {
-+        tbl->tbl_ref--;
-+        spin_unlock (&cookie_table_lock);
-+        return;
-+    }
-+
-+    if (tbl->tbl_prev)
-+        tbl->tbl_prev->tbl_next = tbl->tbl_next;
-+    else
-+        cookie_tables = tbl->tbl_next;
-+    if (tbl->tbl_next)
-+        tbl->tbl_next->tbl_prev = tbl->tbl_prev;
-+
-+    spin_unlock (&cookie_table_lock);
-+
-+    /* NOTE - table no longer visible to other threads
-+     *        no need to acquire tbl_lock */
-+    while ((ent = tbl->tbl_entries) != NULL)
-+    {
-+        if ((tbl->tbl_entries = ent->ent_next) != NULL)
-+            ent->ent_next->ent_prev = NULL;
-+
-+        cookie_drop_entry (ent);
-+    }
-+    spin_lock_destroy (&tbl->tbl_lock);
-+
-+    KMEM_FREE (tbl, sizeof (EVENT_COOKIE_TABLE));
-+}
-+
-+int
-+cookie_alloc_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie)
-+{
-+    EVENT_COOKIE_ENTRY *ent, *nent;
-+    unsigned long flags;
-+
-+    KMEM_ZALLOC (nent, EVENT_COOKIE_ENTRY *, sizeof (EVENT_COOKIE_ENTRY), TRUE);
-+
-+    spin_lock_irqsave (&tbl->tbl_lock, flags);
-+    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
-+        if (ent->ent_cookie == cookie)
-+            break;
-+
-+    if (ent == NULL)
-+    {
-+        kcondvar_init (&nent->ent_wait);
-+        spin_lock_init (&nent->ent_lock);
-+
-+        nent->ent_ref    = 1;
-+        nent->ent_cookie = cookie;
-+
-+        if ((nent->ent_next = tbl->tbl_entries) != NULL)
-+
tbl->tbl_entries->ent_prev = nent; -+ tbl->tbl_entries = nent; -+ nent->ent_prev = NULL; -+ } -+ spin_unlock_irqrestore (&tbl->tbl_lock, flags); -+ -+ if (ent == NULL) -+ return (ESUCCESS); -+ else -+ { -+ KMEM_FREE (nent, sizeof (EVENT_COOKIE_ENTRY)); -+ return (EINVAL); -+ } -+} -+ -+int -+cookie_free_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie) -+{ -+ EVENT_COOKIE_ENTRY *ent; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&tbl->tbl_lock, flags); -+ for (ent = tbl->tbl_entries; ent; ent = ent->ent_next) -+ if (ent->ent_cookie == cookie) -+ break; -+ -+ if (ent == NULL) -+ { -+ spin_unlock_irqrestore (&tbl->tbl_lock, flags); -+ return (EINVAL); -+ } -+ -+ if (ent->ent_prev == NULL) -+ tbl->tbl_entries = ent->ent_next; -+ else -+ ent->ent_prev->ent_next = ent->ent_next; -+ -+ if (ent->ent_next != NULL) -+ ent->ent_next->ent_prev = ent->ent_prev; -+ -+ spin_unlock_irqrestore (&tbl->tbl_lock, flags); -+ -+ cookie_drop_entry (ent); -+ -+ return (ESUCCESS); -+} -+ -+/* -+ * cookie_fire_cookie: -+ * fire the cookie - this is called from the event interrupt. -+ */ -+int -+cookie_fire_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie) -+{ -+ EVENT_COOKIE_ENTRY *ent; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&tbl->tbl_lock, flags); -+ for (ent = tbl->tbl_entries; ent; ent = ent->ent_next) -+ if (ent->ent_cookie == cookie) -+ break; -+ -+ if (ent == NULL) -+ { -+ spin_unlock_irqrestore (&tbl->tbl_lock, flags); -+ return (EINVAL); -+ } -+ -+ spin_lock (&ent->ent_lock); -+ ent->ent_fired = cookie; -+ kcondvar_wakeupall (&ent->ent_wait, &ent->ent_lock); -+ spin_unlock (&ent->ent_lock); -+ -+ spin_unlock_irqrestore (&tbl->tbl_lock, flags); -+ -+ return (ESUCCESS); -+} -+ -+/* -+ * cookie_wait_cookie: -+ * deschedule on a cookie if it has not already fired. -+ * note - if the cookie is removed from the table, then -+ * we free it off when we're woken up. 
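-+ *   (ent_ref is raised before blocking, so whichever of the waiter
-+ *   or cookie_drop_entry drops the last reference does the free)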
-+ */ -+int -+cookie_wait_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie) -+{ -+ EVENT_COOKIE_ENTRY *ent; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&tbl->tbl_lock, flags); -+ for (ent = tbl->tbl_entries; ent; ent = ent->ent_next) -+ if (ent->ent_cookie == cookie) -+ break; -+ -+ if (ent == NULL) -+ { -+ spin_unlock_irqrestore (&tbl->tbl_lock, flags); -+ return (EINVAL); -+ } -+ -+ spin_lock (&ent->ent_lock); -+ spin_unlock (&tbl->tbl_lock); -+ -+ if (ent->ent_fired != 0) -+ { -+ spin_unlock_irqrestore (&ent->ent_lock, flags); -+ return (ESUCCESS); -+ } -+ -+ ent->ent_ref++; -+ kcondvar_waitsig (&ent->ent_wait, &ent->ent_lock, &flags); -+ -+ if (--ent->ent_ref > 0) -+ spin_unlock_irqrestore (&ent->ent_lock, flags); -+ else -+ { -+ spin_unlock_irqrestore (&ent->ent_lock, flags); -+ -+ spin_lock_destroy (&ent->ent_lock); -+ kcondvar_destroy (&ent->ent_wait); -+ -+ KMEM_FREE (ent, sizeof (EVENT_COOKIE_ENTRY)); -+ } -+ return (ESUCCESS); -+} -+ -+int -+cookie_arm_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie) -+{ -+ EVENT_COOKIE_ENTRY *ent; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&tbl->tbl_lock, flags); -+ for (ent = tbl->tbl_entries; ent; ent = ent->ent_next) -+ if (ent->ent_cookie == cookie) -+ break; -+ -+ if (ent == NULL) -+ { -+ spin_unlock_irqrestore (&tbl->tbl_lock, flags); -+ return (EINVAL); -+ } -+ -+ spin_lock (&ent->ent_lock); -+ ent->ent_fired = 0; -+ spin_unlock (&ent->ent_lock); -+ -+ spin_unlock_irqrestore (&tbl->tbl_lock, flags); -+ -+ return (ESUCCESS); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan3/iproc.c linux-2.6.9/drivers/net/qsnet/elan3/iproc.c ---- clean/drivers/net/qsnet/elan3/iproc.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan3/iproc.c 2003-09-24 09:57:25.000000000 -0400 -@@ -0,0 +1,925 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: iproc.c,v 1.47 2003/09/24 13:57:25 david Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/os/iproc.c,v $ */ -+ -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+ -+static int TrSizeTable[] = {0, 8, 16, 32, 64}; -+ -+static void ConvertTransactionToSetEvent (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_Addr Addr); -+static void SimulateBlockWrite (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap); -+static void SimulateWriteWord (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap); -+static void SimulateWriteDWord (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap); -+static void SimulateTraceRoute (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap); -+static void BumpInputterStats (ELAN3_DEV *dev, E3_IprocTrapHeader_BE *hdrp); -+ -+void -+HandleIProcTrap (ELAN3_DEV *dev, -+ int Channel, -+ E3_uint32 Pend, -+ sdramaddr_t FaultSaveOff, -+ sdramaddr_t TransactionsOff, -+ sdramaddr_t DataOff) -+{ -+ E3_IprocTrapHeader_BE Transaction0; -+ ELAN3_CTXT *ctxt; -+ INPUT_TRAP *trap; -+ register int i; -+ -+ /* -+ * Read the 1st set of transactions, so we can determine the -+ * context for the trap -+ */ -+ elan3_sdram_copyq_from_sdram (dev, TransactionsOff, (void *) &Transaction0, 16); -+ -+ BumpStat (dev, IProcTraps); -+ BumpInputterStats (dev, &Transaction0); -+ -+ if (Transaction0.s.TrTypeCntx.s.TypeCntxInvalid) -+ { -+ /* -+ * The context is not valid. This will occur if the packet -+ * trapped for an EopError with no IdentTrans or an error corrupted the context -+ * giving a CRC error on the first transaction and the Ack had not been returned. -+ */ -+ if (Transaction0.s.TrTypeCntx.s.LastTrappedTrans) -+ { -+ PRINTF0 (DBG_DEVICE, DBG_IPROC, "iproc: Error on EOP without a good context, ignoring trap\n"); -+ } -+ else -+ { -+ /* Check that only crap has been received. If not then die. */ -+ if (! Transaction0.s.IProcTrapStatus.s.BadLength && -+ (Transaction0.s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_GOOD) -+ { -+ printk ("iproc: Did not have a valid context for the trap area.\n"); -+ printk ("iproc: TrTypeCntx=%x TrAddr=%x TrData0=%x IProcTrapStatus=%x\n", -+ Transaction0.s.TrTypeCntx.TypeContext, Transaction0.s.TrAddr, -+ Transaction0.s.TrData0, Transaction0.s.IProcTrapStatus.Status); -+ panic ("elan3: iproc did not have a valid context"); -+ /* NOTREACHED */ -+ } -+ PRINTF0 (DBG_DEVICE, DBG_IPROC, "iproc: First transaction is bad, ignoring trap\n"); -+ } -+ } -+ else -+ { -+ ctxt = ELAN3_DEV_CTX_TABLE(dev, Transaction0.s.TrTypeCntx.s.Context); -+ -+ if (ctxt == NULL) -+ { -+ PRINTF1 (DBG_DEVICE, DBG_INTR, "HandleIProcTrap: context %x invalid\n", -+ Transaction0.s.TrTypeCntx.s.Context); -+ -+ BumpStat (dev, InvalidContext); -+ } -+ else -+ { -+ trap = (Channel == 0) ? &ctxt->Input0Trap : &ctxt->Input1Trap; -+ -+ ASSERT (trap->State == CTXT_STATE_OK); -+ -+ trap->Transactions[0] = Transaction0; -+ -+ PRINTF1 (ctxt, DBG_INTR, "HandleIProcTrap: %s\n", IProcTrapString (&trap->Transactions[0], NULL)); -+ /* -+ * Copy the rest of the transactions into the trap area. 
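-+ * stopping at the transaction flagged LastTrappedTrans, or setting
-+ * trap->Overflow once MAX_TRAPPED_TRANS entries have been copied.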
-+ */ -+ for (i = 0; !(trap->Transactions[i].s.TrTypeCntx.s.LastTrappedTrans);) -+ { -+ if (++i >= MAX_TRAPPED_TRANS) -+ { -+ trap->Overflow = 1; -+ break; -+ } -+ -+ elan3_sdram_copyq_from_sdram (dev, TransactionsOff + i*sizeof (E3_IprocTrapHeader), (void *) &trap->Transactions[i], 16); -+ -+ PRINTF1 (ctxt, DBG_INTR, " %s\n", IProcTrapString (&trap->Transactions[i], NULL)); -+ -+ BumpInputterStats (dev, &trap->Transactions[i]); -+ } -+ -+ /* -+ * Remember the number of transactions we've copied. -+ */ -+ trap->NumTransactions = i+1; -+ -+ PRINTF1 (ctxt, DBG_INTR, " NumTransactions = %d\n", trap->NumTransactions); -+ -+ /* -+ * Copy all the data blocks in one go to let the Elan prefetcher work -+ */ -+ elan3_sdram_copyq_from_sdram (dev, DataOff, trap->DataBuffers, trap->NumTransactions*sizeof (E3_IprocTrapData)); -+ -+ /* -+ * Copy fault save area and clear out for next time round. -+ */ -+ elan3_sdram_copyq_from_sdram (dev, FaultSaveOff, (void *) &trap->FaultSave, 16); -+ elan3_sdram_zeroq_sdram (dev, FaultSaveOff, 16); -+ -+ if (ELAN3_OP_IPROC_TRAP (ctxt, trap, Channel) == OP_DEFER) -+ { -+ /* -+ * Mark the trap as valid and set the inputter state to -+ * raise the context filter. -+ */ -+ trap->State = CTXT_STATE_TRAPPED; -+ kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock); -+ -+ SetInputterStateForContext (ctxt, Pend, NULL); -+ } -+ } -+ } -+} -+ -+void -+InspectIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap) -+{ -+ int i; -+ int StatusValid; -+ -+ trap->AckSent = 0; -+ trap->BadTransaction = 0; -+ -+ trap->TrappedTransaction = NULL; -+ trap->TrappedDataBuffer = NULL; -+ trap->WaitForEopTransaction = NULL; -+ trap->WaitForEopDataBuffer = NULL; -+ trap->DmaIdentifyTransaction = NULL; -+ trap->ThreadIdentifyTransaction = NULL; -+ trap->LockQueuePointer = (E3_Addr) 0; -+ trap->UnlockQueuePointer = (E3_Addr) 0; -+ -+ /* -+ * Now scan all the transactions received -+ */ -+ for (i = 0; i < trap->NumTransactions ; i++) -+ { -+ E3_IprocTrapHeader_BE *hdrp = &trap->Transactions[i]; -+ E3_IprocTrapData_BE *datap = &trap->DataBuffers[i]; -+ -+ StatusValid = hdrp->s.TrTypeCntx.s.StatusRegValid != 0; -+ -+ if (StatusValid && hdrp->s.IProcTrapStatus.s.AckSent) /* Remember if we've sent the ack back */ -+ trap->AckSent = 1; -+ -+ if (hdrp->s.TrTypeCntx.s.LastTrappedTrans) /* Check for EOP */ -+ { -+ ASSERT (i == trap->NumTransactions - 1); -+ -+ switch (hdrp->s.IProcTrapStatus.Status & E3_IPS_EopType) -+ { -+ case EOP_GOOD: -+ /* if we get an EOP_GOOD then the outputer should have received a PAckOk. */ -+ /* unless it was a flood, in which case someone must have sent an ack */ -+ /* but not necessarily us */ -+ break; -+ -+ case EOP_BADACK: -+ BumpUserStat (ctxt, EopBadAcks); -+ -+ /* if we get an EOP_BADACK then the outputer did not receive a PAckOk even if -+ * we sent a PAckOk. We can clear tinfo.AckSent. */ -+ if (trap->AckSent == 1) -+ { -+ PRINTF0 (ctxt, DBG_IPROC, "InspectIProcTrap: Network error destroyed PAckOk\n"); -+ trap->AckSent = 0; -+ } -+ break; -+ -+ case EOP_ERROR_RESET: -+ BumpUserStat (ctxt, EopResets); -+ -+ /* if we get an EOP_ERROR_RESET then the outputer may or may not have got a PAckOk. */ -+ trap->BadTransaction = 1; -+ break; -+ -+ default: -+ panic ("InspectIProcTrap: invalid EOP type in status register\n"); -+ /* NOTREACHED */ -+ } -+ continue; -+ } -+ -+ PRINTF2 (ctxt, DBG_IPROC, "InspectIProcTrap: %2d: %s\n", i, IProcTrapString (hdrp, datap)); -+ -+ if (! 
StatusValid) /* We're looking at transactions stored before the trap */
-+        {   /* these should only be identifies and lock transactions */
-+
-+            if (hdrp->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT)
-+                panic ("InspectIProcTrap: writeblock transaction found in input trap header before trap occurred\n");
-+
-+            switch (hdrp->s.TrTypeCntx.s.Type & TR_OPCODE_TYPE_MASK)
-+            {
-+            case TR_LOCKQUEUE & TR_OPCODE_TYPE_MASK:
-+                if (trap->LockQueuePointer) /* Already seen a LOCKQUEUE transaction in this packet, */
-+                {                           /* the user program should not have done this !! */
-+                    ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap);
-+                    return;
-+                }
-+
-+                trap->LockQueuePointer = (E3_Addr) hdrp->s.TrAddr; /* Remember the queue pointer in case we need to unlock it */
-+                break;
-+
-+            case TR_DMAIDENTIFY & TR_OPCODE_TYPE_MASK:
-+                if (trap->DmaIdentifyTransaction ||  /* Already seen an identify transaction in this packet */
-+                    trap->ThreadIdentifyTransaction) /* the user program should not have done this */
-+                {
-+                    ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap);
-+                    return;
-+                }
-+                trap->DmaIdentifyTransaction = hdrp;
-+                break;
-+
-+            case TR_THREADIDENTIFY & TR_OPCODE_TYPE_MASK:
-+                if (trap->DmaIdentifyTransaction ||  /* Already seen an identify transaction in this packet */
-+                    trap->ThreadIdentifyTransaction) /* the user program should not have done this */
-+                {
-+                    ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap);
-+                    return;
-+                }
-+                trap->ThreadIdentifyTransaction = hdrp;
-+                break;
-+
-+            default:
-+                panic ("InspectIProcTrap: invalid transaction found in input trap header before trap occurred\n");
-+                /* NOTREACHED */
-+            }
-+            continue;
-+        }
-+
-+        if (StatusValid && trap->TrappedTransaction == NULL) /* Remember the transaction which caused the */
-+        {                                                    /* trap */
-+            trap->TrappedTransaction = hdrp;
-+            trap->TrappedDataBuffer  = datap;
-+        }
-+
-+        if (hdrp->s.IProcTrapStatus.s.BadLength ||
-+            ((hdrp->s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_ERROR) ||
-+            ((hdrp->s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_BAD))
-+        {
-+            int j;
-+            PRINTF0 (ctxt, DBG_IPROC, "InspectIProcTrap: transaction has a bad crc\n");
-+            for (j = 0; j < TRANS_DATA_WORDS; j += 4)
-+                PRINTF4 (ctxt, DBG_IPROC, "InspectIProcTrap: %08x %08x %08x %08x\n",
-+                         datap->TrData[j], datap->TrData[j+1], datap->TrData[j+2], datap->TrData[j+3]);
-+            trap->BadTransaction = 1;
-+            continue;
-+        }
-+
-+        /* No more to do if it's a writeblock transaction */
-+        if (hdrp->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT)
-+            continue;
-+
-+
-+        if (GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus) == MI_InputDoTrap &&
-+            (hdrp->s.TrTypeCntx.s.Type & TR_WAIT_FOR_EOP) != 0)
-+        {
-+            /*
-+             * This is a wait for eop transaction that has trapped because the inputter
-+             * then received an EopError. The next transaction saved should always be an
-+             * EopError.
-+             */
-+            PRINTF0 (ctxt, DBG_IPROC, "InspectIProcTrap: got a trapped WaitForEop transaction due to EopError\n");
-+
-+            trap->WaitForEopTransaction = hdrp;
-+            trap->WaitForEopDataBuffer  = datap;
-+            continue;
-+        }
-+
-+        switch (hdrp->s.TrTypeCntx.s.Type & TR_OPCODE_TYPE_MASK)
-+        {
-+        case TR_UNLOCKQUEUE & TR_OPCODE_TYPE_MASK:
-+            if (trap->UnlockQueuePointer)
-+            {
-+                ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap);
-+                return;
-+            }
-+            trap->UnlockQueuePointer = (E3_Addr) hdrp->s.TrAddr;
-+            break;
-+        }
-+    }
-+}
-+
-+void
-+ResolveIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, NETERR_RESOLVER **rvpp)
-+{
-+    ELAN3_DEV *dev = ctxt->Device;
-+    int res;
-+    unsigned long flags;
-+
-+    ASSERT (!
CTXT_IS_KERNEL (ctxt)); -+ -+ BumpUserStat (ctxt, IProcTraps); -+ -+ InspectIProcTrap (ctxt, trap); -+ -+ /* -+ * fixup page fault if we've trapped because of one. -+ */ -+ if (trap->FaultSave.s.FaultContext != 0) -+ { -+ /* -+ * If it's a WRITEBLOCK transaction, then see if we remember faulting -+ * before it, and try and prefault in a sensible amount past it. -+ */ -+ int fixedFault = FALSE; -+ INPUT_FAULT_SAVE *entry; -+ INPUT_FAULT_SAVE **predp; -+ int npages; -+ -+ if ((trap->TrappedTransaction->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT) != 0 && /* a DMA packet */ -+ trap->LockQueuePointer == (E3_Addr) 0 && /* but not a queueing DMA */ -+ trap->TrappedTransaction->s.TrAddr != 0) /* and not a DMA to 0 */ -+ { -+ spin_lock (&ctxt->InputFaultLock); -+ -+ for (predp = &ctxt->InputFaultList; (entry = *predp)->Next != NULL ; predp = &entry->Next) -+ { -+ if (entry->Addr == trap->TrappedTransaction->s.TrAddr) -+ break; -+ } -+ -+ *predp = entry->Next; -+ entry->Next = ctxt->InputFaultList; -+ ctxt->InputFaultList = entry; -+ -+ if (entry->Addr == trap->TrappedTransaction->s.TrAddr) -+ { -+ if ((entry->Count <<= 1) > MAX_INPUT_FAULT_PAGES) -+ entry->Count = MAX_INPUT_FAULT_PAGES; -+ } -+ else -+ { -+ entry->Count = MIN_INPUT_FAULT_PAGES; -+ } -+ -+ entry->Addr = trap->TrappedTransaction->s.TrAddr + (entry->Count * PAGESIZE); -+ npages = entry->Count; -+ -+ spin_unlock (&ctxt->InputFaultLock); -+ -+ if (elan3_pagefault (ctxt, &trap->FaultSave, npages) != ESUCCESS) -+ { -+ PRINTF2 (ctxt, DBG_IPROC, "ResolveIProcTrap: pagefaulting %d pages at %08x - failed\n", -+ npages, trap->TrappedTransaction->s.TrAddr); -+ } -+ else -+ { -+ PRINTF2 (ctxt, DBG_IPROC, "ResolveIProcTrap: pagefaulting %d pages at %08x - succeeded\n", -+ npages, trap->TrappedTransaction->s.TrAddr); -+ -+ fixedFault = TRUE; -+ } -+ } -+ -+ /* Workaround WRITEBLOCK transaction executed when LOCKQUEUE transaction missed */ -+ /* the packet will have been nacked */ -+ if ((trap->TrappedTransaction->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT) && /* a DMA packet */ -+ trap->LockQueuePointer == 0 && trap->UnlockQueuePointer && /* a queueing DMA */ -+ trap->TrappedTransaction->s.TrAddr == trap->FaultSave.s.FaultAddress) /* and missed lockqueue */ -+ { -+ fixedFault = TRUE; -+ } -+ -+ if (! fixedFault) -+ { -+ if ((res = elan3_pagefault (ctxt, &trap->FaultSave, 1)) != ESUCCESS) -+ { -+ PRINTF1 (ctxt, DBG_IPROC, "ResolveIProcTrap: elan3_pagefault failed at %x\n", -+ trap->FaultSave.s.FaultAddress); -+ ElanException (ctxt, EXCEPTION_INVALID_ADDR, INPUT_PROC, trap, &trap->FaultSave, res); -+ return; -+ } -+ } -+ } -+ -+ if (! trap->AckSent && trap->LockQueuePointer) /* Queued DMA */ -+ { /* The ack was not sent, so the queue will be locked. */ -+ SimulateUnlockQueue (ctxt, trap->LockQueuePointer, FALSE); /* We must unlock it. 
*/ -+ } -+ -+ if (trap->AckSent && trap->BadTransaction) -+ { -+ if (trap->DmaIdentifyTransaction) -+ { -+ PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: Dma identify needs network resolution\n"); -+ -+ BumpStat (dev, DmaIdentifyNetworkErrors); -+ BumpUserStat (ctxt, DmaIdentifyNetworkErrors); -+ -+ if (trap->WaitForEopTransaction) -+ PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: have delayed wait for eop transaction\n"); -+ } -+ else if (trap->ThreadIdentifyTransaction) -+ { -+ PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: Thread identify needs network resolution\n"); -+ -+ BumpStat (dev, ThreadIdentifyNetworkErrors); -+ BumpUserStat (ctxt, ThreadIdentifyNetworkErrors); -+ -+ if (trap->WaitForEopTransaction) -+ PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: have delayed wait for eop transaction\n"); -+ } -+ else -+ { -+ BumpStat (dev, DmaNetworkErrors); -+ BumpUserStat (ctxt, DmaNetworkErrors); -+ } -+ } -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ -+ if (! trap->AckSent) -+ { -+ PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: ack not sent, lowering context filter\n"); -+ -+ trap->State = CTXT_STATE_OK; -+ } -+ else -+ { -+ if (trap->BadTransaction) -+ { -+ PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: ack sent, waiting on bad transaction\n"); -+ trap->State = CTXT_STATE_NETWORK_ERROR; -+ } -+ else -+ { -+ PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: ack sent, waiting on packet to be re-executed\n"); -+ trap->State = CTXT_STATE_NEEDS_RESTART; -+ } -+ } -+ -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ -+ if (trap->AckSent && trap->BadTransaction) -+ ElanException (ctxt, EXCEPTION_NETWORK_ERROR, INPUT_PROC, trap, rvpp); -+} -+ -+int -+RestartIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap) -+{ -+ PRINTF1 (ctxt, DBG_IPROC, "RestartIProc: %d transactions\n", trap->NumTransactions); -+ -+ if (trap->TrappedTransaction == NULL) /* No transaction trapped - probably a network */ -+ return (ESUCCESS); /* error */ -+ -+ while (! 
trap->TrappedTransaction->s.TrTypeCntx.s.LastTrappedTrans) -+ { -+ E3_IprocTrapHeader_BE *hdrp = trap->TrappedTransaction; -+ E3_IprocTrapData_BE *datap = trap->TrappedDataBuffer; -+ -+ ASSERT (hdrp->s.TrTypeCntx.s.StatusRegValid != 0); -+ -+ PRINTF2 (ctxt, DBG_IPROC, "RestartIProc: TrType=0x%x Status=0x%x\n", -+ hdrp->s.TrTypeCntx.TypeContext, hdrp->s.IProcTrapStatus.Status); -+ -+ if ((hdrp->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT) != 0) -+ { -+ PRINTF1 (ctxt, DBG_IPROC, "RestartIProc: WRITEBLOCK : Addr %x\n", hdrp->s.TrAddr); -+ SimulateBlockWrite (ctxt, hdrp, datap); -+ } -+ else -+ { -+ switch (hdrp->s.TrTypeCntx.s.Type & TR_OPCODE_TYPE_MASK) -+ { -+ case TR_SETEVENT & TR_OPCODE_TYPE_MASK: -+ PRINTF1 (ctxt, DBG_IPROC, "RestartIProc: SETEVENT : %x\n", hdrp->s.TrAddr); -+ -+ if (GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus) != MI_InputDoTrap) -+ FixupEventTrap (ctxt, INPUT_PROC, trap, GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus), &trap->FaultSave, FALSE); -+ else if (hdrp->s.TrAddr) -+ { -+ if (IssueCommand (ctxt, offsetof (E3_CommandPort, SetEvent), hdrp->s.TrAddr, FALSE) != ISSUE_COMMAND_OK) -+ return (EAGAIN); -+ } -+ break; -+ -+ case TR_WRITEWORD & TR_OPCODE_TYPE_MASK: -+ SimulateWriteWord (ctxt, hdrp, datap); -+ break; -+ -+ case TR_WRITEDOUBLEWORD & TR_OPCODE_TYPE_MASK: -+ SimulateWriteDWord (ctxt, hdrp, datap); -+ break; -+ -+ case TR_UNLOCKQUEUE & TR_OPCODE_TYPE_MASK: -+ if (GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus) == MI_InputDoTrap) -+ ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap); -+ else -+ { -+ switch (GET_STATUS_TRAPTYPE (hdrp->s.IProcTrapStatus)) -+ { -+ case MI_WaitForUnLockDescRead: -+ /* -+ * Fault occurred on the read of the queue descriptor - since the ack -+ * has been sent we need to move the queue on one slot. -+ */ -+ PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: TR_UNLOCKQUEUE : desc read fault\n"); -+ -+ SimulateUnlockQueue (ctxt, trap->LockQueuePointer, TRUE); -+ -+ if (IssueCommand (ctxt, offsetof (E3_CommandPort, SetEvent), -+ hdrp->s.TrAddr + E3_QUEUE_EVENT_OFFSET, FALSE) != ISSUE_COMMAND_OK) -+ { -+ /* Failed to issue setevent to complete queue unlock; since we've already unlocked */ -+ /* the queue, we should "convert" this transaction into a setevent transaction that */ -+ /* hasn't trapped */ -+ PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: could not issue setevent for SimulateUnlockQueue\n"); -+ -+ ConvertTransactionToSetEvent (ctxt, hdrp, hdrp->s.TrAddr + E3_QUEUE_EVENT_OFFSET); -+ return (EAGAIN); -+ } -+ break; -+ -+ case MI_DoSetEvent: -+ /* -+ * Fault occurred on either the write to unlock the queue or during -+ * processing of the event. Test the fault address against the -+ * queue address to find out which - in this case, since the ack -+ * has been sent we need to move the queue on one slot. 
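-+ * If the faulting address matches LockQueuePointer the write to unlock faulted, so we redo the unlock by hand and reissue the queue event below; any other address means the event processing itself faulted, and control drops through to the default FixupEventTrap case. 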
-+ */ -+ if (trap->FaultSave.s.FaultAddress == trap->LockQueuePointer) -+ { -+ PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: fixed unlock queue write to unlock fault\n"); -+ -+ SimulateUnlockQueue (ctxt, trap->LockQueuePointer, TRUE); -+ -+ if (IssueCommand (ctxt, offsetof (E3_CommandPort, SetEvent), -+ hdrp->s.TrAddr + E3_QUEUE_EVENT_OFFSET, FALSE) != ISSUE_COMMAND_OK) -+ { -+ /* Failed to issue setevent to complete queue unlock, since we've already unlocked */ -+ /* the queue, we should "convert" this transaction into a setevent transaction that */ -+ /* hasn't trapped */ -+ PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: could not issue setevent for SimulateUnlockQueue\n"); -+ -+ ConvertTransactionToSetEvent (ctxt, hdrp, hdrp->s.TrAddr + E3_QUEUE_EVENT_OFFSET); -+ return (EFAIL); -+ } -+ break; -+ } -+ /*DROPTHROUGH*/ -+ -+ default: -+ FixupEventTrap (ctxt, INPUT_PROC, trap, GET_STATUS_TRAPTYPE (hdrp->s.IProcTrapStatus), -+ &trap->FaultSave, FALSE); -+ break; -+ } -+ trap->LockQueuePointer = trap->UnlockQueuePointer = 0; -+ } -+ break; -+ -+ case TR_SENDDISCARD & TR_OPCODE_TYPE_MASK: -+ /* Just ignore send-discard transactions */ -+ PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: ignore SENDDISCARD\n"); -+ break; -+ -+ case TR_REMOTEDMA & TR_OPCODE_TYPE_MASK: -+ PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: REMOTEDMA\n"); -+ -+ /* modify the dma type since it will still be a "read" dma */ -+ ((E3_DMA_BE *) datap)->s.dma_type &= ~(DMA_TYPE_READ | E3_DMA_CONTEXT_MASK); -+ ((E3_DMA_BE *) datap)->s.dma_type |= DMA_TYPE_ISREMOTE; -+ -+ RestartDmaDesc (ctxt, (E3_DMA_BE *) datap); -+ break; -+ -+ case TR_TRACEROUTE & TR_OPCODE_TYPE_MASK: -+ PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: TRACEROUTE\n"); -+ SimulateTraceRoute (ctxt, hdrp, datap); -+ break; -+ -+ default: -+ ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap); -+ break; -+ } -+ } -+ -+ /* -+ * We've successfully processed this transaction, so move onto the -+ * next one. -+ */ -+ trap->TrappedTransaction++; -+ trap->TrappedDataBuffer++; -+ } -+ -+ return (ESUCCESS); -+} -+ -+static void -+ConvertTransactionToSetEvent (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_Addr Addr) -+{ -+ hdrp->s.TrTypeCntx.s.Type = TR_SETEVENT; -+ hdrp->s.TrTypeCntx.s.StatusRegValid = 0; -+ hdrp->s.TrAddr = Addr; -+} -+ -+void -+SimulateBlockWrite (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap) -+{ -+ void *saddr = (void *) ((unsigned long) datap + (hdrp->s.TrAddr & 0x3f)); -+ unsigned nbytes = (hdrp->s.TrTypeCntx.s.Type) & TR_PARTSIZE_MASK; -+ int i; -+ -+ if (nbytes == 0) -+ nbytes = sizeof (E3_IprocTrapData_BE); -+ -+ if (ELAN3_OP_START_FAULT_CHECK (ctxt)) -+ { -+ ELAN3_OP_END_FAULT_CHECK (ctxt); -+ -+ PRINTF1 (ctxt, DBG_IPROC, "SimulateBlockWrite: faulted at %x\n", hdrp->s.TrAddr); -+ ElanException (ctxt, EXCEPTION_FAULTED, INPUT_PROC, NULL, hdrp->s.TrAddr); -+ return; -+ } -+ -+ /* -+ * NOTE: since the block copy could be to sdram, we issue the writes backwards, -+ * except we MUST ensure that the last item in the block is written last. 
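-+ * For the doubleword case, for example, the stores run from offset nbytes - 16 down to 0 and the final doubleword at nbytes - 8 is issued last, presumably so that a consumer polling the last word of the block never observes it before the rest of the data is visible. 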
-+ */ -+ switch (((hdrp->s.TrTypeCntx.s.Type) >> TR_TYPE_SHIFT) & TR_TYPE_MASK) -+ { -+ case TR_TYPE_BYTE: /* 8 bit */ -+ for (i = nbytes - (2*sizeof (E3_uint8)); i >= 0; i -= sizeof (E3_uint8)) -+ ELAN3_OP_STORE8 (ctxt, hdrp->s.TrAddr + i, ((E3_uint8 *) saddr)[i]); -+ i = nbytes - sizeof (E3_uint8); -+ ELAN3_OP_STORE8 (ctxt, hdrp->s.TrAddr + i, ((E3_uint8 *) saddr)[i]); -+ break; -+ -+ case TR_TYPE_SHORT: /* 16 bit */ -+ for (i = nbytes - (2*sizeof (E3_uint16)); i >= 0; i -= sizeof (E3_uint16)) -+ ELAN3_OP_STORE16 (ctxt, hdrp->s.TrAddr + i, ((E3_uint16 *) saddr)[i]); -+ i = nbytes - sizeof (E3_uint16); -+ ELAN3_OP_STORE16 (ctxt, hdrp->s.TrAddr + i, ((E3_uint16 *) saddr)[i]); -+ break; -+ -+ case TR_TYPE_WORD: /* 32 bit */ -+ for (i = nbytes - (2*sizeof (E3_uint32)); i >= 0; i -= sizeof (E3_uint32)) -+ ELAN3_OP_STORE32 (ctxt, hdrp->s.TrAddr + i, ((E3_uint32 *) saddr)[i]); -+ i = nbytes - sizeof (E3_uint32); -+ ELAN3_OP_STORE32 (ctxt, hdrp->s.TrAddr + i, ((E3_uint32 *) saddr)[i]); -+ break; -+ -+ case TR_TYPE_DWORD: /* 64 bit */ -+ for (i = nbytes - (2*sizeof (E3_uint64)); i >= 0; i -= sizeof (E3_uint64)) -+ ELAN3_OP_STORE64 (ctxt, hdrp->s.TrAddr + i, ((E3_uint64 *) saddr)[i]); -+ i = nbytes - sizeof (E3_uint64); -+ ELAN3_OP_STORE64 (ctxt, hdrp->s.TrAddr + i, ((E3_uint64 *) saddr)[i]); -+ break; -+ } -+ ELAN3_OP_END_FAULT_CHECK (ctxt); -+} -+ -+void -+SimulateWriteWord (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap) -+{ -+ if (ELAN3_OP_START_FAULT_CHECK (ctxt)) -+ { -+ ELAN3_OP_END_FAULT_CHECK (ctxt); -+ -+ PRINTF1 (ctxt, DBG_IPROC, "SimulateWriteWord: faulted at %x\n", hdrp->s.TrAddr); -+ ElanException (ctxt, EXCEPTION_FAULTED, INPUT_PROC, NULL, hdrp->s.TrAddr); -+ return; -+ } -+ -+ ELAN3_OP_STORE32 (ctxt, hdrp->s.TrAddr, ((E3_uint32 *) datap)[WordEndianFlip]); -+ ELAN3_OP_END_FAULT_CHECK (ctxt); -+} -+ -+void -+SimulateWriteDWord (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap) -+{ -+ if (ELAN3_OP_START_FAULT_CHECK (ctxt)) -+ { -+ ELAN3_OP_END_FAULT_CHECK (ctxt); -+ -+ PRINTF1 (ctxt, DBG_IPROC, "SimulateWriteDWord: faulted at %x\n", hdrp->s.TrAddr); -+ ElanException (ctxt, EXCEPTION_FAULTED, INPUT_PROC, NULL, hdrp->s.TrAddr); -+ return; -+ } -+ -+ ELAN3_OP_STORE64 (ctxt, hdrp->s.TrAddr, ((E3_uint64 *) datap)[0]); -+ ELAN3_OP_END_FAULT_CHECK (ctxt); -+} -+ -+void -+SimulateTraceRoute (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap) -+{ -+ E3_uint32 *saddr = (E3_uint32 *) ((unsigned long) datap + (hdrp->s.TrAddr & 0x3f)); -+ unsigned nwords = TrSizeTable[(hdrp->s.TrTypeCntx.s.Type >> TR_SIZE_SHIFT) & TR_SIZE_MASK] / sizeof (E3_uint32); -+ int i; -+ -+ if (ELAN3_OP_START_FAULT_CHECK (ctxt)) -+ { -+ ELAN3_OP_END_FAULT_CHECK (ctxt); -+ -+ PRINTF1 (ctxt, DBG_IPROC, "SimulateTraceRoute: faulted at %x\n", hdrp->s.TrAddr); -+ ElanException (ctxt, EXCEPTION_FAULTED, INPUT_PROC, NULL, hdrp->s.TrAddr); -+ return; -+ } -+ -+ for (i = nwords-2; i >= 0; i--) -+ ELAN3_OP_STORE32 (ctxt, hdrp->s.TrAddr + (i * sizeof (E3_uint32)), saddr[i ^ WordEndianFlip]); -+ -+ i = nwords-1; -+ ELAN3_OP_STORE32 (ctxt, hdrp->s.TrAddr + (i * sizeof (E3_uint32)), saddr[i ^ WordEndianFlip]); -+ -+ ELAN3_OP_END_FAULT_CHECK (ctxt); -+} -+ -+void -+SimulateUnlockQueue (ELAN3_CTXT *ctxt, E3_Addr QueuePointer, int SentAck) -+{ -+ E3_uint32 QueueLock; -+ E3_Addr QueueBPTR; -+ E3_Addr QueueFPTR; -+ E3_uint64 QueueStateAndBPTR; -+ -+ if (ELAN3_OP_START_FAULT_CHECK (ctxt)) -+ { -+ ELAN3_OP_END_FAULT_CHECK (ctxt); -+ -+ PRINTF1 (ctxt, DBG_IPROC, 
"UnlockQueue: faulted with QueuePointer %x\n", QueuePointer); -+ ElanException (ctxt, EXCEPTION_FAULTED, INPUT_PROC, NULL, QueuePointer); -+ return; -+ } -+ -+ if (SentAck) -+ { -+ QueueBPTR = ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_bptr)); -+ QueueFPTR = ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_fptr)); -+ -+ if (QueueBPTR == ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_top))) /* move on back pointer */ -+ QueueBPTR = ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_base)); -+ else -+ QueueBPTR += ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_size)); -+ -+ QueueLock = ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_state)); -+ -+ if (QueueBPTR == QueueFPTR) /* and set full bit if fptr == bptr */ -+ QueueLock |= E3_QUEUE_FULL; -+ -+ QueueLock &= ~E3_QUEUE_LOCKED; -+ -+ QueueStateAndBPTR = (E3_uint64)QueueLock << 32 | QueueBPTR; -+ -+ ELAN3_OP_STORE64 (ctxt, QueuePointer + offsetof (E3_Queue, q_state), QueueStateAndBPTR); -+ } -+ else -+ { -+ QueueLock = ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_state)); -+ -+ QueueLock &= ~E3_QUEUE_LOCKED; -+ -+ ELAN3_OP_STORE32 (ctxt, QueuePointer + offsetof (E3_Queue, q_state), QueueLock); -+ } -+ -+ no_fault(); -+} -+ -+static void -+BumpInputterStats (ELAN3_DEV *dev, E3_IprocTrapHeader_BE *hdrp) -+{ -+ if (hdrp->s.TrTypeCntx.s.LastTrappedTrans) /* EOP */ -+ { -+ switch (hdrp->s.IProcTrapStatus.Status & E3_IPS_EopType) -+ { -+ case EOP_BADACK: -+ BumpStat (dev, EopBadAcks); -+ break; -+ case EOP_ERROR_RESET: -+ BumpStat (dev, EopResets); -+ break; -+ } -+ } -+ else if (hdrp->s.TrTypeCntx.s.StatusRegValid) -+ { -+ /* -+ * Errors are tested in order of badness. i.e. badlength will prevent a BadCrc and so on... 
-+ */ -+ if (hdrp->s.IProcTrapStatus.s.BadLength) -+ BumpStat (dev, InputterBadLength); -+ else if ((hdrp->s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_BAD) -+ BumpStat (dev, InputterCRCBad); -+ else if ((hdrp->s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_ERROR) -+ BumpStat (dev, InputterCRCErrors); -+ else if ((hdrp->s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_DISCARD) -+ BumpStat (dev, InputterCRCDiscards); -+ } -+} -+ -+char * -+IProcTrapString (E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap) -+{ -+ static char buffer[256]; -+ static char typeString[256]; -+ static char statusString[256]; -+ char *ptr; -+ E3_Addr Addr = hdrp->s.TrAddr; -+ E3_uint32 Type = hdrp->s.TrTypeCntx.s.Type; -+ E3_uint32 Context = hdrp->s.TrTypeCntx.s.Context; -+ E3_uint32 StatusValid = hdrp->s.TrTypeCntx.s.StatusRegValid; -+ -+ if (hdrp->s.TrTypeCntx.s.LastTrappedTrans) -+ { -+ switch (hdrp->s.IProcTrapStatus.Status & E3_IPS_EopType) -+ { -+ case EOP_GOOD: sprintf (typeString, "EOP GOOD"); break; -+ case EOP_BADACK: sprintf (typeString, "EOP BADACK"); break; -+ case EOP_ERROR_RESET: sprintf (typeString, "EOP ERROR RESET"); break; -+ default: sprintf (typeString, "EOP - bad status"); break; -+ } -+ sprintf (buffer, "%15s Cntx=%08x", typeString, Context); -+ } -+ else -+ { -+ if (Type & TR_WRITEBLOCK_BIT) -+ { -+ switch ((Type >> TR_TYPE_SHIFT) & TR_TYPE_MASK) -+ { -+ case TR_TYPE_BYTE: ptr = "Byte"; break; -+ case TR_TYPE_SHORT: ptr = "Short"; break; -+ case TR_TYPE_WORD: ptr = "Word"; break; -+ case TR_TYPE_DWORD: ptr = "Double"; break; -+ default: ptr = "Unknown"; break; -+ } -+ -+ sprintf (typeString, "WriteBlock Type=%s Size=%2d", ptr, Type & TR_PARTSIZE_MASK); -+ } -+ else -+ { -+ switch (Type & TR_OPCODE_TYPE_MASK) -+ { -+ case TR_SETEVENT & TR_OPCODE_TYPE_MASK: sprintf (typeString, "Setevent"); break; -+ case TR_REMOTEDMA & TR_OPCODE_TYPE_MASK: sprintf (typeString, "Remote DMA"); break; -+ case TR_LOCKQUEUE & TR_OPCODE_TYPE_MASK: sprintf (typeString, "Lock Queue"); break; -+ case TR_UNLOCKQUEUE & TR_OPCODE_TYPE_MASK: sprintf (typeString, "Unlock Queue"); break; -+ case TR_SENDDISCARD & TR_OPCODE_TYPE_MASK: sprintf (typeString, "Send Discard"); break; -+ case TR_DMAIDENTIFY & TR_OPCODE_TYPE_MASK: sprintf (typeString, "DMA Identify"); break; -+ case TR_THREADIDENTIFY & TR_OPCODE_TYPE_MASK: sprintf (typeString, "Thread Identify"); break; -+ case TR_GTE & TR_OPCODE_TYPE_MASK: sprintf (typeString, "GTE"); break; -+ case TR_LT & TR_OPCODE_TYPE_MASK: sprintf (typeString, "LT"); break; -+ case TR_EQ & TR_OPCODE_TYPE_MASK: sprintf (typeString, "EQ"); break; -+ case TR_NEQ & TR_OPCODE_TYPE_MASK: sprintf (typeString, "NEQ"); break; -+ case TR_WRITEWORD & TR_OPCODE_TYPE_MASK: sprintf (typeString, "Write Word"); break; -+ case TR_WRITEDOUBLEWORD & TR_OPCODE_TYPE_MASK: sprintf (typeString, "Write Double"); break; -+ case TR_ATOMICADDWORD & TR_OPCODE_TYPE_MASK: sprintf (typeString, "Atomic Add"); break; -+ case TR_TESTANDWRITE & TR_OPCODE_TYPE_MASK: sprintf (typeString, "Test and Write"); break; -+ default: sprintf (typeString, "Type=%d", Type & TR_OPCODE_TYPE_MASK); break; -+ } -+ } -+ sprintf (buffer, "%15s Addr=%08x Cntx=%08x", typeString, Addr, Context); -+ /*(Type & TR_SENDACK) ? " Sendack" : "", */ -+ /*(Type & TR_LAST_TRANS) ? " LastTrans" : "", */ -+ /*(Type & TR_WAIT_FOR_EOP) ? 
" WaitForEop" : ""); */ -+ } -+ -+ if (StatusValid) -+ { -+ sprintf (statusString, " Type=%s %x", MiToName (hdrp->s.IProcTrapStatus.s.TrapType), hdrp->s.IProcTrapStatus.Status); -+ strcat (buffer, statusString); -+ -+ if (hdrp->s.IProcTrapStatus.s.BadLength) -+ strcat (buffer, " BadLength"); -+ switch (hdrp->s.IProcTrapStatus.Status & CRC_MASK) -+ { -+ case CRC_STATUS_DISCARD: -+ strcat (buffer, " CRC Discard"); -+ break; -+ case CRC_STATUS_ERROR: -+ strcat (buffer, " CRC Error"); -+ break; -+ -+ case CRC_STATUS_BAD: -+ strcat (buffer, " CRC Bad"); -+ break; -+ } -+ } -+ -+ return (buffer); -+} -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan3/Makefile linux-2.6.9/drivers/net/qsnet/elan3/Makefile ---- clean/drivers/net/qsnet/elan3/Makefile 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan3/Makefile 2005-10-10 17:47:30.000000000 -0400 -@@ -0,0 +1,15 @@ -+# -+# Makefile for Quadrics QsNet -+# -+# Copyright (c) 2002-2004 Quadrics Ltd -+# -+# File: drivers/net/qsnet/elan3/Makefile -+# -+ -+ -+# -+ -+obj-$(CONFIG_ELAN3) += elan3.o -+elan3-objs := context.o cproc.o dproc.o elandebug.o elandev_generic.o elansyscall.o eventcookie.o iproc.o sdram.o minames.o network_error.o route_table.o tproc.o tprocinsts.o routecheck.o virtual_process.o elan3ops.o context_linux.o elandev_linux.o procfs_linux.o tproc_linux.o elan3mmu_generic.o elan3mmu_linux.o -+ -+EXTRA_CFLAGS += -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT -diff -urN clean/drivers/net/qsnet/elan3/Makefile.conf linux-2.6.9/drivers/net/qsnet/elan3/Makefile.conf ---- clean/drivers/net/qsnet/elan3/Makefile.conf 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan3/Makefile.conf 2005-09-07 10:39:38.000000000 -0400 -@@ -0,0 +1,10 @@ -+# Flags for generating QsNet Linux Kernel Makefiles -+MODNAME = elan3.o -+MODULENAME = elan3 -+KOBJFILES = context.o cproc.o dproc.o elandebug.o elandev_generic.o elansyscall.o eventcookie.o iproc.o sdram.o minames.o network_error.o route_table.o tproc.o tprocinsts.o routecheck.o virtual_process.o elan3ops.o context_linux.o elandev_linux.o procfs_linux.o tproc_linux.o elan3mmu_generic.o elan3mmu_linux.o -+EXPORT_KOBJS = elandev_linux.o procfs_linux.o -+CONFIG_NAME = CONFIG_ELAN3 -+SGALFC = -+# EXTRALINES START -+ -+# EXTRALINES END -diff -urN clean/drivers/net/qsnet/elan3/minames.c linux-2.6.9/drivers/net/qsnet/elan3/minames.c ---- clean/drivers/net/qsnet/elan3/minames.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan3/minames.c 2003-06-07 11:57:49.000000000 -0400 -@@ -0,0 +1,38 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: minames.c,v 1.12 2003/06/07 15:57:49 david Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/os/minames.c,v $*/ -+ -+#include -+#include -+ -+caddr_t -+MiToName (int mi) -+{ -+ static char space[32]; -+ static struct { -+ int mi; -+ char *name; -+ } info[] = { -+#include -+ }; -+ register int i; -+ -+ -+ for (i = 0; i < sizeof(info)/sizeof(info[0]); i++) -+ if (info[i].mi == mi) -+ return (info[i].name); -+ sprintf (space, "MI %x", mi); -+ return (space); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan3/network_error.c linux-2.6.9/drivers/net/qsnet/elan3/network_error.c ---- clean/drivers/net/qsnet/elan3/network_error.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan3/network_error.c 2004-10-28 07:51:00.000000000 -0400 -@@ -0,0 +1,777 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: network_error.c,v 1.33 2004/10/28 11:51:00 david Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/os/network_error.c,v $*/ -+ -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#ifdef DIGITAL_UNIX -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+ -+typedef xdrproc_t kxdrproc_t; -+#endif -+ -+#ifdef LINUX -+#include -+#include -+#include -+#include -+ -+#include -+#define SYS_NMLN __NEW_UTS_LEN -+#endif -+ -+#include -+ -+spinlock_t ResolveRequestLock; -+kcondvar_t ResolveRequestWait; -+ -+NETERR_RESOLVER *ResolveRequestHead; -+NETERR_RESOLVER **ResolveRequestTailp = &ResolveRequestHead; -+int ResolveRequestCount; -+int ResolveRequestThreads; -+int ResolveRequestMaxThreads = 4; -+int ResolveRequestTimeout = 60; -+ -+typedef struct neterr_server -+{ -+ struct neterr_server *Next; -+ struct neterr_server *Prev; -+ unsigned ElanId; -+ -+ char *Name; -+ int RefCount; -+ struct sockaddr_in Addr; -+} NETERR_SERVER; -+ -+#define NETERR_HASH_ENTRIES 64 -+#define NETERR_HASH(elanid) (((unsigned) elanid) % NETERR_HASH_ENTRIES) -+NETERR_SERVER *NeterrServerHash[NETERR_HASH_ENTRIES]; -+kmutex_t NeterrServerLock; -+ -+static NETERR_SERVER *FindNeterrServer (int elanId); -+static void DereferenceNeterrServer (NETERR_SERVER *server); -+static int CallNeterrServer (NETERR_SERVER *server, NETERR_MSG *msg); -+ -+void -+InitialiseNetworkErrorResolver () -+{ -+ spin_lock_init (&ResolveRequestLock); -+ kcondvar_init (&ResolveRequestWait); -+ -+ ResolveRequestHead = NULL; -+ ResolveRequestTailp = &ResolveRequestHead; -+ -+ kmutex_init (&NeterrServerLock); -+} -+ -+void -+FinaliseNetworkErrorResolver () -+{ -+ spin_lock_destroy (&ResolveRequestLock); -+ kcondvar_destroy (&ResolveRequestWait); -+ -+ kmutex_destroy (&NeterrServerLock); -+} -+ -+static NETERR_RESOLVER * -+AllocateNetworkErrorResolver (void) -+{ -+ NETERR_RESOLVER *rvp; -+ -+ KMEM_ZALLOC (rvp, NETERR_RESOLVER *, sizeof (NETERR_RESOLVER), TRUE); -+ spin_lock_init (&rvp->Lock); -+ -+ return (rvp); -+} -+ -+void -+FreeNetworkErrorResolver (NETERR_RESOLVER *rvp) -+{ -+ spin_lock_destroy (&rvp->Lock); -+ KMEM_FREE (rvp, sizeof (NETERR_RESOLVER)); -+} -+ -+static void -+elan3_neterr_resolver (void) -+{ -+ NETERR_RESOLVER *rvp; -+ NETERR_SERVER *server; -+ int status; -+ unsigned long flags; -+ -+ 
kernel_thread_init("elan3_neterr_resolver"); -+ spin_lock (&ResolveRequestLock); -+ -+ while ((rvp = ResolveRequestHead) != NULL) -+ { -+ if ((ResolveRequestHead = rvp->Next) == NULL) -+ ResolveRequestTailp = &ResolveRequestHead; -+ -+ spin_unlock (&ResolveRequestLock); -+ -+ PRINTF1 (DBG_DEVICE, DBG_NETERR, "elan3_neterr_resolver: rvp = %p\n", rvp); -+ PRINTF1 (DBG_DEVICE, DBG_NETERR, " Rail %d\n", rvp->Message.Rail); -+ PRINTF1 (DBG_DEVICE, DBG_NETERR, " SrcCapability %s\n", CapabilityString (&rvp->Message.SrcCapability)); -+ PRINTF1 (DBG_DEVICE, DBG_NETERR, " DstCapability %s\n", CapabilityString (&rvp->Message.DstCapability)); -+ PRINTF1 (DBG_DEVICE, DBG_NETERR, " CookieAddr %08x\n", rvp->Message.CookieAddr); -+ PRINTF1 (DBG_DEVICE, DBG_NETERR, " CookieVProc %08x\n", rvp->Message.CookieVProc); -+ PRINTF1 (DBG_DEVICE, DBG_NETERR, " NextCookie %08x\n", rvp->Message.NextCookie); -+ PRINTF1 (DBG_DEVICE, DBG_NETERR, " WaitForEop %08x\n", rvp->Message.WaitForEop); -+ -+ if ((server = FindNeterrServer (rvp->Location.loc_node)) == NULL) -+ status = ECONNREFUSED; -+ else if (ResolveRequestTimeout && ((int)(lbolt - rvp->Timestamp)) > (ResolveRequestTimeout*HZ)) -+ { -+ printk ("elan_neterr: rpc to '%s' timed out - context %d killed\n", server->Name, rvp->Message.SrcCapability.cap_mycontext); -+ status = ECONNABORTED; -+ -+ DereferenceNeterrServer (server); /* drop the reference taken by FindNeterrServer */ -+ } -+ else -+ { -+ status = CallNeterrServer (server, &rvp->Message); -+ -+ DereferenceNeterrServer (server); -+ } -+ -+ if ((status == EINTR || status == ETIMEDOUT) && rvp->Ctxt != NULL) -+ { -+ PRINTF1 (DBG_DEVICE, DBG_NETERR, "elan3_neterr_resolver: retry rvp=%p\n", rvp); -+ spin_lock (&ResolveRequestLock); -+ rvp->Next = NULL; -+ *ResolveRequestTailp = rvp; -+ ResolveRequestTailp = &rvp->Next; -+ } -+ else -+ { -+ rvp->Status = status; -+ -+ spin_lock (&rvp->Lock); -+ -+ if (rvp->Ctxt != NULL) -+ { -+ PRINTF2 (rvp->Ctxt, DBG_NETERR, "elan3_neterr_resolver: completing rvp %p for ctxt %p\n", rvp, rvp->Ctxt); -+ spin_lock_irqsave (&rvp->Ctxt->Device->IntrLock, flags); -+ -+ rvp->Completed = TRUE; -+ -+ kcondvar_wakeupall (&rvp->Ctxt->Wait, &rvp->Ctxt->Device->IntrLock); -+ -+ /* -+ * drop the locks out of order since the rvp can get freed -+ * as soon as we drop the IntrLock - so cannot reference the -+ * rvp after this. -+ */ -+ -+ spin_unlock (&rvp->Lock); -+ spin_unlock_irqrestore (&rvp->Ctxt->Device->IntrLock, flags); -+ } -+ else -+ { -+ PRINTF2 (DBG_DEVICE, DBG_NETERR, "elan3_neterr_resolver: completing rvp %p for deceased ctxt %p\n", rvp, rvp->Ctxt); -+ spin_unlock (&rvp->Lock); -+ FreeNetworkErrorResolver (rvp); -+ } -+ -+ spin_lock (&ResolveRequestLock); -+ ResolveRequestCount--; -+ } -+ } -+ -+ ResolveRequestThreads--; -+ -+ spin_unlock (&ResolveRequestLock); -+ kernel_thread_exit(); -+} -+ -+int -+QueueNetworkErrorResolver (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, NETERR_RESOLVER **rvpp) -+{ -+ int isdma = trap->DmaIdentifyTransaction != NULL; -+ E3_IprocTrapHeader_BE *hdrp = isdma ? trap->DmaIdentifyTransaction : trap->ThreadIdentifyTransaction; -+ E3_uint32 process = isdma ? (hdrp->s.TrAddr & 0xFFFF) : (hdrp->s.TrData0 & 0xFFFF); -+ NETERR_RESOLVER *rvp; -+ -+ PRINTF2 (ctxt, DBG_NETERR, "QueueNetworkErrorResolver: process = %d %s\n", process, isdma ? 
"(dma)" : "(thread)"); -+ -+ if ((rvp = AllocateNetworkErrorResolver()) == NULL) -+ { -+ PRINTF0 (ctxt, DBG_NETERR, "QueueNetworkErrorResolver: cannot allocate resolver\n"); -+ return (ENOMEM); -+ } -+ -+ rvp->Message.Rail = ctxt->Device->Devinfo.dev_rail; -+ -+ krwlock_read (&ctxt->VpLock); -+ rvp->Location = ProcessToLocation (ctxt, NULL, process, &rvp->Message.SrcCapability); -+ krwlock_done (&ctxt->VpLock); -+ -+ if (rvp->Location.loc_node == ELAN3_INVALID_NODE) -+ { -+ PRINTF0 (ctxt, DBG_NETERR, "QueueNetworkErrorResolver: invalid elan id\n"); -+ -+ FreeNetworkErrorResolver (rvp); -+ return (EINVAL); -+ } -+ -+ rvp->Message.DstCapability = ctxt->Capability; -+ rvp->Message.DstProcess = elan3_process (ctxt); -+ rvp->Message.WaitForEop = (trap->WaitForEopTransaction != NULL); -+ -+ if (isdma) -+ { -+ rvp->Message.CookieAddr = 0; -+ rvp->Message.CookieVProc = hdrp->s.TrAddr; -+ rvp->Message.NextCookie = 0; -+ } -+ else -+ { -+ rvp->Message.CookieAddr = hdrp->s.TrAddr; -+ rvp->Message.CookieVProc = hdrp->s.TrData0; -+ rvp->Message.NextCookie = hdrp->s.TrData1; -+ } -+ -+ rvp->Completed = FALSE; -+ rvp->Ctxt = ctxt; -+ rvp->Timestamp = lbolt; -+ -+ spin_lock (&ResolveRequestLock); -+ -+ rvp->Next = NULL; -+ *ResolveRequestTailp = rvp; -+ ResolveRequestTailp = &rvp->Next; -+ ResolveRequestCount++; -+ -+ kcondvar_wakeupone (&ResolveRequestWait, &ResolveRequestLock); -+ -+ if (ResolveRequestCount < ResolveRequestThreads || ResolveRequestThreads >= ResolveRequestMaxThreads) -+ spin_unlock (&ResolveRequestLock); -+ else -+ { -+ ResolveRequestThreads++; -+ -+ spin_unlock (&ResolveRequestLock); -+ if (kernel_thread_create (elan3_neterr_resolver, NULL) == NULL) -+ { -+ spin_lock (&ResolveRequestLock); -+ ResolveRequestThreads--; -+ spin_unlock (&ResolveRequestLock); -+ -+ if (ResolveRequestThreads == 0) -+ { -+ PRINTF0 (ctxt, DBG_NETERR, "QueueNetworkErrorResolver: cannot thread pool\n"); -+ -+ FreeNetworkErrorResolver (rvp); -+ return (ENOMEM); -+ } -+ } -+ } -+ -+ *rvpp = rvp; -+ return (ESUCCESS); -+} -+ -+void -+CancelNetworkErrorResolver (NETERR_RESOLVER *rvp) -+{ -+ spin_lock (&rvp->Lock); -+ -+ PRINTF2 (rvp->Ctxt, DBG_NETERR, "CancelNetworkErrorResolver: rvp=%p %s\n", rvp, rvp->Completed ? 
"Completed" : "Pending"); -+ -+ if (rvp->Completed) -+ { -+ spin_unlock (&rvp->Lock); -+ FreeNetworkErrorResolver (rvp); -+ } -+ else -+ { -+ rvp->Ctxt = NULL; -+ spin_unlock (&rvp->Lock); -+ } -+} -+ -+static NETERR_FIXUP * -+AllocateNetworkErrorFixup (void) -+{ -+ NETERR_FIXUP *nef; -+ -+ KMEM_ZALLOC (nef, NETERR_FIXUP *, sizeof (NETERR_FIXUP), TRUE); -+ -+ if (nef == (NETERR_FIXUP *) NULL) -+ return (NULL); -+ -+ kcondvar_init (&nef->Wait); -+ -+ return (nef); -+} -+ -+static void -+FreeNetworkErrorFixup (NETERR_FIXUP *nef) -+{ -+ kcondvar_destroy (&nef->Wait); -+ KMEM_FREE (nef, sizeof (NETERR_FIXUP)); -+} -+ -+int -+ExecuteNetworkErrorFixup (NETERR_MSG *msg) -+{ -+ ELAN3_DEV *dev; -+ ELAN3_CTXT *ctxt; -+ NETERR_FIXUP *nef; -+ NETERR_FIXUP **predp; -+ int rc; -+ unsigned long flags; -+ -+ PRINTF1 (DBG_DEVICE, DBG_NETERR, "ExecuteNetworkErrorFixup: msg = %p\n", msg); -+ PRINTF1 (DBG_DEVICE, DBG_NETERR, " Rail %d\n", msg->Rail); -+ PRINTF1 (DBG_DEVICE, DBG_NETERR, " SrcCapability %s\n", CapabilityString (&msg->SrcCapability)); -+ PRINTF1 (DBG_DEVICE, DBG_NETERR, " DstCapability %s\n", CapabilityString (&msg->DstCapability)); -+ PRINTF1 (DBG_DEVICE, DBG_NETERR, " CookieAddr %08x\n", msg->CookieAddr); -+ PRINTF1 (DBG_DEVICE, DBG_NETERR, " CookieVProc %08x\n", msg->CookieVProc); -+ PRINTF1 (DBG_DEVICE, DBG_NETERR, " NextCookie %08x\n", msg->NextCookie); -+ PRINTF1 (DBG_DEVICE, DBG_NETERR, " WaitForEop %08x\n", msg->WaitForEop); -+ -+ if ((dev = elan3_device (msg->Rail)) == NULL) -+ return (ESRCH); -+ -+ if ((nef = AllocateNetworkErrorFixup()) == NULL) -+ return (ENOMEM); -+ -+ if (nef == (NETERR_FIXUP *) NULL) -+ return (ENOMEM); -+ -+ bcopy (msg, &nef->Message, sizeof (NETERR_MSG)); -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ -+ ctxt = ELAN3_DEV_CTX_TABLE(dev, msg->SrcCapability.cap_mycontext); -+ -+ if (ctxt == NULL) -+ rc = ESRCH; -+ else if (!ELAN_CAP_MATCH (&msg->SrcCapability, &ctxt->Capability)) -+ rc = EPERM; -+ else -+ { -+ if (ctxt->Status & CTXT_NO_LWPS) -+ rc = EAGAIN; -+ else -+ { -+ for (predp = &ctxt->NetworkErrorFixups; *predp != NULL; predp = &(*predp)->Next) -+ ; -+ nef->Next = NULL; -+ *predp = nef; -+ -+ kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock); -+ -+ while (! 
nef->Completed) -+ kcondvar_wait (&nef->Wait, &dev->IntrLock, &flags); -+ -+ rc = nef->Status; -+ } -+ } -+ -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ -+ FreeNetworkErrorFixup (nef); -+ -+ return (rc); -+} -+ -+void -+CompleteNetworkErrorFixup (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef, int status) -+{ -+ ELAN3_DEV *dev = ctxt->Device; -+ unsigned long flags; -+ -+ PRINTF2 (ctxt, DBG_NETERR, "CompleteNetworkErrorFixup: %p %d\n", nef, status); -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ -+ nef->Status = status; -+ nef->Completed = TRUE; -+ kcondvar_wakeupone (&nef->Wait, &dev->IntrLock); -+ -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+} -+ -+ -+static NETERR_SERVER * -+NewNeterrServer (int elanId, struct sockaddr_in *addr, char *name) -+{ -+ NETERR_SERVER *server; -+ -+ KMEM_ZALLOC (server, NETERR_SERVER *, sizeof (NETERR_SERVER), TRUE); -+ KMEM_ALLOC (server->Name, char *, strlen (name)+1, TRUE); -+ -+ bcopy (addr, &server->Addr, sizeof (struct sockaddr_in)); -+ bcopy (name, server->Name, strlen (name)+1); -+ -+ server->ElanId = elanId; -+ server->RefCount = 1; -+ -+ return (server); -+} -+ -+static void -+DeleteNeterrServer (NETERR_SERVER *server) -+{ -+ KMEM_FREE (server->Name, strlen(server->Name)+1); -+ KMEM_FREE (server, sizeof (NETERR_SERVER)); -+} -+ -+static NETERR_SERVER * -+FindNeterrServer (int elanId) -+{ -+ NETERR_SERVER *server; -+ -+ kmutex_lock (&NeterrServerLock); -+ -+ for (server = NeterrServerHash[NETERR_HASH(elanId)]; server != NULL; server = server->Next) -+ if (server->ElanId == elanId) -+ break; -+ -+ if (server != NULL) -+ server->RefCount++; -+ kmutex_unlock (&NeterrServerLock); -+ -+ return (server); -+} -+ -+static void -+DereferenceNeterrServer (NETERR_SERVER *server) -+{ -+ kmutex_lock (&NeterrServerLock); -+ if ((--server->RefCount) == 0) -+ DeleteNeterrServer (server); -+ kmutex_unlock (&NeterrServerLock); -+} -+ -+int -+AddNeterrServer (int elanId, struct sockaddr_in *addr, char *name) -+{ -+ NETERR_SERVER *server; -+ NETERR_SERVER *old; -+ int hashval = NETERR_HASH(elanId); -+ -+ server = NewNeterrServer (elanId, addr, name); -+ -+ if (server == NULL) -+ return (ENOMEM); -+ -+ kmutex_lock (&NeterrServerLock); -+ for (old = NeterrServerHash[hashval]; old != NULL; old = old->Next) -+ if (old->ElanId == elanId) -+ break; -+ -+ /* remove "old" server from hash table */ -+ if (old != NULL) -+ { -+ if (old->Prev) -+ old->Prev->Next = old->Next; -+ else -+ NeterrServerHash[hashval] = old->Next; -+ if (old->Next) -+ old->Next->Prev = old->Prev; -+ } -+ -+ /* insert "new" server into hash table */ -+ if ((server->Next = NeterrServerHash[hashval]) != NULL) -+ server->Next->Prev = server; -+ server->Prev = NULL; -+ NeterrServerHash[hashval] = server; -+ -+ kmutex_unlock (&NeterrServerLock); -+ -+ if (old != NULL) -+ DereferenceNeterrServer (old); -+ -+ return (ESUCCESS); -+} -+ -+int -+AddNeterrServerSyscall (int elanId, void *addrp, void *namep, char *unused) -+{ -+ struct sockaddr_in addr; -+ char *name; -+ int error; -+ int nob; -+ -+ /* Sanity check the supplied elanId argument */ -+ if (elanId < 0) -+ return ( set_errno(EINVAL) ); -+ -+ KMEM_ALLOC (name, caddr_t, SYS_NMLN, TRUE); -+ -+ if (copyin ((caddr_t) addrp, (caddr_t) &addr, sizeof (addr)) || -+ copyinstr ((caddr_t) namep, name, SYS_NMLN, &nob)) -+ { -+ error = EFAULT; -+ } -+ else -+ { -+ PRINTF2 (DBG_DEVICE, DBG_NETERR, "AddNeterrServer: '%s' at elanid %d\n", name, elanId); -+ -+ error = AddNeterrServer (elanId, &addr, name); -+ } -+ KMEM_FREE (name, SYS_NMLN); -+ -+ return (error ? 
set_errno(error) : ESUCCESS); -+} -+ -+ -+#if defined(DIGITAL_UNIX) -+static int -+CallNeterrServer (NETERR_SERVER *server, NETERR_MSG *msg) -+{ -+ cred_t *cr = crget(); -+ struct rpc_err rpcerr; -+ extern cred_t *kcred; -+ struct timeval wait; -+ enum clnt_stat rc; -+ int status; -+ CLIENT *clnt; -+ int error; -+ -+ PRINTF4 (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s) - family=%d port=%d addr=%08x\n", server->Name, -+ server->Addr.sin_family, server->Addr.sin_port, server->Addr.sin_addr.s_addr); -+ -+ if ((clnt = clntkudp_create (&server->Addr, (struct sockaddr_in *)0, NETERR_PROGRAM, NETERR_VERSION, 1, cr)) == NULL) -+ { -+ PRINTF1 (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s): clntkudp_create error\n", server->Name); -+ -+ return (ENOMEM); -+ } -+ -+ wait.tv_sec = NETERR_RPC_TIMEOUT; -+ wait.tv_usec = 0; -+ -+ PRINTF2 (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s): CLNT_CALL timeout = %d\n", server->Name, NETERR_RPC_TIMEOUT); -+ -+ rc = CLNT_CALL(clnt, NETERR_FIXUP_RPC, xdr_neterr_msg, (void *)msg, xdr_int, (void *) &status, wait); -+ -+ PRINTF3 (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s): CLNT_CALL -> %d (%s)\n", server->Name, rc, clnt_sperrno(rc));; -+ -+ switch (rc) -+ { -+ case RPC_SUCCESS: -+ break; -+ -+ case RPC_INTR: -+ status = EINTR; -+ break; -+ -+ case RPC_TIMEDOUT: -+ status = ETIMEDOUT; -+ break; -+ -+ default: -+ printf ("CallNeterrServer(%s): %s\n", server->Name, clnt_sperrno(status)); -+ status = ENOENT; -+ break; -+ } -+ -+ CLNT_DESTROY(clnt); -+ -+ crfree(cr); -+ -+ ASSERT(rc == RPC_SUCCESS || status != 0); -+ -+ PRINTF2 (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s): status=%d\n", server->Name, status); -+ -+ return (status); -+} -+#endif -+ -+#if defined(LINUX) -+ -+#define xdrsize(type) ((sizeof(type) + 3) >> 2) -+ -+static int -+xdr_error(struct rpc_rqst *req, u32 *p, void *dummy) -+{ -+ return -EIO; -+} -+ -+static int -+xdr_decode_int(struct rpc_rqst *req, u32 *p, int *res) -+{ -+ *res = ntohl(*p++); -+ return 0; -+} -+ -+#define XDR_capability_sz ((12 + BT_BITOUL(ELAN3_MAX_VPS)) * sizeof (u32)) -+ -+static int -+xdr_encode_capability(u32 *p, ELAN_CAPABILITY *cap) -+{ -+ u32 *pp = p; -+ -+ /* basic xdr unit is u32 - for opaque types we must round up to that */ -+ memcpy(p, &cap->cap_userkey, sizeof(cap->cap_userkey)); -+ p += xdrsize(cap->cap_userkey); -+ -+ *p++ = htonl(cap->cap_version); -+ ((u16 *) (p++))[1] = htons(cap->cap_type); -+ *p++ = htonl(cap->cap_lowcontext); -+ *p++ = htonl(cap->cap_highcontext); -+ *p++ = htonl(cap->cap_mycontext); -+ *p++ = htonl(cap->cap_lownode); -+ *p++ = htonl(cap->cap_highnode); -+ *p++ = htonl(cap->cap_railmask); -+ -+ memcpy(p, &cap->cap_bitmap[0], sizeof(cap->cap_bitmap)); -+ p += xdrsize(cap->cap_bitmap); -+ -+ ASSERT (((unsigned long) p - (unsigned long) pp) == XDR_capability_sz); -+ -+ return (p - pp); -+} -+ -+ -+#define XDR_neterr_sz (((1 + 5) * sizeof (u32)) + (2*XDR_capability_sz)) -+ -+static int -+xdr_encode_neterr_msg(struct rpc_rqst *req, u32 *p, NETERR_MSG *msg) -+{ -+ u32 *pp = p; -+ -+ *p++ = htonl(msg->Rail); -+ -+ p += xdr_encode_capability(p, &msg->SrcCapability); -+ p += xdr_encode_capability(p, &msg->DstCapability); -+ -+ *p++ = htonl(msg->DstProcess); -+ *p++ = htonl(msg->CookieAddr); -+ *p++ = htonl(msg->CookieVProc); -+ *p++ = htonl(msg->NextCookie); -+ *p++ = htonl(msg->WaitForEop); -+ -+ ASSERT (((unsigned long) p - (unsigned long) pp) == XDR_neterr_sz); -+ -+ req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); -+ -+ return 0; -+} -+ -+static struct rpc_procinfo neterr_procedures[2] = -+{ -+#if 
LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) -+# define RPC_ID_NULL "neterr_null" -+# define RPC_ID_FIXUP_RPC "neterr_fixup_rpc" -+#else -+# define RPC_ID_NULL NETERR_NULL_RPC -+# define RPC_ID_FIXUP_RPC NETERR_FIXUP_RPC -+#endif -+ { -+ RPC_ID_NULL, /* procedure name or number*/ -+ (kxdrproc_t) xdr_error, /* xdr encode fun */ -+ (kxdrproc_t) xdr_error, /* xdr decode fun */ -+ 0, /* req buffer size */ -+ 0, /* call count */ -+ }, -+ { -+ RPC_ID_FIXUP_RPC, -+ (kxdrproc_t) xdr_encode_neterr_msg, -+ (kxdrproc_t) xdr_decode_int, -+ XDR_neterr_sz, -+ 0, -+ }, -+}; -+ -+static struct rpc_version neterr_version1 = -+{ -+ 1, /* version */ -+ 2, /* number of procedures */ -+ neterr_procedures /* procedures */ -+}; -+ -+static struct rpc_version *neterr_version[] = -+{ -+ NULL, -+ &neterr_version1, -+}; -+ -+static struct rpc_stat neterr_stats; -+ -+static struct rpc_program neterr_program = -+{ -+ NETERR_SERVICE, -+ NETERR_PROGRAM, -+ sizeof(neterr_version)/sizeof(neterr_version[0]), -+ neterr_version, -+ &neterr_stats, -+}; -+ -+static int -+CallNeterrServer (NETERR_SERVER *server, NETERR_MSG *msg) -+{ -+ struct rpc_xprt *xprt; -+ struct rpc_clnt *clnt; -+ struct rpc_timeout to; -+ int rc, status; -+ -+ PRINTF (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s)\n", server->Name); -+ -+ xprt_set_timeout(&to, 1, NETERR_RPC_TIMEOUT * HZ); -+ -+ if ((xprt = xprt_create_proto(IPPROTO_UDP, &server->Addr, &to)) == NULL) -+ { -+ PRINTF (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s) xprt_create_proto failed\n", server->Name); -+ return EFAIL; -+ } -+ -+ if ((clnt = rpc_create_client(xprt, server->Name, &neterr_program, NETERR_VERSION, RPC_AUTH_NULL)) == NULL) -+ { -+ PRINTF (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s) rpc_create_client failed\n", server->Name); -+ xprt_destroy (xprt); -+ -+ return EFAIL; -+ } -+ -+ clnt->cl_softrtry = 1; -+ clnt->cl_chatty = 0; -+ clnt->cl_oneshot = 1; -+ clnt->cl_intr = 0; -+ -+ if ((rc = rpc_call(clnt, NETERR_FIXUP_RPC, msg, &status, 0)) < 0) -+ { -+ /* RPC error has occured - determine whether we should retry */ -+ -+ status = ETIMEDOUT; -+ } -+ -+ PRINTF (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s): -> %d\n", server->Name, status); -+ -+ return (status); -+} -+ -+#endif /* defined(LINUX) */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan3/procfs_linux.c linux-2.6.9/drivers/net/qsnet/elan3/procfs_linux.c ---- clean/drivers/net/qsnet/elan3/procfs_linux.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan3/procfs_linux.c 2005-09-07 10:35:03.000000000 -0400 -@@ -0,0 +1,195 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: procfs_linux.c,v 1.21.8.2 2005/09/07 14:35:03 mike Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/os/procfs_linux.c,v $*/ -+ -+#include -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include -+ -+struct proc_dir_entry *elan3_procfs_root; -+struct proc_dir_entry *elan3_config_root; -+ -+static int -+proc_read_position (char *page, char **start, off_t off, -+ int count, int *eof, void *data) -+{ -+ ELAN3_DEV *dev = (ELAN3_DEV *) data; -+ int len; -+ -+ if (dev->Position.pos_mode == ELAN_POS_UNKNOWN) -+ len = sprintf (page, "\n"); -+ else -+ len = sprintf (page, -+ "NodeId %d\n" -+ "NumLevels %d\n" -+ "NumNodes %d\n", -+ dev->Position.pos_nodeid, dev->Position.pos_levels, dev->Position.pos_nodes); -+ -+ return (qsnet_proc_calc_metrics (page, start, off, count, eof, len)); -+} -+ -+static int -+proc_write_position (struct file *file, const char *buf, unsigned long count, void *data) -+{ -+ ELAN3_DEV *dev = (ELAN3_DEV *) data; -+ unsigned nodeid = ELAN3_INVALID_NODE; -+ unsigned numnodes = 0; -+ char *page, *p; -+ int res; -+ -+ if (count == 0) -+ return (0); -+ -+ if (count >= PAGE_SIZE) -+ return (-EINVAL); -+ -+ if ((page = (char *) __get_free_page (GFP_KERNEL)) == NULL) -+ return (-ENOMEM); -+ -+ MOD_INC_USE_COUNT; -+ -+ if (copy_from_user (page, buf, count)) -+ res = -EFAULT; -+ else -+ { -+ page[count] = '\0'; -+ -+ if (page[count-1] == '\n') -+ page[count-1] = '\0'; -+ -+ if (! strcmp (page, "")) -+ { -+ dev->Position.pos_mode = ELAN_POS_UNKNOWN; -+ dev->Position.pos_nodeid = ELAN3_INVALID_NODE; -+ dev->Position.pos_nodes = 0; -+ dev->Position.pos_levels = 0; -+ } -+ else -+ { -+ for (p = page; *p; ) -+ { -+ while (isspace (*p)) -+ p++; -+ -+ if (! strncmp (p, "NodeId=", strlen("NodeId="))) -+ nodeid = simple_strtoul (p + strlen ("NodeId="), NULL, 0); -+ if (! 
strncmp (p, "NumNodes=", strlen ("NumNodes="))) -+ numnodes = simple_strtoul (p + strlen ("NumNodes="), NULL, 0); -+ -+ while (*p && !isspace(*p)) -+ p++; -+ } -+ -+ if (ComputePosition (&dev->Position, nodeid, numnodes, dev->Devinfo.dev_num_down_links_value) != 0) -+ printk ("elan%d: invalid values for NodeId=%d NumNodes=%d\n", dev->Instance, nodeid, numnodes); -+ else -+ printk ("elan%d: setting NodeId=%d NumNodes=%d NumLevels=%d\n", dev->Instance, dev->Position.pos_nodeid, -+ dev->Position.pos_nodes, dev->Position.pos_levels); -+ } -+ } -+ -+ MOD_DEC_USE_COUNT; -+ free_page ((unsigned long) page); -+ -+ return (count); -+} -+ -+ -+void -+elan3_procfs_device_init (ELAN3_DEV *dev) -+{ -+ struct proc_dir_entry *dir, *p; -+ char name[NAME_MAX]; -+ -+ sprintf (name, "device%d", dev->Instance); -+ dir = dev->Osdep.procdir = proc_mkdir (name, elan3_procfs_root); -+ -+ if ((p = create_proc_entry ("position", 0, dir)) != NULL) -+ { -+ p->read_proc = proc_read_position; -+ p->write_proc = proc_write_position; -+ p->data = dev; -+ p->owner = THIS_MODULE; -+ } -+ -+} -+ -+void -+elan3_procfs_device_fini (ELAN3_DEV *dev) -+{ -+ struct proc_dir_entry *dir = dev->Osdep.procdir; -+ char name[NAME_MAX]; -+ -+ remove_proc_entry ("position", dir); -+ -+ sprintf (name, "device%d", dev->Instance); -+ remove_proc_entry (name, elan3_procfs_root); -+} -+ -+void -+elan3_procfs_init() -+{ -+ extern int eventint_punt_loops; -+ extern int ResolveRequestTimeout; -+ -+ elan3_procfs_root = proc_mkdir("elan3", qsnet_procfs_root); -+ -+ elan3_config_root = proc_mkdir("config", elan3_procfs_root); -+ -+ qsnet_proc_register_hex (elan3_config_root, "elan3_debug", &elan3_debug, 0); -+ qsnet_proc_register_hex (elan3_config_root, "elan3_debug_console", &elan3_debug_console, 0); -+ qsnet_proc_register_hex (elan3_config_root, "elan3_debug_buffer", &elan3_debug_buffer, 0); -+ qsnet_proc_register_hex (elan3_config_root, "elan3mmu_debug", &elan3mmu_debug, 0); -+ qsnet_proc_register_int (elan3_config_root, "eventint_punt_loops", &eventint_punt_loops, 0); -+ qsnet_proc_register_int (elan3_config_root, "neterr_timeout", &ResolveRequestTimeout, 0); -+ -+#if defined(__ia64__) -+ { -+ extern int enable_sdram_writecombining; -+ qsnet_proc_register_int (elan3_config_root, "enable_sdram_writecombining", &enable_sdram_writecombining, 0); -+ } -+#endif -+} -+ -+void -+elan3_procfs_fini() -+{ -+#if defined(__ia64__) -+ remove_proc_entry ("enable_sdram_writecombining", elan3_config_root); -+#endif -+ remove_proc_entry ("neterr_timeout", elan3_config_root); -+ remove_proc_entry ("eventint_punt_loops", elan3_config_root); -+ remove_proc_entry ("elan3mmu_debug", elan3_config_root); -+ remove_proc_entry ("elan3_debug_buffer", elan3_config_root); -+ remove_proc_entry ("elan3_debug_console", elan3_config_root); -+ remove_proc_entry ("elan3_debug", elan3_config_root); -+ -+ remove_proc_entry ("config", elan3_procfs_root); -+ remove_proc_entry ("version", elan3_procfs_root); -+ -+ remove_proc_entry ("elan3", qsnet_procfs_root); -+} -+ -+EXPORT_SYMBOL(elan3_procfs_root); -+EXPORT_SYMBOL(elan3_config_root); -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan3/quadrics_version.h linux-2.6.9/drivers/net/qsnet/elan3/quadrics_version.h ---- clean/drivers/net/qsnet/elan3/quadrics_version.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan3/quadrics_version.h 2005-09-07 10:39:49.000000000 -0400 -@@ -0,0 +1 @@ -+#define QUADRICS_VERSION "5.11.3qsnet" -diff -urN 
clean/drivers/net/qsnet/elan3/routecheck.c linux-2.6.9/drivers/net/qsnet/elan3/routecheck.c ---- clean/drivers/net/qsnet/elan3/routecheck.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan3/routecheck.c 2003-09-24 09:57:25.000000000 -0400 -@@ -0,0 +1,313 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+/* ------------------------------------------------------------- */ -+ -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* ---------------------------------------------------------------------- */ -+typedef struct elan3_net_location { -+ int netid; -+ int plane; -+ int level; -+} ELAN3_NET_LOCATION; -+/* ---------------------------------------------------------------------- */ -+#define FLIT_LINK_ARRAY_MAX (ELAN3_MAX_LEVELS*2) -+/* ---------------------------------------------------------------------- */ -+int -+elan3_route_follow_link( ELAN3_CTXT *ctxt, ELAN3_NET_LOCATION *loc, int link) -+{ -+ ELAN_POSITION *pos = &ctxt->Position; -+ -+ if ((link<0) || (link>7)) -+ { -+ PRINTF1 (ctxt, DBG_VP, "elan3_route_follow_link: link (%d) out of range \n",link); -+ return (ELAN3_ROUTE_INVALID); -+ } -+ -+ /* going up or down ? */ -+ if ( link >= pos->pos_arity[loc->level] ) -+ { -+ /* Up */ -+ if (loc->level >= pos->pos_levels) -+ loc->plane = 0; -+ else -+ { -+ if ((loc->level == 1) && (pos->pos_arity[0] == 8)) /* oddness in some machines ie 512 */ -+ loc->plane = (16 * ( loc->plane / 8 )) + (4 * ( loc->plane % 4)) -+ +(link - pos->pos_arity[loc->level]); -+ else -+ loc->plane = (loc->plane * (8 - pos->pos_arity[loc->level])) -+ +(link - pos->pos_arity[loc->level]); -+ } -+ loc->level--; -+ if ( loc->level < 0 ) -+ { -+ PRINTF0 (ctxt, DBG_VP, "elan3_route_follow_link: link goes off the top\n"); -+ return (ELAN3_ROUTE_INVALID_LEVEL); -+ } -+ loc->netid = loc->netid / pos->pos_arity[loc->level]; -+ } -+ else -+ { -+ /* going down */ -+ if ((loc->level == 0) && (pos->pos_arity[0] == 8)) /* oddness in some machines ie 512 */ -+ loc->netid = link % 2; -+ else -+ loc->netid =(loc->netid * pos->pos_arity[loc->level])+link; -+ -+ loc->level++; -+ if (loc->level > pos->pos_levels) -+ { -+ PRINTF0 (ctxt, DBG_VP, "elan3_route_follow_link: link goes off the bottom\n"); -+ return (ELAN3_ROUTE_INVALID_LEVEL); -+ } -+ -+ if ( loc->level >= (pos->pos_levels-1)) -+ loc->plane = 0; -+ else -+ if ((loc->level == 1) && (pos->pos_arity[0] == 8)) /* oddness in some machines ie 512 */ -+ loc->plane = (((loc->plane)>>2)*2) - ( ((loc->plane)>>2) & 3 ) + ((link<2)?0:4); /* ((p/4) % 4) */ -+ else -+ loc->plane = loc->plane/(8-pos->pos_arity[loc->level]); -+ } -+ return (ELAN3_ROUTE_SUCCESS); -+} -+/* ---------------------------------------------------------------------- */ -+int /* assumes they are connected, really only used for finding the MyLink */ -+elan3_route_get_mylink (ELAN_POSITION *pos, ELAN3_NET_LOCATION *locA, ELAN3_NET_LOCATION *locB) -+{ -+ /* whats the My Link for locA to LocB */ -+ if ( locA->level > locB->level ) -+ return locB->plane - (locA->plane * (8 - pos->pos_arity[locA->level])) + pos->pos_arity[locA->level]; -+ -+ return locB->netid - (locA->netid * pos->pos_arity[locA->level]); -+} -+/* ---------------------------------------------------------------------- */ -+#define FIRST_GET_HIGH_PRI(FLIT) (FLIT & FIRST_HIGH_PRI) -+#define FIRST_GET_AGE(FLIT) ((FLIT & FIRST_AGE(15))>>11) -+#define 
FIRST_GET_TIMEOUT(FLIT) ((FLIT & FIRST_TIMEOUT(3))>>9) -+#define FIRST_GET_NEXT(FLIT) ((FLIT & FIRST_PACKED(3))>>7) -+#define FIRST_GET_ROUTE(FLIT) (FLIT & 0x7f) -+#define FIRST_GET_BCAST(FLIT) (FLIT & 0x40) -+#define FIRST_GET_IS_INVALID(FLIT) ((FLIT & 0x78) == 0x08) -+#define FIRST_GET_TYPE(FLIT) ((FLIT & 0x30)>>4) -+#define PRF_GET_ROUTE(FLIT,N) ((FLIT >> (N*4)) & 0x0F) -+#define PRF_GET_IS_MYLINK(ROUTE) (ROUTE == PACKED_MYLINK) -+#define PRF_GET_IS_NORMAL(ROUTE) (ROUTE & 0x8) -+#define PRF_GET_NORMAL_LINK(ROUTE) (ROUTE & 0x7) -+#define PRF_MOVE_ON(INDEX,NEXT) do { if (NEXT==3) {NEXT=0;INDEX++;} else {NEXT++; }} while (0); -+/* ---------------------------------------------------------------------- */ -+int /* turn level needed or -1 if not possible */ -+elan3_route_get_min_turn_level( ELAN_POSITION *pos, int nodeId) -+{ -+ int l,range = 1; -+ -+ for(l=pos->pos_levels-1;l>=0;l--) -+ { -+ range = range * pos->pos_arity[l]; -+ -+ if ( ((pos->pos_nodeid - (pos->pos_nodeid % range)) <= nodeId ) -+ && (nodeId <= (pos->pos_nodeid - (pos->pos_nodeid % range)+range -1))) -+ return l; -+ } -+ return -1; -+} -+/* ---------------------------------------------------------------------- */ -+int -+elan3_route_check(ELAN3_CTXT *ctxt, E3_uint16 *flits, int destNodeId) -+{ -+ ELAN3_NET_LOCATION lastLoc,currLoc; -+ int err; -+ int turnLevel; -+ int goingDown; -+ int lnk,index,next,val; -+ ELAN_POSITION *pos = &ctxt->Position; -+ -+ /* is the dest possible */ -+ if ( (destNodeId <0 ) || (destNodeId >= pos->pos_nodes)) -+ return (ELAN3_ROUTE_PROC_RANGE); -+ -+ /* -+ * walk the route, -+ * - to see if we get there -+ * - checking we dont turn around -+ */ -+ currLoc.netid = pos->pos_nodeid; /* the elan */ -+ currLoc.plane = 0; -+ currLoc.level = pos->pos_levels; -+ -+ turnLevel = currLoc.level; /* track the how far the route goes in */ -+ goingDown = 0; /* once set we cant go up again ie only one change of direction */ -+ -+ /* move onto the network from the elan */ -+ if ((err=elan3_route_follow_link(ctxt,&currLoc,4)) != ELAN3_ROUTE_SUCCESS) -+ { -+ PRINTF0 (ctxt, DBG_VP, "elan3_route_check: initial elan3_route_follow_link failed\n"); -+ return err; -+ } -+ /* do the first part of flit */ -+ switch ( FIRST_GET_TYPE(flits[0]) ) -+ { -+ case 0 /* sent */ : { lnk = (flits[0] & 0x7); break; } -+ case PACKED_MYLINK : { lnk = pos->pos_nodeid % pos->pos_arity[pos->pos_levels-1]; break; } -+ case PACKED_ADAPTIVE : { lnk = 7; /* all routes are the same just check one */ break; } -+ default : -+ PRINTF1 (ctxt, DBG_VP, "elan3_route_check: unexpected first flit (%d)\n",flits[0]); -+ return (ELAN3_ROUTE_INVALID); -+ } -+ -+ /* move along this link and check new location */ -+ memcpy(&lastLoc,&currLoc,sizeof(ELAN3_NET_LOCATION)); /* keep track of last loc */ -+ if ((err=elan3_route_follow_link(ctxt,&currLoc,lnk)) != ELAN3_ROUTE_SUCCESS ) -+ { -+ PRINTF0 (ctxt, DBG_VP, "elan3_route_check: elan3_route_follow_link failed\n"); -+ return err; -+ } -+ if ((currLoc.level > pos->pos_levels) || (currLoc.level < 0 )) -+ { -+ PRINTF0 (ctxt, DBG_VP, "elan3_route_check: route leaves machine\n"); -+ return (ELAN3_ROUTE_INVALID_LEVEL); -+ } -+ if ( lastLoc.level < currLoc.level ) -+ { -+ turnLevel = lastLoc.level; -+ goingDown = 1; -+ } -+ else -+ { -+ if (turnLevel > currLoc.level) -+ turnLevel = currLoc.level; -+ if (goingDown) -+ { -+ PRINTF0 (ctxt, DBG_VP, "elan3_route_check: route ocilated\n"); -+ return (ELAN3_ROUTE_OCILATES); -+ } -+ } -+ -+ /* loop on doing the remaining flits */ -+ index = 1; -+ next = 
FIRST_GET_NEXT(flits[0]); -+ val = PRF_GET_ROUTE(flits[index],next); -+ while(val) -+ { -+ if (PRF_GET_IS_NORMAL(val) ) -+ lnk = PRF_GET_NORMAL_LINK(val); -+ else -+ { -+ switch ( val ) -+ { -+ case PACKED_MYLINK : -+ { -+ lnk = elan3_route_get_mylink(pos, &currLoc,&lastLoc); -+ break; -+ } -+ default : -+ PRINTF1 (ctxt, DBG_VP, "elan3_route_check: unexpected packed flit (%d)\n",val); -+ return (ELAN3_ROUTE_INVALID); -+ } -+ } -+ -+ /* move along this link and check new location */ -+ memcpy(&lastLoc,&currLoc,sizeof(ELAN3_NET_LOCATION)); /* keep track of last loc */ -+ if ((err=elan3_route_follow_link(ctxt,&currLoc,lnk)) != ELAN3_ROUTE_SUCCESS) -+ return err; -+ -+ if ((currLoc.level > pos->pos_levels ) || ( currLoc.level < 0 )) -+ { -+ PRINTF0 (ctxt, DBG_VP, "elan3_route_check: route leaves machine\n"); -+ return (ELAN3_ROUTE_INVALID_LEVEL); -+ } -+ -+ if ( lastLoc.level < currLoc.level ) -+ goingDown = 1; -+ else -+ { -+ if (turnLevel > currLoc.level) -+ turnLevel = currLoc.level; -+ if (goingDown) -+ { -+ PRINTF0 (ctxt, DBG_VP, "elan3_route_check: route ocilated\n"); -+ return (ELAN3_ROUTE_OCILATES); -+ } -+ } -+ -+ /* move to next part of flit */ -+ PRF_MOVE_ON(index,next); -+ if ( index >= MAX_FLITS) -+ { -+ PRINTF0 (ctxt, DBG_VP, "elan3_route_check: route too long\n"); -+ return (ELAN3_ROUTE_TOO_LONG); -+ } -+ /* extract the new value */ -+ val = PRF_GET_ROUTE(flits[index],next); -+ } -+ -+ /* have we got to where we want ? */ -+ if ((currLoc.level != pos->pos_levels) || (currLoc.netid != destNodeId)) -+ { -+ PRINTF2 (ctxt, DBG_VP, "elan3_route_check: goes to %d instead of %d\n",currLoc.netid , destNodeId ); -+ return (ELAN3_ROUTE_WRONG_DEST); -+ } -+ -+ /* -+ * there is the case of src == dest -+ * getTurnLevel returns pos->pos_levels, and turnLevel is (pos->pos_levels -1) -+ * then we assume they really want to go onto the network. 
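-+ * e.g. (assuming an arity of 4 at the bottom switch level) two nodes in the same 4-way block must turn at level pos_levels - 1, and a route between them that climbed any higher would fail with ELAN3_ROUTE_TURN_LEVEL. 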
-+ * otherwise we check that the route turns at the appropriate level -+ */ -+ if ( (pos->pos_nodeid != destNodeId) || ( turnLevel != (pos->pos_levels -1)) ) -+ { -+ int lev; -+ if ((lev = elan3_route_get_min_turn_level(pos,destNodeId)) == -1) -+ { -+ PRINTF0 (ctxt, DBG_VP, "elan3_route_check: can't calculate turn level\n"); -+ return (ELAN3_ROUTE_INVALID); /* not sure this can happen here as checks above should protect me */ -+ } -+ if (turnLevel != lev) -+ { -+ PRINTF2 (ctxt, DBG_VP, "elan3_route_check: turn level should be %d but is %d \n", lev, turnLevel); -+ return (ELAN3_ROUTE_TURN_LEVEL); -+ } -+ } -+ return (ELAN3_ROUTE_SUCCESS); -+} -+/* ---------------------------------------------------------------------- */ -+int -+elan3_route_broadcast_check(ELAN3_CTXT *ctxt , E3_uint16 *flits, int lowNode, int highNode ) -+{ -+ E3_uint16 flitsTmp[MAX_FLITS]; -+ int nflits,i; -+ -+ nflits = GenerateRoute (&ctxt->Position, flitsTmp, lowNode, highNode, DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY); -+ -+ for(i=0;i -+ -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static sdramaddr_t -+AllocateLargeRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int ctxnum, E3_uint64 *smallRoute) -+{ -+ int bit = -1; -+ ELAN3_ROUTES *rent; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&tbl->Lock, flags); -+ -+ for (rent = tbl->LargeRoutes; rent; rent = rent->Next) -+ { -+ if ((bit = bt_freebit (rent->Bitmap, NROUTES_PER_BLOCK)) != -1) -+ break; -+ } -+ -+ if (bit == -1) /* No spare entries in large routes */ -+ { /* so allocate a new page */ -+ PRINTF0 (DBG_DEVICE, DBG_VP, "AllocateLargeRoute: allocate route entries\n"); -+ -+ spin_unlock_irqrestore (&tbl->Lock, flags); -+ -+ KMEM_ZALLOC(rent, ELAN3_ROUTES *, sizeof (ELAN3_ROUTES), TRUE); -+ -+ if (rent == (ELAN3_ROUTES *) NULL) -+ return ((sdramaddr_t) 0); -+ -+ rent->Routes = elan3_sdram_alloc (dev, PAGESIZE); -+ if (rent->Routes == (sdramaddr_t) 0) -+ { -+ KMEM_FREE (rent, sizeof (ELAN3_ROUTES)); -+ return ((sdramaddr_t) 0); -+ } -+ -+ spin_lock_irqsave (&tbl->Lock, flags); -+ -+ /* Add to list of large routes */ -+ rent->Next = tbl->LargeRoutes; -+ tbl->LargeRoutes = rent; -+ -+ /* and use entry 0 */ -+ bit = 0; -+ } -+ -+ /* Set the bit in the bitmap to mark this route as allocated */ -+ BT_SET (rent->Bitmap, bit); -+ -+ /* And generate the small route pointer and the pointer to the large routes */ -+ (*smallRoute) = BIG_ROUTE_PTR(rent->Routes + (bit*NBYTES_PER_LARGE_ROUTE), ctxnum); -+ -+ PRINTF4 (DBG_DEVICE, DBG_VP, "AllocateLargeRoute: rent %p using entry %d at %lx with route pointer %llx\n", -+ rent, bit, rent->Routes + (bit * NBYTES_PER_LARGE_ROUTE), (long long) (*smallRoute)); -+ -+ /* Invalidate the large route */ -+ elan3_sdram_zeroq_sdram (dev, rent->Routes + (bit * NBYTES_PER_LARGE_ROUTE), NBYTES_PER_LARGE_ROUTE); -+ -+ spin_unlock_irqrestore (&tbl->Lock, flags); -+ -+ return (rent->Routes + (bit * NBYTES_PER_LARGE_ROUTE)); -+} -+ -+static void -+FreeLargeRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, E3_uint64 smallRoute) -+{ -+ E3_Addr addr = (E3_Addr) (smallRoute & ((1ULL << ROUTE_CTXT_SHIFT)-1)); -+ ELAN3_ROUTES *rent; -+ -+ PRINTF1 (DBG_DEVICE, DBG_VP, "FreeLargeRoute: free route %llx\n", (long long) smallRoute); -+ -+ ASSERT (SPINLOCK_HELD (&tbl->Lock)); -+ -+ for (rent = tbl->LargeRoutes; rent; rent = rent->Next) -+ { -+ if (rent->Routes <= addr && (rent->Routes + ROUTE_BLOCK_SIZE) > addr) -+ { -+ int indx = (addr - rent->Routes)/NBYTES_PER_LARGE_ROUTE; -+ -+ PRINTF2 (DBG_DEVICE, DBG_VP, "FreeLargeRoute: rent=%p indx=%d\n", 
-+                     rent, indx);
-+
-+            BT_CLEAR(rent->Bitmap, indx);
-+            return;
-+        }
-+    }
-+
-+    panic ("elan: FreeLargeRoute - route not found in large route tables");
-+}
-+
-+static void
-+FreeLargeRoutes (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl)
-+{
-+    ELAN3_ROUTES *rent;
-+
-+    while ((rent = tbl->LargeRoutes) != NULL)
-+    {
-+        PRINTF1 (DBG_DEVICE, DBG_VP, "FreeLargeRoutes: free rent %p\n", rent);
-+
-+        tbl->LargeRoutes = rent->Next;
-+
-+        elan3_sdram_free (dev, rent->Routes, PAGESIZE);
-+
-+        KMEM_FREE (rent, sizeof(ELAN3_ROUTES));
-+    }
-+}
-+
-+int
-+GetRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process, E3_uint16 *flits)
-+{
-+    E3_uint64   routeValue;
-+    sdramaddr_t largeRouteOff;
-+
-+    if (process < 0 || process >= tbl->Size)
-+        return (EINVAL);
-+
-+    routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE);
-+
-+    if (routeValue & ROUTE_PTR)
-+    {
-+        largeRouteOff = (routeValue & ROUTE_PTR_MASK);
-+
-+        routeValue = elan3_sdram_readq (dev, largeRouteOff + 0);
-+        flits[0] = routeValue & 0xffff;
-+        flits[1] = (routeValue >> 16) & 0xffff;
-+        flits[2] = (routeValue >> 32) & 0xffff;
-+        flits[3] = (routeValue >> 48) & 0xffff;
-+
-+        routeValue = elan3_sdram_readq (dev, largeRouteOff + 8);
-+        flits[4] = routeValue & 0xffff;
-+        flits[5] = (routeValue >> 16) & 0xffff;
-+        flits[6] = (routeValue >> 32) & 0xffff;
-+        flits[7] = (routeValue >> 48) & 0xffff;
-+    }
-+    else
-+    {
-+        flits[0] = routeValue & 0xffff;
-+        flits[1] = (routeValue >> 16) & 0xffff;
-+        flits[2] = (routeValue >> 32) & 0xffff;
-+    }
-+
-+    return (ESUCCESS);
-+}
-+
-+ELAN3_ROUTE_TABLE *
-+AllocateRouteTable (ELAN3_DEV *dev, int size)
-+{
-+    ELAN3_ROUTE_TABLE *tbl;
-+
-+    KMEM_ZALLOC (tbl, ELAN3_ROUTE_TABLE *, sizeof (ELAN3_ROUTE_TABLE), TRUE);
-+
-+    if (tbl == (ELAN3_ROUTE_TABLE *) NULL)
-+        return (NULL);
-+
-+    tbl->Size  = size;
-+    tbl->Table = elan3_sdram_alloc (dev, size*NBYTES_PER_SMALL_ROUTE);
-+
-+    if (tbl->Table == 0)
-+    {
-+        KMEM_FREE (tbl, sizeof (ELAN3_ROUTE_TABLE));
-+        return (NULL);
-+    }
-+    spin_lock_init (&tbl->Lock);
-+
-+    /* zero the route table */
-+    elan3_sdram_zeroq_sdram (dev, tbl->Table, size*NBYTES_PER_SMALL_ROUTE);
-+
-+    return (tbl);
-+}
-+
-+void
-+FreeRouteTable (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl)
-+{
-+    elan3_sdram_free (dev, tbl->Table, tbl->Size*NBYTES_PER_SMALL_ROUTE);
-+
-+    FreeLargeRoutes (dev, tbl);
-+
-+    spin_lock_destroy (&tbl->Lock);
-+
-+    KMEM_FREE (tbl, sizeof (ELAN3_ROUTE_TABLE));
-+}
-+
-+int
-+LoadRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process, int ctxnum, int nflits, E3_uint16 *flits)
-+{
-+    E3_uint64   routeValue;
-+    E3_uint64   largeRouteValue;
-+    sdramaddr_t largeRouteOff;
-+    unsigned long flags;
-+
-+    if (process < 0 || process >= tbl->Size)
-+        return (EINVAL);
-+
-+    PRINTF3 (DBG_DEVICE, DBG_VP, "LoadRoute: table %lx process %d ctxnum %x\n", tbl->Table, process, ctxnum);
-+
-+    if (nflits < 4)
-+    {
-+        spin_lock_irqsave (&tbl->Lock, flags);
-+
-+        /* See if we're replacing a "large" route */
-+        routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE);
-+        if (routeValue & ROUTE_PTR)
-+            FreeLargeRoute (dev, tbl, routeValue);
-+
-+        routeValue = SMALL_ROUTE(flits, ctxnum);
-+
-+        if ( routeValue & ROUTE_PTR)
-+            PRINTF0 (DBG_DEVICE, DBG_VP, "SHOULD BE A SMALL ROUTE !!!!!!!\n");
-+
-+        PRINTF2 (DBG_DEVICE, DBG_VP, "LoadRoute: loading small route %d %llx\n", process, (long long) routeValue);
-+        elan3_sdram_writeq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE, routeValue);
-+    }
-+    else
-+    {
-+        E3_uint64 value0 = BIG_ROUTE0(flits);
-+        E3_uint64 value1
= BIG_ROUTE1(flits); -+ -+ if ((largeRouteOff = AllocateLargeRoute (dev, tbl, ctxnum, &largeRouteValue)) == (sdramaddr_t) 0) -+ return (ENOMEM); -+ -+ spin_lock_irqsave (&tbl->Lock, flags); -+ -+ routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE); -+ -+ if ((routeValue & ROUTE_PTR) == 0) -+ elan3_sdram_writeq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE, largeRouteValue); -+ else -+ { -+ FreeLargeRoute (dev, tbl, largeRouteValue); -+ -+ largeRouteOff = (routeValue & ROUTE_PTR_MASK); -+ } -+ -+ PRINTF3 (DBG_DEVICE, DBG_VP, "LoadRoute: loading large route %d - %llx %llx\n", process, -+ (long long) value0, (long long) value1); -+ -+ elan3_sdram_writeq (dev, largeRouteOff + 0, value0); -+ elan3_sdram_writeq (dev, largeRouteOff + 8, value1); -+ } -+ -+ spin_unlock_irqrestore (&tbl->Lock, flags); -+ return (ESUCCESS); -+} -+void -+InvalidateRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process) -+{ -+ E3_uint64 routeValue; -+ unsigned long flags; -+ -+ if (process < 0 || process >= tbl->Size) -+ return; -+ -+ spin_lock_irqsave (&tbl->Lock, flags); -+ -+ /* unset ROUTE_VALID -+ * does not matter if its short or long, will check when we re-use it -+ */ -+ routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE); -+ elan3_sdram_writeq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE, (routeValue & (~ROUTE_VALID))); -+ -+ spin_unlock_irqrestore (&tbl->Lock, flags); -+} -+void -+ValidateRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process) -+{ -+ E3_uint64 routeValue; -+ unsigned long flags; -+ -+ if (process < 0 || process >= tbl->Size) -+ return; -+ -+ PRINTF2 (DBG_DEVICE, DBG_VP, "ValidateRoute: table %ld process %d \n", tbl->Table ,process); -+ -+ spin_lock_irqsave (&tbl->Lock, flags); -+ -+ /* set ROUTE_VALID -+ */ -+ routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE); -+ elan3_sdram_writeq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE, (routeValue | ROUTE_VALID)); -+ -+ spin_unlock_irqrestore (&tbl->Lock, flags); -+} -+void -+ClearRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process) -+{ -+ E3_uint64 routeValue; -+ unsigned long flags; -+ -+ if (process < 0 || process >= tbl->Size) -+ return; -+ -+ spin_lock_irqsave (&tbl->Lock, flags); -+ -+ PRINTF2 (DBG_DEVICE, DBG_VP, "ClearRoute: table %ld process %d \n", tbl->Table ,process); -+ -+ routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE); -+ -+ elan3_sdram_writeq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE, 0); -+ -+ if (routeValue & ROUTE_PTR) -+ FreeLargeRoute (dev, tbl, routeValue); -+ -+ spin_unlock_irqrestore (&tbl->Lock, flags); -+} -+ -+static int -+ElanIdEqual (ELAN_POSITION *pos, int level, int ida, int idb) -+{ -+ int l; -+ -+ for (l = pos->pos_levels-1; l >= level; l--) -+ { -+ ida /= pos->pos_arity[l]; -+ idb /= pos->pos_arity[l]; -+ } -+ -+ return (ida == idb); -+} -+ -+static int -+RouteDown (ELAN_POSITION *pos, int level, int elanid) -+{ -+ int l; -+ -+ for (l = (pos->pos_levels - 1); level < pos->pos_levels - 1; level++, l--) -+ { -+ if ( pos->pos_arity[l] ) -+ elanid /= pos->pos_arity[l]; -+ } -+ elanid %= pos->pos_arity[l]; -+ -+ return elanid; -+} -+ -+static int -+InitPackedAndFlits (u_char *packed, E3_uint16 *flits) -+{ -+ int rb = 0; -+ -+ bzero ((caddr_t) packed, MAX_PACKED+4); -+ bzero ((caddr_t) flits, MAX_FLITS * sizeof (E3_uint16)); -+ -+ /* Initialise 4 bytes of packed, so that the "padding" */ -+ /* NEVER terminates with 00, as this is recognised as */ 
-+    /* a CRC flit */
-+    packed[rb++] = 0xF;
-+    packed[rb++] = 0xF;
-+    packed[rb++] = 0xF;
-+    packed[rb++] = 0xF;
-+
-+    return (rb);
-+}
-+
-+static int
-+PackThemRoutesUp (E3_uint16 *flits, u_char *packed, int rb, int timeout, int highPri)
-+{
-+    int i, nflits;
-+
-+    flits[0] |= FIRST_TIMEOUT(timeout);
-+    if (highPri)
-+        flits[0] |= FIRST_HIGH_PRI;
-+
-+    /* round up the number of route bytes to flits */
-+    /* and subtract the 4 extra we've padded out with */
-+    nflits = (rb-1)/4;
-+
-+    for (i = nflits; i > 0; i--)
-+    {
-+        flits[i] = (packed[rb-1] << 12 |
-+                    packed[rb-2] << 8  |
-+                    packed[rb-3] << 4  |
-+                    packed[rb-4] << 0);
-+        rb -= 4;
-+    }
-+
-+    /* Now set the position of the first packed route */
-+    /* byte in the 2nd 16 bit flit, taking account of the */
-+    /* 4 byte padding */
-+    flits[0] |= FIRST_PACKED (4-rb);
-+
-+    return (nflits+1);
-+}
-+
-+int
-+GenerateRoute (ELAN_POSITION *pos, E3_uint16 *flits, int lowid, int highid, int timeout, int highPri)
-+{
-+    int    broadcast = (lowid != highid);
-+    int    rb = 0;
-+    int    first = 1;
-+    int    noRandom = 0;
-+    int    level;
-+    u_char packed[MAX_PACKED+4];
-+    int    numDownLinks;
-+
-+    rb = InitPackedAndFlits (packed, flits);
-+
-+    for (level = pos->pos_levels-1;          /* Move up out of the elan */
-+         level > 0 && ! (ElanIdEqual (pos, level, pos->pos_nodeid, lowid) &&
-+                         ElanIdEqual (pos, level, pos->pos_nodeid, highid)); level--)
-+    {
-+        noRandom |= pos->pos_random_disabled & (1 << (pos->pos_levels-1-level));
-+    }
-+
-+    for (level = pos->pos_levels-1;          /* Move up out of the elan */
-+         level > 0 && ! (ElanIdEqual (pos, level, pos->pos_nodeid, lowid) &&
-+                         ElanIdEqual (pos, level, pos->pos_nodeid, highid)); level--)
-+    {
-+        numDownLinks = pos->pos_arity [level];
-+        if (first)
-+        {
-+            if (broadcast || noRandom)
-+                flits[0] = FIRST_BCAST_TREE;
-+            else
-+            {
-+                if (numDownLinks == 4)
-+                    flits[0] = FIRST_ADAPTIVE;
-+                else
-+                    flits[0] = FIRST_ROUTE( numDownLinks + ( lowid % (8-numDownLinks) ));
-+            }
-+            first = 0;
-+        }
-+        else
-+        {
-+            if (broadcast || noRandom)
-+                packed[rb++] = PACKED_BCAST_TREE;
-+            else
-+            {
-+                if (numDownLinks == 4)
-+                    packed[rb++] = PACKED_ADAPTIVE;
-+                else
-+                    packed[rb++] = PACKED_ROUTE( numDownLinks + ( lowid % (8-numDownLinks) ));
-+            }
-+        }
-+    }
-+
-+    while (level < pos->pos_levels)
-+    {
-+        int lowRoute  = RouteDown (pos, level, lowid);
-+        int highRoute = RouteDown (pos, level, highid);
-+
-+        if (first)
-+        {
-+            if (broadcast)
-+                flits[0] = FIRST_BCAST(highRoute, lowRoute);
-+            else
-+                flits[0] = FIRST_ROUTE(lowRoute);
-+
-+            first = 0;
-+        }
-+        else
-+        {
-+            if (broadcast)
-+            {
-+                packed[rb++] = PACKED_BCAST0(highRoute, lowRoute);
-+                packed[rb++] = PACKED_BCAST1(highRoute, lowRoute);
-+            }
-+            else
-+                packed[rb++] = PACKED_ROUTE(lowRoute);
-+        }
-+
-+        level++;
-+    }
-+
-+#ifdef ELITE_REVA_SUPPORTED
-+    if (broadcast && (pos->pos_levels == 3))
-+    {
-+        packed[rb++] = PACKED_BCAST0(0, 0);
-+        packed[rb++] = PACKED_BCAST1(0, 0);
-+    }
-+#endif
-+
-+    return (PackThemRoutesUp (flits, packed, rb, timeout, highPri));
-+}
-+
-+int
-+GenerateCheckRoute (ELAN_POSITION *pos, E3_uint16 *flits, int level, int adaptive)
-+{
-+    int    notfirst = 0;
-+    int    l, rb;
-+    u_char packed[MAX_PACKED+4];
-+
-+    rb = InitPackedAndFlits (packed, flits);
-+
-+    for (l = pos->pos_levels-1; l > level; l--)
-+        if (! notfirst++)
-+            flits[0] = adaptive ? FIRST_ADAPTIVE : FIRST_BCAST_TREE;
-+        else
-+            packed[rb++] = adaptive ? PACKED_ADAPTIVE : PACKED_BCAST_TREE;
-+
-+    if (!
notfirst++ ) -+ flits[0] = FIRST_MYLINK; -+ else -+ packed[rb++] = PACKED_MYLINK; -+ -+ for (l++ /* consume mylink */; l < pos->pos_levels; l++) -+ if (! notfirst++) -+ flits[0] = FIRST_ROUTE (RouteDown (pos, l, pos->pos_nodeid)); -+ else -+ packed[rb++] = PACKED_ROUTE (RouteDown (pos, l, pos->pos_nodeid)); -+ -+ -+ return (PackThemRoutesUp (flits, packed, rb, DEFAULT_ROUTE_TIMEOUT, HIGH_ROUTE_PRIORITY)); -+} -+ -+ -+/* -+ * In this case "level" is the number of levels counted from the bottom. -+ */ -+int -+GenerateProbeRoute (E3_uint16 *flits, int nodeid, int level, int *linkup, int *linkdown, int adaptive ) -+{ -+ int first = 1; -+ int i, rb; -+ u_char packed[MAX_PACKED+4]; -+ -+ rb = InitPackedAndFlits (packed, flits); -+ -+ /* Generate "up" routes */ -+ for (i = 0; i < level; i++) -+ { -+ if (first) -+ flits[0] = linkup ? FIRST_ROUTE(linkup[i]) : adaptive ? FIRST_ADAPTIVE : FIRST_BCAST_TREE; -+ else -+ packed[rb++] = linkup ? PACKED_ROUTE(linkup[i]) : adaptive ? PACKED_ADAPTIVE : PACKED_BCAST_TREE; -+ first = 0; -+ } -+ -+ /* Generate a "to-me" route down */ -+ if (first) -+ flits[0] = FIRST_MYLINK; -+ else -+ packed[rb++] = PACKED_MYLINK; -+ -+ for (i = level-1; i >= 0; i--) -+ packed[rb++] = PACKED_ROUTE(linkdown[i]); -+ -+ return (PackThemRoutesUp (flits, packed, rb, DEFAULT_ROUTE_TIMEOUT, HIGH_ROUTE_PRIORITY)); -+} -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan3/sdram.c linux-2.6.9/drivers/net/qsnet/elan3/sdram.c ---- clean/drivers/net/qsnet/elan3/sdram.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan3/sdram.c 2003-09-24 09:57:25.000000000 -0400 -@@ -0,0 +1,807 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: sdram.c,v 1.17 2003/09/24 13:57:25 david Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/os/sdram.c,v $*/ -+ -+ -+#include -+ -+#include -+#include -+#include -+ -+/* sdram access functions */ -+#define sdram_off_to_bank(dev,off) (&dev->SdramBanks[(off) >> ELAN3_SDRAM_BANK_SHIFT]) -+#define sdram_off_to_offset(dev,off) ((off) & (ELAN3_SDRAM_BANK_SIZE-1)) -+#define sdram_off_to_bit(dev,indx,off) (sdram_off_to_offset(dev,off) >> (SDRAM_MIN_BLOCK_SHIFT+(indx))) -+ -+#define sdram_off_to_mapping(dev,off) (sdram_off_to_bank(dev,off)->Mapping + sdram_off_to_offset(dev,off)) -+ -+unsigned char -+elan3_sdram_readb (ELAN3_DEV *dev, sdramaddr_t off) -+{ -+ return (readb ((unsigned char *) sdram_off_to_mapping(dev, off))); -+} -+ -+unsigned short -+elan3_sdram_readw (ELAN3_DEV *dev, sdramaddr_t off) -+{ -+ return (readw ((unsigned short *) sdram_off_to_mapping(dev, off))); -+} -+ -+unsigned int -+elan3_sdram_readl (ELAN3_DEV *dev, sdramaddr_t off) -+{ -+ return (readl ((unsigned int *) sdram_off_to_mapping(dev, off))); -+} -+ -+unsigned long long -+elan3_sdram_readq (ELAN3_DEV *dev, sdramaddr_t off) -+{ -+ return (readq ((unsigned long long *) sdram_off_to_mapping(dev, off))); -+} -+ -+void -+elan3_sdram_writeb (ELAN3_DEV *dev, sdramaddr_t off, unsigned char val) -+{ -+ writeb (val, (unsigned char *) sdram_off_to_mapping(dev, off)); -+ wmb(); -+} -+ -+void -+elan3_sdram_writew (ELAN3_DEV *dev, sdramaddr_t off, unsigned short val) -+{ -+ writew (val, (unsigned short *) sdram_off_to_mapping(dev, off)); -+ wmb(); -+} -+ -+void -+elan3_sdram_writel (ELAN3_DEV *dev, sdramaddr_t off, unsigned int val) -+{ -+ writel (val, (unsigned int *) 
sdram_off_to_mapping(dev, off)); -+ wmb(); -+} -+ -+void -+elan3_sdram_writeq (ELAN3_DEV *dev, sdramaddr_t off, unsigned long long val) -+{ -+ writeq (val, (unsigned long long *) sdram_off_to_mapping(dev, off)); -+ wmb(); -+} -+ -+void -+elan3_sdram_copyb_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes) -+{ -+ bcopy ((void *)sdram_off_to_mapping(dev, from), to, nbytes); -+} -+ -+void -+elan3_sdram_copyw_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes) -+{ -+#ifdef __LITTLE_ENDIAN__ -+ bcopy ((void *)sdram_off_to_mapping(dev, from), to, nbytes); -+#else -+#error incorrect for big endian -+#endif -+} -+ -+void -+elan3_sdram_copyl_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes) -+{ -+#ifdef __LITTLE_ENDIAN__ -+ bcopy ((void *)sdram_off_to_mapping(dev, from), to, nbytes); -+#else -+#error incorrect for big endian -+#endif -+} -+ -+void -+elan3_sdram_copyq_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes) -+{ -+#ifdef __LITTLE_ENDIAN__ -+ bcopy ((void *)sdram_off_to_mapping(dev, from), to, nbytes); -+#else -+#error incorrect for big endian -+#endif -+} -+ -+#define E3_WRITEBUFFER_SIZE 16 -+#define E3_WRITEBUFFER_OFFSET(x) (((unsigned long) x) & (E3_WRITEBUFFER_SIZE-1)) -+#define E3_WRITEBUFFER_BASE(x) (((unsigned long) x) & ~((unsigned long) (E3_WRITEBUFFER_SIZE-1))) -+ -+void -+elan3_sdram_copyb_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes) -+{ -+ virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to); -+ virtaddr_t dlim = (virtaddr_t) dbase + nbytes; -+ virtaddr_t slim = (virtaddr_t) from + nbytes; -+ unsigned nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase); -+ unsigned ntop = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint8_t)) + sizeof (uint8_t); -+ int i; -+ -+ if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim)) -+ { -+ for (i = 0; i < nbytes/sizeof(uint8_t); i++) -+ writeb (((uint8_t *) from)[i], &((uint8_t *) dbase)[i]); -+ wmb(); -+ } -+ else -+ { -+ if (ntop < E3_WRITEBUFFER_SIZE) -+ { -+ slim -= ntop; -+ dlim -= ntop; -+ -+ for (i = 0; i < ntop/sizeof(uint8_t); i++) -+ writeb (((uint8_t *) slim)[i], &((uint8_t *) dlim)[i]); -+ wmb(); -+ } -+ -+ while (dlim >= (dbase + E3_WRITEBUFFER_SIZE)) -+ { -+ dlim -= E3_WRITEBUFFER_SIZE; -+ slim -= E3_WRITEBUFFER_SIZE; -+ -+ for (i = 0; i < E3_WRITEBUFFER_SIZE/sizeof (uint8_t); i++) -+ writeb (((uint8_t *) slim)[i], &((uint8_t *) dlim)[i]); -+ wmb(); -+ } -+ -+ if (nbase < E3_WRITEBUFFER_SIZE) -+ { -+ for (i = 0; i < nbase/sizeof(uint8_t); i++) -+ writeb (((uint8_t *) from)[i], &((uint8_t *) dbase)[i]); -+ wmb(); -+ } -+ } -+} -+ -+void -+elan3_sdram_zerob_sdram (ELAN3_DEV *dev, sdramaddr_t to, int nbytes) -+{ -+ virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to); -+ virtaddr_t dlim = (virtaddr_t) dbase + nbytes; -+ unsigned nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase); -+ unsigned ntop = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint8_t)) + sizeof (uint8_t); -+ int i; -+ -+ if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim)) -+ { -+ for (i = 0; i < nbytes/sizeof(uint8_t); i++) -+ writeb (0, &((uint8_t *) dbase)[i]); -+ wmb(); -+ } -+ else -+ { -+ if (ntop < E3_WRITEBUFFER_SIZE) -+ { -+ dlim -= ntop; -+ -+ for (i = 0; i < ntop/sizeof(uint8_t); i++) -+ writeb (0, &((uint8_t *) dlim)[i]); -+ wmb(); -+ } -+ -+ while (dlim >= (dbase + E3_WRITEBUFFER_SIZE)) -+ { -+ dlim -= E3_WRITEBUFFER_SIZE; -+ -+ writeq (0, &((uint64_t *) dlim)[0]); -+ writeq (0, &((uint64_t *) dlim)[1]); -+ -+ wmb(); -+ } -+ -+ if 
(nbase < E3_WRITEBUFFER_SIZE) -+ { -+ for (i = 0; i < nbase/sizeof(uint8_t); i++) -+ writeb (0, &((uint8_t *) dbase)[i]); -+ wmb(); -+ } -+ } -+} -+ -+void -+elan3_sdram_copyw_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes) -+{ -+ virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to); -+ virtaddr_t dlim = (virtaddr_t) dbase + nbytes; -+ virtaddr_t slim = (virtaddr_t) from + nbytes; -+ unsigned nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase); -+ unsigned ntop = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint16_t)) + sizeof (uint16_t); -+ int i; -+ -+ if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim)) -+ { -+ for (i = 0; i < nbytes/sizeof(uint16_t); i++) -+ writew (((uint16_t *) from)[i], &((uint16_t *) dbase)[i]); -+ wmb(); -+ } -+ else -+ { -+ if (ntop < E3_WRITEBUFFER_SIZE) -+ { -+ slim -= ntop; -+ dlim -= ntop; -+ -+ for (i = 0; i < ntop/sizeof(uint16_t); i++) -+ writew (((uint16_t *) slim)[i], &((uint16_t *) dlim)[i]); -+ wmb(); -+ } -+ -+ while (dlim >= (dbase + E3_WRITEBUFFER_SIZE)) -+ { -+ dlim -= E3_WRITEBUFFER_SIZE; -+ slim -= E3_WRITEBUFFER_SIZE; -+ -+ writew (((uint16_t *) slim)[0], &((uint16_t *) dlim)[0]); -+ writew (((uint16_t *) slim)[1], &((uint16_t *) dlim)[1]); -+ writew (((uint16_t *) slim)[2], &((uint16_t *) dlim)[2]); -+ writew (((uint16_t *) slim)[3], &((uint16_t *) dlim)[3]); -+ writew (((uint16_t *) slim)[4], &((uint16_t *) dlim)[4]); -+ writew (((uint16_t *) slim)[5], &((uint16_t *) dlim)[5]); -+ writew (((uint16_t *) slim)[6], &((uint16_t *) dlim)[6]); -+ writew (((uint16_t *) slim)[7], &((uint16_t *) dlim)[7]); -+ wmb(); -+ } -+ -+ if (nbase < E3_WRITEBUFFER_SIZE) -+ { -+ for (i = 0; i < nbase/sizeof(uint16_t); i++) -+ writew (((uint16_t *) from)[i], &((uint16_t *) dbase)[i]); -+ wmb(); -+ } -+ } -+} -+ -+void -+elan3_sdram_zerow_sdram (ELAN3_DEV *dev, sdramaddr_t to, int nbytes) -+{ -+ virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to); -+ virtaddr_t dlim = (virtaddr_t) dbase + nbytes; -+ unsigned nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase); -+ unsigned ntop = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint16_t)) + sizeof (uint16_t); -+ int i; -+ -+ if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim)) -+ { -+ for (i = 0; i < nbytes/sizeof(uint16_t); i++) -+ writew (0, &((uint16_t *) dbase)[i]); -+ wmb(); -+ } -+ else -+ { -+ if (ntop < E3_WRITEBUFFER_SIZE) -+ { -+ dlim -= ntop; -+ -+ for (i = 0; i < ntop/sizeof(uint16_t); i++) -+ writew (0, &((uint16_t *) dlim)[i]); -+ wmb(); -+ } -+ -+ while (dlim >= (dbase + E3_WRITEBUFFER_SIZE)) -+ { -+ dlim -= E3_WRITEBUFFER_SIZE; -+ -+ writeq (0, &((uint64_t *) dlim)[0]); -+ writeq (0, &((uint64_t *) dlim)[1]); -+ wmb(); -+ } -+ -+ if (nbase < E3_WRITEBUFFER_SIZE) -+ { -+ for (i = 0; i < nbase/sizeof(uint16_t); i++) -+ writew (0, &((uint16_t *) dbase)[i]); -+ wmb(); -+ } -+ } -+} -+ -+void -+elan3_sdram_copyl_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes) -+{ -+ virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to); -+ virtaddr_t dlim = (virtaddr_t) dbase + nbytes; -+ virtaddr_t slim = (virtaddr_t) from + nbytes; -+ unsigned nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase); -+ unsigned ntop = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint32_t)) + sizeof (uint32_t); -+ int i; -+ -+ if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim)) -+ { -+ for (i = 0; i < nbytes/sizeof(uint32_t); i++) -+ writel (((uint32_t *) from)[i], &((uint32_t *) dbase)[i]); -+ wmb(); -+ } -+ else -+ { -+ if (ntop < E3_WRITEBUFFER_SIZE) 
-+ { -+ slim -= ntop; -+ dlim -= ntop; -+ -+ for (i = 0; i < ntop/sizeof(uint32_t); i++) -+ writel (((uint32_t *) slim)[i], &((uint32_t *) dlim)[i]); -+ wmb(); -+ } -+ -+ while (dlim >= (dbase + E3_WRITEBUFFER_SIZE)) -+ { -+ dlim -= E3_WRITEBUFFER_SIZE; -+ slim -= E3_WRITEBUFFER_SIZE; -+ -+ writel (((uint32_t *) slim)[0], &((uint32_t *) dlim)[0]); -+ writel (((uint32_t *) slim)[1], &((uint32_t *) dlim)[1]); -+ writel (((uint32_t *) slim)[2], &((uint32_t *) dlim)[2]); -+ writel (((uint32_t *) slim)[3], &((uint32_t *) dlim)[3]); -+ wmb(); -+ } -+ -+ if (nbase < E3_WRITEBUFFER_SIZE) -+ { -+ for (i = 0; i < nbase/sizeof(uint32_t); i++) -+ writel (((uint32_t *) from)[i], &((uint32_t *) dbase)[i]); -+ wmb(); -+ } -+ } -+} -+ -+void -+elan3_sdram_zerol_sdram (ELAN3_DEV *dev, sdramaddr_t to, int nbytes) -+{ -+ virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to); -+ virtaddr_t dlim = (virtaddr_t) dbase + nbytes; -+ unsigned nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase); -+ unsigned ntop = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint32_t)) + sizeof (uint32_t); -+ int i; -+ -+ if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim)) -+ { -+ for (i = 0; i < nbytes/sizeof(uint32_t); i++) -+ writel (0, &((uint32_t *) dbase)[i]); -+ wmb(); -+ } -+ else -+ { -+ if (ntop < E3_WRITEBUFFER_SIZE) -+ { -+ dlim -= ntop; -+ -+ for (i = 0; i < ntop/sizeof(uint32_t); i++) -+ writel (0, &((uint32_t *) dlim)[i]); -+ wmb(); -+ } -+ -+ while (dlim >= (dbase + E3_WRITEBUFFER_SIZE)) -+ { -+ dlim -= E3_WRITEBUFFER_SIZE; -+ -+ writeq (0, &((uint64_t *) dlim)[0]); -+ writeq (0, &((uint64_t *) dlim)[1]); -+ wmb(); -+ } -+ -+ if (nbase < E3_WRITEBUFFER_SIZE) -+ { -+ for (i = 0; i < nbase/sizeof(uint32_t); i++) -+ writel (0, &((uint32_t *) dbase)[i]); -+ wmb(); -+ } -+ } -+} -+ -+void -+elan3_sdram_copyq_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes) -+{ -+ virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to); -+ virtaddr_t dlim = (virtaddr_t) dbase + nbytes; -+ virtaddr_t slim = (virtaddr_t) from + nbytes; -+ unsigned nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase); -+ unsigned ntop = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint64_t)) + sizeof (uint64_t); -+ -+ if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim)) -+ { -+ writeq (((uint64_t *) from)[0], &((uint64_t *) dbase)[0]); -+ wmb(); -+ } -+ else -+ { -+ if (ntop < E3_WRITEBUFFER_SIZE) -+ { -+ slim -= ntop; -+ dlim -= ntop; -+ -+ writeq (((uint64_t *) slim)[0], &((uint64_t *) dlim)[0]); -+ wmb(); -+ } -+ -+ while (dlim >= (dbase + E3_WRITEBUFFER_SIZE)) -+ { -+ dlim -= E3_WRITEBUFFER_SIZE; -+ slim -= E3_WRITEBUFFER_SIZE; -+ -+ writeq (((uint64_t *) slim)[0], &((uint64_t *) dlim)[0]); -+ writeq (((uint64_t *) slim)[1], &((uint64_t *) dlim)[1]); -+ wmb(); -+ } -+ -+ if (nbase < E3_WRITEBUFFER_SIZE) -+ { -+ writeq (((uint64_t *) from)[0], &((uint64_t *) dbase)[0]); -+ wmb(); -+ } -+ } -+} -+ -+void -+elan3_sdram_zeroq_sdram (ELAN3_DEV *dev, sdramaddr_t to, int nbytes) -+{ -+ virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to); -+ virtaddr_t dlim = (virtaddr_t) dbase + nbytes; -+ unsigned nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase); -+ unsigned ntop = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint64_t)) + sizeof (uint64_t); -+ -+ if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim)) -+ { -+ writeq (0, &((uint64_t *) dbase)[0]); -+ wmb(); -+ } -+ else -+ { -+ if (ntop < E3_WRITEBUFFER_SIZE) -+ { -+ dlim -= ntop; -+ -+ writeq (0, &((uint64_t *) dlim)[0]); -+ wmb(); -+ } -+ 
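-+        /*
-+         * All of these copy/zero routines share this pattern, sized so
-+         * that each burst of writes stays inside a single 16-byte Elan
-+         * write buffer.  As a hedged illustration (example offsets
-+         * assumed, not taken from the driver), for dbase = 0x1008 and
-+         * nbytes = 0x20 (so dlim = 0x1028):
-+         *
-+         *   nbase = 16 - E3_WRITEBUFFER_OFFSET(0x1008)     = 8 head bytes
-+         *   ntop  = E3_WRITEBUFFER_OFFSET(0x1028 - 8) + 8  = 8 tail bytes
-+         *
-+         * The tail is written first, then whole 16-byte blocks, then the
-+         * head, with a wmb() after each buffer-full so it drains before
-+         * the next one is started.
-+         */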
-+ while (dlim >= (dbase + E3_WRITEBUFFER_SIZE)) -+ { -+ dlim -= E3_WRITEBUFFER_SIZE; -+ -+ writeq (0, &((uint64_t *) dlim)[0]); -+ writeq (0, &((uint64_t *) dlim)[1]); -+ wmb(); -+ } -+ -+ if (nbase < E3_WRITEBUFFER_SIZE) -+ { -+ writeq (0, &((uint64_t *) dbase)[0]); -+ wmb(); -+ } -+ } -+} -+ -+physaddr_t -+elan3_sdram_to_phys (ELAN3_DEV *dev, sdramaddr_t off) -+{ -+#if defined(DIGITAL_UNIX) -+ return (KSEG_TO_PHYS (sdram_off_to_mapping (dev, off))); -+#elif defined(LINUX) -+ return (kmem_to_phys ((void *) sdram_off_to_mapping (dev, off))); -+#endif -+} -+ -+/* sdram buddy allocator */ -+#define read_next(dev, block) elan3_sdram_readl(dev, block + 0) -+#define read_prev(dev, block) elan3_sdram_readl(dev, block + 4) -+#define write_next(dev, block, val) (elan3_sdram_writel(dev, block + 0, val), val) -+#define write_prev(dev, block, val) (elan3_sdram_writel(dev, block + 4, val), val) -+ -+#define freelist_insert(dev,idx,block)\ -+do {\ -+ sdramaddr_t next = dev->SdramFreeLists[(idx)];\ -+\ -+ /*\ -+ * block->prev = NULL;\ -+ * block->next = next;\ -+ * if (next != NULL)\ -+ * next->prev = block;\ -+ * freelist = block;\ -+ */\ -+ write_prev (dev, block, (sdramaddr_t) 0);\ -+ write_next (dev, block, next);\ -+ if (next != (sdramaddr_t) 0)\ -+ write_prev (dev, next, block);\ -+ dev->SdramFreeLists[idx] = block;\ -+\ -+ dev->SdramFreeCounts[idx]++;\ -+ dev->Stats.SdramBytesFree += (SDRAM_MIN_BLOCK_SIZE << idx);\ -+} while (0) -+ -+#define freelist_remove(dev,idx,block)\ -+do {\ -+ /*\ -+ * if (block->prev)\ -+ * block->prev->next = block->next;\ -+ * else\ -+ * dev->SdramFreeLists[idx] = block->next;\ -+ * if (block->next)\ -+ * block->next->prev = block->prev;\ -+ */\ -+ sdramaddr_t blocknext = read_next (dev, block);\ -+ sdramaddr_t blockprev = read_prev (dev, block);\ -+\ -+ if (blockprev)\ -+ write_next (dev, blockprev, blocknext);\ -+ else\ -+ dev->SdramFreeLists[idx] = blocknext;\ -+ if (blocknext)\ -+ write_prev (dev, blocknext, blockprev);\ -+\ -+ dev->SdramFreeCounts[idx]--;\ -+ dev->Stats.SdramBytesFree -= (SDRAM_MIN_BLOCK_SIZE << idx);\ -+} while (0) -+ -+#define freelist_removehead(dev,idx,block)\ -+do {\ -+ sdramaddr_t blocknext = read_next (dev, block);\ -+\ -+ if ((dev->SdramFreeLists[idx] = blocknext) != 0)\ -+ write_prev (dev, blocknext, 0);\ -+\ -+ dev->SdramFreeCounts[idx]--;\ -+ dev->Stats.SdramBytesFree -= (SDRAM_MIN_BLOCK_SIZE << idx);\ -+} while (0) -+ -+#if defined(DEBUG) -+static int -+display_blocks (ELAN3_DEV *dev, int indx, char *string) -+{ -+ sdramaddr_t block; -+ int nbytes = 0; -+ -+ printk ("%s - indx %d\n", string, indx); -+ for (block = dev->SdramFreeLists[indx]; block != (sdramaddr_t) 0; block = read_next (dev, block)) -+ { -+ printk (" %lx", block); -+ nbytes += (SDRAM_MIN_BLOCK_SIZE << indx); -+ } -+ printk ("\n"); -+ -+ return (nbytes); -+} -+ -+ -+void -+elan3_sdram_display (ELAN3_DEV *dev, char *string) -+{ -+ int indx; -+ int nbytes = 0; -+ -+ printk ("elan3_sdram_display: dev=%p\n", dev); -+ for (indx = 0; indx < SDRAM_NUM_FREE_LISTS; indx++) -+ if (dev->SdramFreeLists[indx] != (sdramaddr_t) 0) -+ nbytes += display_blocks (dev, indx, string); -+ printk ("\n%d bytes free\n", nbytes); -+} -+ -+void -+elan3_sdram_verify (ELAN3_DEV *dev) -+{ -+ int indx, size, nbits, i, b; -+ sdramaddr_t block; -+ -+ for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1) -+ { -+ unsigned count = 0; -+ -+ for (block = dev->SdramFreeLists[indx]; block; block = read_next (dev, block), count++) -+ { -+ ELAN3_SDRAM_BANK *bank = 
sdram_off_to_bank (dev, block); -+ unsigned off = sdram_off_to_offset (dev, block); -+ int bit = sdram_off_to_bit (dev, indx, block); -+ -+ if ((block & (size-1)) != 0) -+ printk ("elan3_sdram_verify: block=%lx indx=%x - not aligned\n", block, indx); -+ -+ if (bank == NULL || off > bank->Size) -+ printk ("elan3_sdram_verify: block=%lx indx=%x - outside bank\n", block, indx); -+ else if (BT_TEST (bank->Bitmaps[indx], bit) == 0) -+ printk ("elan3_sdram_verify: block=%lx indx=%x - bit not set\n", block, indx); -+ else -+ { -+ for (i = indx-1, nbits = 2; i >= 0; i--, nbits <<= 1) -+ { -+ bit = sdram_off_to_bit (dev, i, block); -+ -+ for (b = 0; b < nbits; b++) -+ if (BT_TEST(bank->Bitmaps[i], bit + b)) -+ printk ("elan3_sdram_verify: block=%lx indx=%x - also free i=%d bit=%x\n", block, indx, i, bit+b); -+ } -+ } -+ } -+ -+ if (dev->SdramFreeCounts[indx] != count) -+ printk ("elan3_sdram_verify: indx=%x expected %d got %d\n", indx, dev->SdramFreeCounts[indx], count); -+ } -+} -+ -+#endif /* defined(DEBUG) */ -+ -+static void -+free_block (ELAN3_DEV *dev, sdramaddr_t block, int indx) -+{ -+ ELAN3_SDRAM_BANK *bank = sdram_off_to_bank (dev, block); -+ unsigned bit = sdram_off_to_bit(dev, indx, block); -+ unsigned size = SDRAM_MIN_BLOCK_SIZE << indx; -+ -+ PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: block=%lx indx=%d bit=%x\n", block, indx, bit); -+ -+ ASSERT ((block & (size-1)) == 0); -+ ASSERT (BT_TEST (bank->Bitmaps[indx], bit) == 0); -+ -+ while (BT_TEST (bank->Bitmaps[indx], bit ^ 1)) -+ { -+ sdramaddr_t buddy = block ^ size; -+ -+ PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: merge block=%lx buddy=%lx indx=%d\n", block, buddy, indx); -+ -+ BT_CLEAR (bank->Bitmaps[indx], bit ^ 1); -+ -+ freelist_remove (dev, indx, buddy); -+ -+ block = (block < buddy) ? 
block : buddy;
-+        indx++;
-+        size <<= 1;
-+        bit >>= 1;
-+    }
-+
-+    PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: free block=%lx indx=%d bit=%x\n", block, indx, bit);
-+
-+    freelist_insert (dev, indx, block);
-+
-+    BT_SET (bank->Bitmaps[indx], bit);
-+}
-+
-+void
-+elan3_sdram_init (ELAN3_DEV *dev)
-+{
-+    int indx;
-+
-+    spin_lock_init (&dev->SdramLock);
-+
-+    for (indx = 0; indx < SDRAM_NUM_FREE_LISTS; indx++)
-+    {
-+        dev->SdramFreeLists[indx]  = (sdramaddr_t) 0;
-+        dev->SdramFreeCounts[indx] = 0;
-+    }
-+}
-+
-+void
-+elan3_sdram_fini (ELAN3_DEV *dev)
-+{
-+    spin_lock_destroy (&dev->SdramLock);
-+}
-+
-+void
-+elan3_sdram_add (ELAN3_DEV *dev, sdramaddr_t base, sdramaddr_t top)
-+{
-+    register int indx;
-+    register unsigned long size;
-+
-+    /* align to the minimum block size */
-+    base = (base + SDRAM_MIN_BLOCK_SIZE - 1) & ~((sdramaddr_t) SDRAM_MIN_BLOCK_SIZE-1);
-+    top &= ~((sdramaddr_t) SDRAM_MIN_BLOCK_SIZE-1);
-+
-+    /* don't allow 0 as a valid "base" */
-+    if (base == 0)
-+        base = E3_CACHE_SIZE;
-+
-+    /* carve the bottom to the biggest boundary */
-+    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1)
-+    {
-+        if ((base & size) == 0)
-+            continue;
-+
-+        if ((base + size) > top)
-+            break;
-+
-+        free_block (dev, base, indx);
-+
-+        base += size;
-+    }
-+
-+    /* carve the top down to the biggest boundary */
-+    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1)
-+    {
-+        if ((top & size) == 0)
-+            continue;
-+
-+        if ((top - size) < base)
-+            break;
-+
-+        free_block (dev, (top - size), indx);
-+
-+        top -= size;
-+    }
-+
-+    /* now free the space in between */
-+    while (base < top)
-+    {
-+        free_block (dev, base, (SDRAM_NUM_FREE_LISTS-1));
-+
-+        base += SDRAM_MAX_BLOCK_SIZE;
-+    }
-+}
-+
-+sdramaddr_t
-+elan3_sdram_alloc (ELAN3_DEV *dev, int nbytes)
-+{
-+    sdramaddr_t block;
-+    register int i, indx;
-+    unsigned long size;
-+    unsigned long flags;
-+
-+    spin_lock_irqsave (&dev->SdramLock, flags);
-+
-+    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size < nbytes; indx++, size <<= 1)
-+        ;
-+
-+    PRINTF2 (DBG_DEVICE, DBG_SDRAM, "elan3_sdram_alloc: nbytes=%d indx=%d\n", nbytes, indx);
-+
-+    /* find the smallest block which is big enough for this allocation */
-+    for (i = indx; i < SDRAM_NUM_FREE_LISTS; i++, size <<= 1)
-+        if (dev->SdramFreeLists[i])
-+            break;
-+
-+    if (i == SDRAM_NUM_FREE_LISTS)
-+    {
-+        spin_unlock_irqrestore (&dev->SdramLock, flags);
-+        return ((sdramaddr_t) 0);
-+    }
-+
-+    PRINTF2 (DBG_DEVICE, DBG_SDRAM, "elan3_sdram_alloc: use block=%lx indx=%d\n", dev->SdramFreeLists[i], i);
-+
-+    /* remove the block from the free list */
-+    freelist_removehead (dev, i, (block = dev->SdramFreeLists[i]));
-+
-+    /* clear the appropriate bit in the bitmap */
-+    BT_CLEAR (sdram_off_to_bank (dev, block)->Bitmaps[i], sdram_off_to_bit (dev,i, block));
-+
-+    /* and split it up as required */
-+    while (i-- > indx)
-+        free_block (dev, block + (size >>= 1), i);
-+
-+    PRINTF1 (DBG_DEVICE, DBG_SDRAM, "elan3_sdram_alloc: return block=%lx\n", block);
-+
-+    spin_unlock_irqrestore (&dev->SdramLock, flags);
-+
-+    ASSERT ((block & ((SDRAM_MIN_BLOCK_SIZE << (indx))-1)) == 0);
-+
-+    return ((sdramaddr_t) block);
-+}
-+
-+void
-+elan3_sdram_free (ELAN3_DEV *dev, sdramaddr_t block, int nbytes)
-+{
-+    register int indx;
-+    unsigned long size;
-+    unsigned long flags;
-+
-+    spin_lock_irqsave (&dev->SdramLock, flags);
-+
-+    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size < nbytes; indx++, size <<= 1)
-+        ;
-+
-+    PRINTF2 (DBG_DEVICE, DBG_SDRAM,
"elan3_sdram_free: indx=%d block=%lx\n", indx, block); -+ -+ free_block (dev, block, indx); -+ -+ spin_unlock_irqrestore (&dev->SdramLock, flags); -+} -+ -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan3/tproc.c linux-2.6.9/drivers/net/qsnet/elan3/tproc.c ---- clean/drivers/net/qsnet/elan3/tproc.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan3/tproc.c 2004-11-15 06:14:12.000000000 -0500 -@@ -0,0 +1,778 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: tproc.c,v 1.52 2004/11/15 11:14:12 mike Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/os/tproc.c,v $ */ -+ -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+int -+HandleTProcTrap (ELAN3_DEV *dev, E3_uint32 *RestartBits) -+{ -+ THREAD_TRAP *trap = dev->ThreadTrap; -+ int delay = 1; -+ -+ ASSERT(SPINLOCK_HELD (&dev->IntrLock)); -+ -+ trap->Status.Status = read_reg32 (dev, Exts.TProcStatus); -+ trap->sp = read_reg32 (dev, Thread_Desc_SP); -+ trap->pc = read_reg32 (dev, ExecutePC); -+ trap->npc = read_reg32 (dev, ExecuteNPC); -+ trap->StartPC = read_reg32 (dev, StartPC); -+ trap->mi = GET_STATUS_TRAPTYPE(trap->Status); -+ trap->TrapBits.Bits = read_reg32 (dev, TrapBits.Bits); -+ trap->DirtyBits.Bits = read_reg32 (dev, DirtyBits.Bits); -+ -+ if ( ! (trap->Status.s.WakeupFunction == SleepOneTick) ) { -+ int p,i; -+ E3_uint32 reg = read_reg32 (dev, Exts.InterruptReg); -+ -+ ELAN_REG_REC(reg); -+ p = elan_reg_rec_index; -+ for(i=0;iStatus.s.WakeupFunction == SleepOneTick); -+ -+ /* copy the four access fault areas */ -+ elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, TProc), (void *) &trap->FaultSave, 16); -+ elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcData), (void *) &trap->DataFaultSave, 16); -+ elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcInst), (void *) &trap->InstFaultSave, 16); -+ elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcOpen), (void *) &trap->OpenFaultSave, 16); -+ -+ /* copy the registers, note the endian swap flips the odd registers into the even registers -+ and visa versa. */ -+ copy_thread_regs (dev, trap->Registers); -+ -+ /* -+ * If the output was open then the ack may not have returned yet. Must wait for the -+ * ack to become valid and update trap_dirty with the new value. Will simulate the -+ * instructions later. -+ */ -+ if (trap->TrapBits.s.OutputWasOpen) -+ { -+ trap->TrapBits.Bits = read_reg32 (dev, TrapBits.Bits); -+ while (! 
trap->TrapBits.s.AckBufferValid) -+ { -+ PRINTF0 (DBG_DEVICE, DBG_INTR, "tproc: waiting for ack to become valid\n"); -+ trap->TrapBits.Bits = read_reg32 (dev, TrapBits.Bits); -+ DELAY (delay); -+ -+ if ((delay <<= 1) == 0) delay = 1; -+ } -+ } -+ -+ /* update device statistics */ -+ BumpStat (dev, TProcTraps); -+ switch (trap->mi) -+ { -+ case MI_UnimplementedError: -+ if (trap->TrapBits.s.ForcedTProcTrap) -+ BumpStat (dev, ForcedTProcTraps); -+ if (trap->TrapBits.s.ThreadTimeout) -+ { -+ if (trap->TrapBits.s.PacketTimeout) -+ BumpStat (dev, ThreadOutputTimeouts); -+ else if (trap->TrapBits.s.PacketAckValue == E3_PAckError) -+ BumpStat (dev, ThreadPacketAckErrors); -+ } -+ if (trap->TrapBits.s.TrapForTooManyInsts) -+ BumpStat (dev, TrapForTooManyInsts); -+ break; -+ } -+ -+ elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, TProc), 16); -+ elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcData), 16); -+ elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcInst), 16); -+ elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcOpen), 16); -+ -+ *RestartBits |= RestartTProc; -+ -+ return (TRUE); -+} -+ -+void -+DeliverTProcTrap (ELAN3_DEV *dev, THREAD_TRAP *threadTrap, E3_uint32 Pend) -+{ -+ ELAN3_CTXT *ctxt; -+ THREAD_TRAP *trap; -+ -+ ASSERT(SPINLOCK_HELD (&dev->IntrLock)); -+ -+ ctxt = ELAN3_DEV_CTX_TABLE(dev, threadTrap->Status.s.Context); -+ -+ if (ctxt == NULL) -+ { -+ PRINTF1 (DBG_DEVICE, DBG_INTR, "DeliverTProcTrap: context %x invalid\n", threadTrap->Status.s.Context); -+ BumpStat (dev, InvalidContext); -+ } -+ else -+ { -+ if (ELAN3_OP_TPROC_TRAP (ctxt, threadTrap) == OP_DEFER) -+ { -+ if (ELAN3_QUEUE_REALLY_FULL (ctxt->ThreadTrapQ)) -+ { -+ ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR; -+ StartSwapoutContext (ctxt, Pend, NULL); -+ } -+ else -+ { -+ trap = ELAN3_QUEUE_BACK (ctxt->ThreadTrapQ, ctxt->ThreadTraps); -+ -+ bcopy (threadTrap, trap, sizeof (THREAD_TRAP)); -+ -+ PRINTF4 (ctxt, DBG_INTR, "DeliverTProcTrap: SP=%08x PC=%08x NPC=%08x StartPC %08x\n", -+ trap->sp, trap->pc, trap->npc, trap->StartPC); -+ PRINTF3 (ctxt, DBG_INTR, " mi=%s trap=%08x dirty=%08x\n", -+ MiToName (trap->mi), trap->TrapBits.Bits, trap->DirtyBits.Bits); -+ PRINTF3 (ctxt, DBG_INTR, " FaultSave : FaultAddress %08x EventAddress %08x FSR %08x\n", -+ trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress, trap->FaultSave.s.FSR.Status); -+ PRINTF3 (ctxt, DBG_INTR, " DataFault : FaultAddress %08x EventAddress %08x FSR %08x\n", -+ trap->DataFaultSave.s.FaultAddress, trap->DataFaultSave.s.EventAddress, trap->DataFaultSave.s.FSR.Status); -+ PRINTF3 (ctxt, DBG_INTR, " InstFault : FaultAddress %08x EventAddress %08x FSR %08x\n", -+ trap->InstFaultSave.s.FaultAddress, trap->InstFaultSave.s.EventAddress, trap->InstFaultSave.s.FSR.Status); -+ PRINTF3 (ctxt, DBG_INTR, " OpenFault : FaultAddress %08x EventAddress %08x FSR %08x\n", -+ trap->OpenFaultSave.s.FaultAddress, trap->OpenFaultSave.s.EventAddress, trap->OpenFaultSave.s.FSR.Status); -+ -+ PRINTF4 (ctxt, DBG_INTR, " g0=%08x g1=%08x g2=%08x g3=%08x\n", -+ trap->Registers[REG_GLOBALS+(0^WordEndianFlip)], trap->Registers[REG_GLOBALS+(1^WordEndianFlip)], -+ trap->Registers[REG_GLOBALS+(2^WordEndianFlip)], trap->Registers[REG_GLOBALS+(3^WordEndianFlip)]); -+ PRINTF4 (ctxt, DBG_INTR, " g4=%08x g5=%08x g6=%08x g7=%08x\n", -+ trap->Registers[REG_GLOBALS+(4^WordEndianFlip)], trap->Registers[REG_GLOBALS+(5^WordEndianFlip)], -+ 
trap->Registers[REG_GLOBALS+(6^WordEndianFlip)], trap->Registers[REG_GLOBALS+(7^WordEndianFlip)]); -+ PRINTF4 (ctxt, DBG_INTR, " o0=%08x o1=%08x o2=%08x o3=%08x\n", -+ trap->Registers[REG_OUTS+(0^WordEndianFlip)], trap->Registers[REG_OUTS+(1^WordEndianFlip)], -+ trap->Registers[REG_OUTS+(2^WordEndianFlip)], trap->Registers[REG_OUTS+(3^WordEndianFlip)]); -+ PRINTF4 (ctxt, DBG_INTR, " o4=%08x o5=%08x o6=%08x o7=%08x\n", -+ trap->Registers[REG_OUTS+(4^WordEndianFlip)], trap->Registers[REG_OUTS+(5^WordEndianFlip)], -+ trap->Registers[REG_OUTS+(6^WordEndianFlip)], trap->Registers[REG_OUTS+(7^WordEndianFlip)]); -+ PRINTF4 (ctxt, DBG_INTR, " l0=%08x l1=%08x l2=%08x l3=%08x\n", -+ trap->Registers[REG_LOCALS+(0^WordEndianFlip)], trap->Registers[REG_LOCALS+(1^WordEndianFlip)], -+ trap->Registers[REG_LOCALS+(2^WordEndianFlip)], trap->Registers[REG_LOCALS+(3^WordEndianFlip)]); -+ PRINTF4 (ctxt, DBG_INTR, " l4=%08x l5=%08x l6=%08x l7=%08x\n", -+ trap->Registers[REG_LOCALS+(4^WordEndianFlip)], trap->Registers[REG_LOCALS+(5^WordEndianFlip)], -+ trap->Registers[REG_LOCALS+(6^WordEndianFlip)], trap->Registers[REG_LOCALS+(7^WordEndianFlip)]); -+ PRINTF4 (ctxt, DBG_INTR, " i0=%08x i1=%08x i2=%08x i3=%08x\n", -+ trap->Registers[REG_INS+(0^WordEndianFlip)], trap->Registers[REG_INS+(1^WordEndianFlip)], -+ trap->Registers[REG_INS+(2^WordEndianFlip)], trap->Registers[REG_INS+(3^WordEndianFlip)]); -+ PRINTF4 (ctxt, DBG_INTR, " i4=%08x i5=%08x i6=%08x i7=%08x\n", -+ trap->Registers[REG_INS+(4^WordEndianFlip)], trap->Registers[REG_INS+(5^WordEndianFlip)], -+ trap->Registers[REG_INS+(6^WordEndianFlip)], trap->Registers[REG_INS+(7^WordEndianFlip)]); -+ -+ ELAN3_QUEUE_ADD (ctxt->ThreadTrapQ); -+ kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock); -+ -+ if (ELAN3_QUEUE_FULL (ctxt->ThreadTrapQ)) -+ { -+ PRINTF0 (ctxt, DBG_INTR, "DeliverTProcTrap: thread queue full, must swap out\n"); -+ ctxt->Status |= CTXT_THREAD_QUEUE_FULL; -+ -+ StartSwapoutContext (ctxt, Pend, NULL); -+ } -+ } -+ } -+ } -+} -+ -+int -+NextTProcTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap) -+{ -+ ELAN3_DEV *dev = ctxt->Device; -+ -+ ASSERT (SPINLOCK_HELD (&dev->IntrLock)); -+ -+ if (ELAN3_QUEUE_EMPTY (ctxt->ThreadTrapQ)) -+ return (0); -+ -+ *trap = *ELAN3_QUEUE_FRONT (ctxt->ThreadTrapQ, ctxt->ThreadTraps); -+ ELAN3_QUEUE_REMOVE (ctxt->ThreadTrapQ); -+ -+ return (1); -+} -+ -+void -+ResolveTProcTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap) -+{ -+ int i; -+ int res; -+ E3_Addr StackPointer; -+ -+ PRINTF4 (ctxt, DBG_TPROC, "ResolveTProcTrap: SP=%08x PC=%08x NPC=%08x StartPC %08x\n", -+ trap->sp, trap->pc, trap->npc, trap->StartPC); -+ PRINTF3 (ctxt, DBG_TPROC, " mi=%s trap=%08x dirty=%08x\n", -+ MiToName (trap->mi), trap->TrapBits.Bits, trap->DirtyBits.Bits); -+ PRINTF3 (ctxt, DBG_TPROC, " FaultSave : FaultAddress %08x EventAddress %08x FSR %08x\n", -+ trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress, trap->FaultSave.s.FSR.Status); -+ PRINTF3 (ctxt, DBG_TPROC, " DataFault : FaultAddress %08x EventAddress %08x FSR %08x\n", -+ trap->DataFaultSave.s.FaultAddress, trap->DataFaultSave.s.EventAddress, trap->DataFaultSave.s.FSR.Status); -+ PRINTF3 (ctxt, DBG_TPROC, " InstFault : FaultAddress %08x EventAddress %08x FSR %08x\n", -+ trap->InstFaultSave.s.FaultAddress, trap->InstFaultSave.s.EventAddress, trap->InstFaultSave.s.FSR.Status); -+ PRINTF3 (ctxt, DBG_TPROC, " OpenFault : FaultAddress %08x EventAddress %08x FSR %08x\n", -+ trap->OpenFaultSave.s.FaultAddress, trap->OpenFaultSave.s.EventAddress, trap->OpenFaultSave.s.FSR.Status); -+ -+ PRINTF4 (ctxt, 
DBG_TPROC, " g0=%08x g1=%08x g2=%08x g3=%08x\n", -+ trap->Registers[REG_GLOBALS+(0^WordEndianFlip)], trap->Registers[REG_GLOBALS+(1^WordEndianFlip)], -+ trap->Registers[REG_GLOBALS+(2^WordEndianFlip)], trap->Registers[REG_GLOBALS+(3^WordEndianFlip)]); -+ PRINTF4 (ctxt, DBG_TPROC, " g4=%08x g5=%08x g6=%08x g7=%08x\n", -+ trap->Registers[REG_GLOBALS+(4^WordEndianFlip)], trap->Registers[REG_GLOBALS+(5^WordEndianFlip)], -+ trap->Registers[REG_GLOBALS+(6^WordEndianFlip)], trap->Registers[REG_GLOBALS+(7^WordEndianFlip)]); -+ PRINTF4 (ctxt, DBG_TPROC, " o0=%08x o1=%08x o2=%08x o3=%08x\n", -+ trap->Registers[REG_OUTS+(0^WordEndianFlip)], trap->Registers[REG_OUTS+(1^WordEndianFlip)], -+ trap->Registers[REG_OUTS+(2^WordEndianFlip)], trap->Registers[REG_OUTS+(3^WordEndianFlip)]); -+ PRINTF4 (ctxt, DBG_TPROC, " o4=%08x o5=%08x o6=%08x o7=%08x\n", -+ trap->Registers[REG_OUTS+(4^WordEndianFlip)], trap->Registers[REG_OUTS+(5^WordEndianFlip)], -+ trap->Registers[REG_OUTS+(6^WordEndianFlip)], trap->Registers[REG_OUTS+(7^WordEndianFlip)]); -+ PRINTF4 (ctxt, DBG_TPROC, " l0=%08x l1=%08x l2=%08x l3=%08x\n", -+ trap->Registers[REG_LOCALS+(0^WordEndianFlip)], trap->Registers[REG_LOCALS+(1^WordEndianFlip)], -+ trap->Registers[REG_LOCALS+(2^WordEndianFlip)], trap->Registers[REG_LOCALS+(3^WordEndianFlip)]); -+ PRINTF4 (ctxt, DBG_TPROC, " l4=%08x l5=%08x l6=%08x l7=%08x\n", -+ trap->Registers[REG_LOCALS+(4^WordEndianFlip)], trap->Registers[REG_LOCALS+(5^WordEndianFlip)], -+ trap->Registers[REG_LOCALS+(6^WordEndianFlip)], trap->Registers[REG_LOCALS+(7^WordEndianFlip)]); -+ PRINTF4 (ctxt, DBG_TPROC, " i0=%08x i1=%08x i2=%08x i3=%08x\n", -+ trap->Registers[REG_INS+(0^WordEndianFlip)], trap->Registers[REG_INS+(1^WordEndianFlip)], -+ trap->Registers[REG_INS+(2^WordEndianFlip)], trap->Registers[REG_INS+(3^WordEndianFlip)]); -+ PRINTF4 (ctxt, DBG_TPROC, " i4=%08x i5=%08x i6=%08x i7=%08x\n", -+ trap->Registers[REG_INS+(4^WordEndianFlip)], trap->Registers[REG_INS+(5^WordEndianFlip)], -+ trap->Registers[REG_INS+(6^WordEndianFlip)], trap->Registers[REG_INS+(7^WordEndianFlip)]); -+ -+ -+ BumpUserStat (ctxt, TProcTraps); -+ -+ switch (trap->mi) -+ { -+ case MI_UnimplementedError: -+ { -+ /* -+ * This occurs if the threads processor trapped. All other cases will be for the ucode -+ * thread trapping. -+ */ -+ int restart = 1; -+ int skip = 0; -+ -+ PRINTF1 (ctxt, DBG_TPROC, "TProc: Mi=Unimp. Using trap->TrapBits=%x\n", trap->TrapBits.Bits); -+ -+ /* -+ * Data Access Exception. -+ */ -+ if (trap->TrapBits.s.DataAccessException) -+ { -+ ASSERT (CTXT_IS_KERNEL(ctxt) || trap->DataFaultSave.s.FSR.Status == 0 || -+ ctxt->Capability.cap_mycontext == trap->DataFaultSave.s.FaultContext); -+ -+ PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: DataAccessException %08x\n", trap->DataFaultSave.s.FaultAddress); -+ -+ if ((res = elan3_pagefault (ctxt, &trap->DataFaultSave, 1)) != ESUCCESS) -+ { -+ PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: elan3_pagefault failed for data %08x\n", -+ trap->DataFaultSave.s.FaultAddress); -+ -+ if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, THREAD_PROC, trap, &trap->DataFaultSave, res) != OP_IGNORE) -+ restart = 0; -+ } -+ } -+ -+ /* -+ * Instruction Access Exception. 
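-+	 * Handled like the data fault above, but note the asymmetry: a data
-+	 * access fault can leave `restart' set when the exception handler
-+	 * returns OP_IGNORE, whereas a failed instruction fault always clears
-+	 * it, since the thread could not re-fetch from the bad PC anyway.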
-+ */ -+ if (trap->TrapBits.s.InstAccessException) -+ { -+ ASSERT (CTXT_IS_KERNEL (ctxt) || trap->InstFaultSave.s.FSR.Status == 0 || -+ ctxt->Capability.cap_mycontext == trap->InstFaultSave.s.FaultContext); -+ -+ PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: InstAccessException %08x\n", trap->InstFaultSave.s.FaultAddress); -+ -+ if ((res = elan3_pagefault (ctxt, &trap->InstFaultSave, 1)) != ESUCCESS) -+ { -+ PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: elan3_pagefault failed for inst %08x\n", -+ trap->InstFaultSave.s.FaultAddress); -+ -+ ElanException (ctxt, EXCEPTION_INVALID_ADDR, THREAD_PROC, trap, &trap->InstFaultSave, res); -+ restart = 0; -+ } -+ } -+ -+ /* -+ * Forced TProc trap/Unimplemented instruction -+ * -+ * If there is a force tproc trap then don't look at -+ * the unimplemented instruction bit - since it can -+ * be set in obscure circumstances. -+ */ -+ if (trap->TrapBits.s.ForcedTProcTrap) -+ PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: forced tproc trap, restarting\n"); -+ else if (trap->TrapBits.s.Unimplemented) -+ { -+ E3_uint32 instr = ELAN3_OP_LOAD32 (ctxt, trap->pc & PC_MASK); -+ -+ PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: unimplemented instruction %08x\n", instr); -+ -+ if ((instr & OPCODE_MASK) == OPCODE_Ticc && -+ (instr & OPCODE_IMM) == OPCODE_IMM && -+ (Ticc_COND(instr) == Ticc_TA)) -+ { -+ switch (INSTR_IMM(instr)) -+ { -+ case ELAN3_ELANCALL_TRAPNUM: -+ /* -+ * Since the thread cannot easily access the global variable which holds -+ * the elan system call number, we provide a different trap for the elan -+ * system call, and copy the system call number into %g1 before calling -+ * ThreadSyscall(). -+ */ -+ BumpUserStat (ctxt, ThreadElanCalls); -+ -+ if (ThreadElancall (ctxt, trap, &skip) != ESUCCESS) -+ { -+ ElanException (ctxt, EXCEPTION_BAD_SYSCALL, THREAD_PROC, trap); -+ restart = 0; -+ } -+ break; -+ -+ case ELAN3_SYSCALL_TRAPNUM: -+ BumpUserStat (ctxt, ThreadSystemCalls); -+ -+ if (ThreadSyscall (ctxt, trap, &skip) != ESUCCESS) -+ { -+ ElanException (ctxt, EXCEPTION_BAD_SYSCALL, THREAD_PROC, trap); -+ restart = 0; -+ } -+ break; -+ -+ case ELAN3_DEBUG_TRAPNUM: -+ ElanException (ctxt, EXCEPTION_DEBUG, THREAD_PROC, trap); -+ skip = 1; -+ break; -+ -+ case ELAN3_ABORT_TRAPNUM: -+ default: -+ ElanException (ctxt, EXCEPTION_UNIMP_INSTR, THREAD_PROC, trap, instr); -+ restart = 0; -+ break; -+ } -+ -+ } -+ else -+ { -+ ElanException (ctxt, EXCEPTION_UNIMP_INSTR, THREAD_PROC, trap, instr); -+ restart = 0; -+ } -+ } -+ -+ /* -+ * Faulted fetching routes. -+ */ -+ if (trap->TrapBits.s.OpenRouteFetch) -+ { -+ PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: OpenRouteFetch %08x\n", trap->OpenFaultSave.s.FaultAddress); -+ -+ if ((res = ResolveVirtualProcess (ctxt, trap->OpenFaultSave.s.FaultAddress)) != ESUCCESS && -+ ElanException (ctxt, EXCEPTION_INVALID_PROCESS, THREAD_PROC, trap, trap->DataFaultSave.s.FaultAddress, res) != OP_IGNORE) -+ { -+ restart = 0; -+ } -+ else if (RollThreadToClose (ctxt, trap, E3_PAckDiscard) != ESUCCESS) /* Force a discard */ -+ { -+ restart = 0; -+ } -+ } -+ -+ /* -+ * Thread Timeout -+ */ -+ if (trap->TrapBits.s.ThreadTimeout) -+ { -+ if (ElanException (ctxt, EXCEPTION_PACKET_TIMEOUT, THREAD_PROC, trap) != OP_IGNORE) -+ restart = 0; -+ else -+ { -+ PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: timeout or PAckError!\n"); -+ -+ /* Might deschedule the thread for a while or mark the link error here. */ -+ if (! 
trap->TrapBits.s.OutputWasOpen && RollThreadToClose (ctxt, trap, trap->TrapBits.s.PacketAckValue) != ESUCCESS)
-+                {
-+                    restart = 0;
-+                }
-+            }
-+        }
-+
-+        /*
-+         * Open exception
-+         */
-+        if (trap->TrapBits.s.OpenException)
-+        {
-+            PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: open exception\n");
-+            if (ElanException (ctxt, EXCEPTION_THREAD_KILLED, THREAD_PROC, trap) != OP_IGNORE)
-+                restart = 0;
-+        }
-+
-+        /*
-+         * Too many instructions.
-+         */
-+        if (trap->TrapBits.s.TrapForTooManyInsts)
-+        {
-+            PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: too many instructions\n");
-+            if (ElanException (ctxt, EXCEPTION_THREAD_KILLED, THREAD_PROC, trap) != OP_IGNORE)
-+                restart = 0;
-+        }
-+
-+        if (restart)
-+        {
-+            /*
-+             * If the output was open when the trap was taken then the trap code must move
-+             * the PC on past the close instruction and simulate the effect of all the instructions
-+             * that do not output onto the link. The value of the ack received is then used to
-+             * simulate the close instruction.
-+             */
-+            if (trap->TrapBits.s.OutputWasOpen && RollThreadToClose(ctxt, trap, trap->TrapBits.s.PacketAckValue) != ESUCCESS)
-+            {
-+                /*
-+                 * Don't restart if we couldn't roll it forwards
-+                 * to a close instruction.
-+                 */
-+                break;
-+            }
-+
-+            /*
-+             * We must check back 3 instructions from the PC, and if we see the
-+             * c_close_cookie() sequence then we must execute the instructions to
-+             * the end of it.
-+             */
-+            /* XXXX: code to be written */
-+
-+            StackPointer = SaveThreadToStack (ctxt, trap, skip);
-+
-+            ReissueStackPointer (ctxt, StackPointer);
-+        }
-+
-+        break;
-+    }
-+
-+    /*
-+     * This case is different from the others as %o6 has been overwritten with
-+     * the SP. The real PC can be read from StartPC and written back
-+     * into %o6 on the stack.
-+     */
-+    case MI_TProcNext:                  /* Reading the outs block */
-+    {
-+        E3_Addr stack = (trap->sp & SP_MASK) - sizeof (E3_Stack);
-+
-+        if (ELAN3_OP_START_FAULT_CHECK (ctxt))
-+        {
-+            ELAN3_OP_END_FAULT_CHECK (ctxt);
-+
-+            PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: faulted writing StartPc to o6\n");
-+            ElanException (ctxt, EXCEPTION_CANNOT_SAVE_THREAD, THREAD_PROC, NULL);
-+            break;
-+        }
-+        ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Outs[6]), trap->StartPC & PC_MASK);
-+        ELAN3_OP_END_FAULT_CHECK (ctxt);
-+        /* DROPTHROUGH */
-+    }
-+    /*
-+     * all of these will be generated when starting up a thread.
-+     * Just re-issue the command after fixing the trap. The ucode keeps the startup
-+     * from trap information in Thread_Desc_SP while it is still loading the regs.
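-+     * (Re-issuing here means queueing trap->sp via ReissueStackPointer();
-+     * RestartTProcItems() below later feeds each queued value back through
-+     * the RunThread entry of the command port.)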
-+     */
-+    case MI_WaitForGlobalsRead:         /* Reading the globals block (trap restart) */
-+    case MI_WaitForNPCRead:             /* Reading the nPC, V and C (trap restart) */
-+    case MI_WaitForPCload:              /* Reading the PC, N and Z (trap restart) */
-+    case MI_WaitForInsRead:             /* Reading the ins block (trap restart) */
-+    case MI_WaitForLocals:              /* Reading the locals block (trap restart) */
-+    case MI_WaitForPCload2:             /* Reading the PC (normal thread start) */
-+    case MI_WaitForSpStore:             /* Writing the SP to the outs block */
-+        PRINTF2 (ctxt, DBG_TPROC, "ResolveTProcTrap: %s %08x\n", MiToName (trap->mi), trap->InstFaultSave.s.FaultAddress);
-+
-+        if ((res = elan3_pagefault (ctxt, &trap->FaultSave, 1)) != ESUCCESS)
-+        {
-+            PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: elan3_pagefault failed at %08x\n",
-+                     trap->FaultSave.s.FaultAddress);
-+            if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, THREAD_PROC, &trap->FaultSave, trap, res) != OP_IGNORE)
-+                break;
-+        }
-+
-+        ReissueStackPointer (ctxt, trap->sp);
-+        break;
-+
-+    /*
-+     * These traps could occur after the threads proc has stopped (either for a wait,
-+     * break, or suspend, but not a trap). Must simulate the uCode's job.
-+     */
-+    case MI_WaitForOutsWrite:           /* Writing the outs block */
-+    case MI_WaitForNPCWrite:            /* Writing the nPC block */
-+    {
-+        E3_uint32 DeschedBits = (trap->TrapBits.Bits & E3_TProcDescheduleMask);
-+        E3_Addr   stack       = (trap->sp & SP_MASK) - sizeof (E3_Stack);
-+
-+        PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: trapped on %s while stopping a thread\n", MiToName(trap->mi));
-+
-+        /*
-+         * Copy npc into o6.
-+         */
-+        trap->Registers[REG_OUTS+(6^WordEndianFlip)] = trap->npc;
-+
-+        if (ELAN3_OP_START_FAULT_CHECK (ctxt))
-+        {
-+            ELAN3_OP_END_FAULT_CHECK (ctxt);
-+
-+            PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: faulted writing outs to stack\n");
-+            ElanException (ctxt, EXCEPTION_CANNOT_SAVE_THREAD, THREAD_PROC, NULL);
-+            break;
-+        }
-+
-+        /*
-+         * Now write the outs back to the stack. NOTE the endian flip is undone.
-+         */
-+        for (i = 0; i < 8; i++)
-+            ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Outs[i]), trap->Registers[REG_OUTS+(i^WordEndianFlip)]);
-+        ELAN3_OP_END_FAULT_CHECK (ctxt);
-+
-+        /*
-+         * thread has been saved. Now find out why the thread proc stopped.
-+         */
-+        if (DeschedBits == E3_TProcDescheduleSuspend)
-+        {
-+            PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: suspend instruction executed\n");
-+            break;
-+        }
-+
-+        /*
-+         * Break. Just reissue the command.
-+         */
-+        if (DeschedBits == E3_TProcDescheduleBreak)
-+        {
-+            PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: break instruction, reissue sp %08x\n", trap->sp);
-+            ReissueStackPointer (ctxt, trap->sp);
-+            break;
-+        }
-+
-+        ASSERT (DeschedBits == E3_TProcDescheduleWait);
-+
-+        /* DROPTHROUGH to fix up a wait event */
-+    }
-+
-+    /*
-+     * Trapped here trying to execute a wait instruction. All the thread state has already
-+     * been saved and the trap has been fixed so simplest thing to do is to start the
-+     * thread up at the wait instruction again.
-+     */
-+    case MI_WaitForEventWaitAddr:       /* Reading back the %o0,%o1 pair for a
-+                                           wait event instr. */
-+    case MI_WaitForWaitEventAccess:     /* Locked dword read of the event location.
-+ Note that this read is done with write -+ permissions so we never get a trap on the write */ -+ { -+ E3_Addr stack = (trap->sp & SP_MASK) - sizeof (E3_Stack); -+ -+ if ((res = elan3_pagefault (ctxt, &trap->FaultSave, 1)) != ESUCCESS) -+ { -+ PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: elan3_pagefault failed at %08x\n", -+ trap->FaultSave.s.FaultAddress); -+ if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, THREAD_PROC, trap, &trap->DataFaultSave, res) != OP_IGNORE) -+ break; -+ } -+ -+ if (ELAN3_OP_START_FAULT_CHECK (ctxt)) -+ { -+ ELAN3_OP_END_FAULT_CHECK (ctxt); -+ -+ PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: faulted writing pc to stack\n"); -+ ElanException (ctxt, EXCEPTION_CANNOT_SAVE_THREAD, THREAD_PROC, NULL); -+ break; -+ } -+ -+ ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Outs[6]), trap->pc); -+ ELAN3_OP_END_FAULT_CHECK (ctxt); -+ -+ ReissueStackPointer (ctxt, trap->sp); -+ break; -+ } -+ -+ /* -+ * Assume the fault will be fixed by FixupEventTrap. -+ */ -+ default: -+ FixupEventTrap (ctxt, THREAD_PROC, trap, trap->mi, &trap->FaultSave, 0); -+ break; -+ } -+} -+ -+int -+TProcNeedsRestart (ELAN3_CTXT *ctxt) -+{ -+ return (ctxt->ItemCount[LIST_THREAD] != 0); -+} -+ -+void -+RestartTProcItems (ELAN3_CTXT *ctxt) -+{ -+ void *item; -+ E3_uint32 StackPointer; -+ -+ kmutex_lock (&ctxt->SwapListsLock); -+ -+ while (ctxt->ItemCount[LIST_THREAD]) -+ { -+ if (! ELAN3_OP_GET_WORD_ITEM (ctxt, LIST_THREAD, &item, &StackPointer)) -+ ctxt->ItemCount[LIST_THREAD] = 0; -+ else -+ { -+ if (IssueCommand (ctxt, offsetof (E3_CommandPort, RunThread), StackPointer, 0) == ISSUE_COMMAND_RETRY) -+ { -+ ELAN3_OP_PUTBACK_ITEM (ctxt, LIST_THREAD, item); -+ kmutex_unlock (&ctxt->SwapListsLock); -+ return; -+ } -+ -+ ctxt->ItemCount[LIST_THREAD]--; -+ ELAN3_OP_FREE_WORD_ITEM (ctxt, item); -+ } -+ } -+ kmutex_unlock (&ctxt->SwapListsLock); -+} -+ -+E3_Addr -+SaveThreadToStack (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int SkipInstruction) -+{ -+ E3_Addr stack = (trap->sp & SP_MASK) - sizeof (E3_Stack); -+ E3_Addr orflag; -+ register int i; -+ -+ /* -+ * When the thread deschedules normally, the N & Z flags are written -+ * to the stack in o6, and the V & C flags are lost. -+ * Since the Elan will store the NPC into o6 (to skip the instruction), -+ * the CC flags are visible to the trap handler in the trapped PC and NPC. -+ * If the instruction needs to be re-executed then the CC flags need to be -+ * kept in the right place to be read in when the thread re-starts. -+ * -+ * PC has N & Z from trapped NPC. -+ * NPC has V & C from trapped PC. 
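-+ * In the non-skip case below this recombines as:
-+ *
-+ *   o6 = (pc  & PC_MASK) | (npc & CC_MASK)   restart PC plus N,Z
-+ *   g0 = (npc & PC_MASK) | (pc  & CC_MASK)   restart NPC plus V,C
-+ *
-+ * while the skip case instead advances the restart NPC by one
-+ * instruction (+4) past the trapped one.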
-+ */ -+ if (SkipInstruction) -+ { -+ trap->Registers[REG_OUTS+(6^WordEndianFlip)] = trap->npc; -+ trap->Registers[REG_GLOBALS+(0^WordEndianFlip)] = ((trap->npc & PC_MASK) + 4) | (trap->pc & CC_MASK); -+ } -+ else -+ { -+ trap->Registers[REG_OUTS+(6^WordEndianFlip)] = (trap->pc & PC_MASK) | (trap->npc & CC_MASK); -+ trap->Registers[REG_GLOBALS+(0^WordEndianFlip)] = (trap->npc & PC_MASK) | (trap->pc & CC_MASK); -+ } -+ -+ if (ELAN3_OP_START_FAULT_CHECK(ctxt)) -+ { -+ PRINTF0 (ctxt, DBG_TPROC, "RestartThread: faulted writing out thread\n"); -+ ELAN3_OP_END_FAULT_CHECK(ctxt); -+ -+ ElanException (ctxt, EXCEPTION_CANNOT_SAVE_THREAD, THREAD_PROC, NULL); -+ return ((E3_Addr) 0); -+ } -+ -+ -+#ifdef DEBUG_PRINTF -+ PRINTF4 (ctxt, DBG_TPROC, "SaveThreadToStack: SP=%08x PC=%08x NPC=%08x DIRTY=%08x\n", -+ trap->sp, trap->pc, trap->npc, trap->DirtyBits.Bits); -+ if (trap->DirtyBits.s.GlobalsDirty) -+ { -+ PRINTF4 (ctxt, DBG_TPROC, " g0=%08x g1=%08x g2=%08x g3=%08x\n", -+ trap->Registers[REG_GLOBALS+(0^WordEndianFlip)], trap->Registers[REG_GLOBALS+(1^WordEndianFlip)], -+ trap->Registers[REG_GLOBALS+(2^WordEndianFlip)], trap->Registers[REG_GLOBALS+(3^WordEndianFlip)]); -+ PRINTF4 (ctxt, DBG_TPROC, " g4=%08x g5=%08x g6=%08x g7=%08x\n", -+ trap->Registers[REG_GLOBALS+(4^WordEndianFlip)], trap->Registers[REG_GLOBALS+(5^WordEndianFlip)], -+ trap->Registers[REG_GLOBALS+(6^WordEndianFlip)], trap->Registers[REG_GLOBALS+(7^WordEndianFlip)]); -+ } -+ if (trap->DirtyBits.s.OutsDirty) -+ { -+ PRINTF4 (ctxt, DBG_TPROC, " o0=%08x o1=%08x o2=%08x o3=%08x\n", -+ trap->Registers[REG_OUTS+(0^WordEndianFlip)], trap->Registers[REG_OUTS+(1^WordEndianFlip)], -+ trap->Registers[REG_OUTS+(2^WordEndianFlip)], trap->Registers[REG_OUTS+(3^WordEndianFlip)]); -+ PRINTF4 (ctxt, DBG_TPROC, " o4=%08x o5=%08x o6=%08x o7=%08x\n", -+ trap->Registers[REG_OUTS+(4^WordEndianFlip)], trap->Registers[REG_OUTS+(5^WordEndianFlip)], -+ trap->Registers[REG_OUTS+(6^WordEndianFlip)], trap->Registers[REG_OUTS+(7^WordEndianFlip)]); -+ } -+ if (trap->DirtyBits.s.LocalsDirty) -+ { -+ PRINTF4 (ctxt, DBG_TPROC, " l0=%08x l1=%08x l2=%08x l3=%08x\n", -+ trap->Registers[REG_LOCALS+(0^WordEndianFlip)], trap->Registers[REG_LOCALS+(1^WordEndianFlip)], -+ trap->Registers[REG_LOCALS+(2^WordEndianFlip)], trap->Registers[REG_LOCALS+(3^WordEndianFlip)]); -+ PRINTF4 (ctxt, DBG_TPROC, " l4=%08x l5=%08x l6=%08x l7=%08x\n", -+ trap->Registers[REG_LOCALS+(4^WordEndianFlip)], trap->Registers[REG_LOCALS+(5^WordEndianFlip)], -+ trap->Registers[REG_LOCALS+(6^WordEndianFlip)], trap->Registers[REG_LOCALS+(7^WordEndianFlip)]); -+ } -+ if (trap->DirtyBits.s.InsDirty) -+ { -+ PRINTF4 (ctxt, DBG_TPROC, " i0=%08x i1=%08x i2=%08x i3=%08x\n", -+ trap->Registers[REG_INS+(0^WordEndianFlip)], trap->Registers[REG_INS+(1^WordEndianFlip)], -+ trap->Registers[REG_INS+(2^WordEndianFlip)], trap->Registers[REG_INS+(3^WordEndianFlip)]); -+ PRINTF4 (ctxt, DBG_TPROC, " i4=%08x i5=%08x i6=%08x i7=%08x\n", -+ trap->Registers[REG_INS+(4^WordEndianFlip)], trap->Registers[REG_INS+(5^WordEndianFlip)], -+ trap->Registers[REG_INS+(6^WordEndianFlip)], trap->Registers[REG_INS+(7^WordEndianFlip)]); -+ } -+#endif -+ -+ PRINTF1 (ctxt, DBG_TPROC, "flushing registers to stack %08x\n", stack); -+ -+ /* -+ * NOTE - store the register to the stack in reverse order, since the stack -+ * will be allocated in sdram, and we cannot use the sdram accessing functions -+ * here, as it is "mapped" in user-space. 
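-+	 *
-+	 * A minimal sketch of the dirty-mask gating used below, assuming each
-+	 * DirtyBits field is an 8-bit mask with one bit per register of the
-+	 * window:
-+	 *
-+	 *     if (trap->DirtyBits.s.OutsDirty & (1 << i))
-+	 *         ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Outs[i]),
-+	 *                           trap->Registers[REG_OUTS+(i^WordEndianFlip)]);
-+	 *
-+	 * so only registers the thread actually dirtied are flushed back.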
-+ */ -+ for (i = 0; i < 8; i++) -+ { -+ if (trap->DirtyBits.s.GlobalsDirty & (1 << i)) -+ ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Globals[i]), trap->Registers[REG_GLOBALS+(i^WordEndianFlip)]); -+ if (trap->DirtyBits.s.OutsDirty & (1 << i)) -+ ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Outs[i]), trap->Registers[REG_OUTS+(i^WordEndianFlip)]); -+ if (trap->DirtyBits.s.LocalsDirty & (1 << i)) -+ ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Locals[i]), trap->Registers[REG_LOCALS+(i^WordEndianFlip)]); -+ if (trap->DirtyBits.s.InsDirty & (1 << i)) -+ ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Ins[i]), trap->Registers[REG_INS+(i^WordEndianFlip)]); -+ } -+ -+ /* always restore all registers */ -+ orflag = ThreadRestartFromTrapBit | ThreadReloadAllRegs; -+ -+ ELAN3_OP_END_FAULT_CHECK (ctxt); -+ -+ return (trap->sp | orflag); -+} -+ -+void -+ReissueStackPointer (ELAN3_CTXT *ctxt, E3_Addr StackPointer) -+{ -+ PRINTF1 (ctxt, DBG_TPROC, "ReissueStackPointer : Queue SP %08x\n", StackPointer); -+ -+ kmutex_lock (&ctxt->SwapListsLock); -+ ctxt->ItemCount[LIST_THREAD]++; -+ ELAN3_OP_PUT_WORD_ITEM (ctxt, LIST_THREAD, StackPointer); -+ kmutex_unlock (&ctxt->SwapListsLock); -+} -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan3/tprocinsts.c linux-2.6.9/drivers/net/qsnet/elan3/tprocinsts.c ---- clean/drivers/net/qsnet/elan3/tprocinsts.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan3/tprocinsts.c 2003-09-24 09:57:25.000000000 -0400 -@@ -0,0 +1,401 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: tprocinsts.c,v 1.20 2003/09/24 13:57:25 david Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/os/tprocinsts.c,v $*/ -+ -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define MAXINSTR 256 /* # Instructions to look at while looking for close */ -+ -+static E3_uint32 ALU (ELAN3_CTXT *ctxt, -+ E3_uint32 fcode, E3_uint32 X, E3_uint32 Y, -+ E3_uint32 *Z, E3_uint32 *N, E3_uint32 *C, E3_uint32 *V); -+ -+char *OpcodeNames[] = -+{ -+ "ADD ", -+ "AND ", -+ "OR ", -+ "XOR ", -+ "SUB ", -+ "ANDN ", -+ "ORN ", -+ "XNOR ", -+ "ADDX ", -+ "UNIP ", -+ "UMUL ", -+ "SMUL ", -+ "SUBX ", -+ "UNIP ", -+ "UDIV ", -+ "SDIV ", -+ "ADDcc ", -+ "ANDcc ", -+ "ORcc ", -+ "XORcc ", -+ "SUBcc ", -+ "ANDNcc", -+ "ORNcc ", -+ "XNORcc", -+ "ADDXcc", -+ "UNIPcc", -+ "UMULcc", -+ "SMULcc", -+ "SUBXcc", -+ "UNIPcc", -+ "UDIVcc", -+ "SDIVcc" -+}; -+ -+#define REGISTER_VALUE(trap, rN) (((rN) == 0) ? 0 : (trap)->Registers[(rN)^WordEndianFlip]) -+#define ASSIGN_REGISTER(trap, rN, value) ((rN) != 0 ? trap->Registers[(rN)^WordEndianFlip] = (value) : 0) -+ -+int -+RollThreadToClose (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, E3_uint32 PAckVal) -+{ -+ E3_Addr pc = (trap->pc & PC_MASK); -+ E3_Addr npc = (trap->npc & PC_MASK); -+ E3_uint32 Z = (trap->npc & PSR_Z_BIT) ? 1 : 0; -+ E3_uint32 N = (trap->npc & PSR_N_BIT) ? 1 : 0; -+ E3_uint32 C = (trap->pc & PSR_C_BIT) ? 1 : 0; -+ E3_uint32 V = (trap->pc & PSR_V_BIT) ? 
1 : 0; -+ E3_uint32 instr; -+ E3_Addr addr; -+ -+ if (ELAN3_OP_START_FAULT_CHECK (ctxt)) -+ { -+ failed: -+ ELAN3_OP_END_FAULT_CHECK (ctxt); -+ -+ ElanException (ctxt, EXCEPTION_SIMULATION_FAILED, THREAD_PROC, trap); -+ return (EFAULT); -+ } -+ -+ /* -+ * Thread trapped with output open, or while closing, -+ * so roll the PC forwards to the instruction after the -+ * next c_close, and execute that with the register -+ * specified in c_close set to the trap which occured. -+ * (This is not 1 which means an ACK) -+ */ -+ PRINTF1 (ctxt, DBG_TPROC, "RollThreadToClose: roll pc %x to c_close\n", pc); -+ -+ for (;;) -+ { -+ instr = ELAN3_OP_LOAD32 (ctxt, pc); -+ -+ PRINTF2 (ctxt, DBG_TPROC, "RollThreadToClose: PC=%x INSTR=%x\n", pc, instr); -+ -+ switch (OPCODE_CLASS(instr)) -+ { -+ case OPCODE_CLASS_0: -+ switch ((instr) & OPCODE_CLASS0_MASK) -+ { -+ case OPCODE_SETHI: -+ PRINTF3 (ctxt, DBG_TPROC, "PC %x : sethi r%d = %x\n", pc, INSTR_RD(instr), instr << 10); -+ -+ ASSIGN_REGISTER (trap, INSTR_RD(instr), instr << 10); -+ break; -+ -+ case OPCODE_SENDREG: -+ PRINTF1 (ctxt, DBG_TPROC, "PC %x : sendreg\n", pc); -+ break; -+ -+ case OPCODE_SENDMEM: -+ PRINTF1 (ctxt, DBG_TPROC, "PC %x : sendmem\n", pc); -+ break; -+ -+ case OPCODE_BICC: -+ { -+ int DoBranch = (instr >> 28) & 1; -+ int CondBranch = 1; -+ E3_Addr OldnPC = npc; -+ -+ PRINTF5 (ctxt, DBG_TPROC, "PC %x : Bicc Z=%x N=%x C=%x V=%x ", pc, Z, N, C, V); -+ switch (instr & OPCODE_BICC_MASK) -+ { -+ case OPCODE_BICC_BN: CondBranch = 0; break; -+ case OPCODE_BICC_BE: DoBranch ^= Z; break; -+ case OPCODE_BICC_BLE: DoBranch ^= Z | (N ^ V); break; -+ case OPCODE_BICC_BL: DoBranch ^= N ^ V; break; -+ case OPCODE_BICC_BLEU: DoBranch ^= C | Z; break; -+ case OPCODE_BICC_BCS: DoBranch ^= C; break; -+ case OPCODE_BICC_BNEG: DoBranch ^= N; break; -+ case OPCODE_BICC_BVS: DoBranch ^= V; break; -+ } -+ -+ /* Do the branch */ -+ if (DoBranch != 0) -+ { -+ npc = pc + (((instr & 0x3fffff) << 2) | -+ (((instr & 0x200000) != 0) ? 0xff000000 : 0)); -+ -+ PRINTF2 (ctxt, DBG_TPROC, "PC %x : branch taken to %x\n", pc, npc); -+ } -+ else -+ { -+ npc = npc + 4; -+ PRINTF1 (ctxt, DBG_TPROC, "PC %x : branch not taken\n", pc); -+ } -+ pc = OldnPC; -+ -+ /* Test if the next is annuled */ -+ if (((instr & OPCODE_BICC_ANNUL) != 0) & -+ ((DoBranch == 0) | (CondBranch == 0))) -+ { -+ PRINTF1 (ctxt, DBG_TPROC, "PC %x : branch annulled\n", pc); -+ -+ pc = npc; -+ npc += 4; -+ } -+ -+ /* -+ * we've already consumed the instruction - so continue rather -+ * than break; -+ */ -+ continue; -+ } -+ -+ default: -+ PRINTF2 (ctxt, DBG_TPROC, "PC %x : unknown class 0 instr %x\n", pc, instr); -+ goto failed; -+ } -+ break; -+ -+ case OPCODE_CLASS_1: -+ PRINTF2 (ctxt, DBG_TPROC, "PC %x : unknown class 1 instr %x\n", pc, instr); -+ goto failed; -+ -+ case OPCODE_CLASS_2: -+ { -+ E3_uint32 X = REGISTER_VALUE (trap, INSTR_RS1(instr)); -+ E3_uint32 Y = (instr & OPCODE_IMM) ? 
INSTR_IMM(instr) : REGISTER_VALUE (trap, INSTR_RS2(instr)); -+ -+ if ((instr & OPCODE_NOT_ALUOP) == 0) -+ { -+ E3_uint32 fcode = (instr >> OPCODE_FCODE_SHIFT) & OPCODE_FCODE_MASK; -+ E3_uint32 result = ALU (ctxt, fcode, X, Y, &Z, &N, &C, &V); -+ -+ PRINTF5 (ctxt, DBG_TPROC, "PC %x : %s %x %x -> %x", pc, OpcodeNames[fcode], X, Y, result); -+ PRINTF4 (ctxt, DBG_TPROC, " Z=%x N=%x C=%x V=%x\n", Z, N, C, V); -+ -+ ASSIGN_REGISTER (trap, INSTR_RD(instr), result); -+ } -+ else -+ { -+ switch (instr & OPCODE_MASK) -+ { -+ case OPCODE_OPEN: -+ PRINTF1 (ctxt, DBG_TPROC, "PC %x : c_open\n", pc); -+ break; -+ -+ case OPCODE_CLOSE: -+ PRINTF1 (ctxt, DBG_TPROC, "PC %x : c_close\n", pc); -+ goto found_close; -+ -+ case OPCODE_SLL: -+ PRINTF1 (ctxt, DBG_TPROC, "PC %x : SLL\n", pc); -+ -+ ASSIGN_REGISTER (trap, INSTR_RD(instr), X << Y); -+ break; -+ -+ case OPCODE_SRL: -+ PRINTF1 (ctxt, DBG_TPROC, "PC %x : SRL\n", pc); -+ -+ ASSIGN_REGISTER (trap, INSTR_RD(instr), X >> Y); -+ break; -+ -+ case OPCODE_SRA: -+ PRINTF1 (ctxt, DBG_TPROC, "PC %x : SRA\n", pc); -+ -+ ASSIGN_REGISTER (trap, INSTR_RD(instr), X >> Y); -+ break; -+ -+ case OPCODE_BREAKTEST: -+ PRINTF1 (ctxt, DBG_TPROC, "PC %x : BREAKTEST not allowed while open\n", pc); -+ goto failed; -+ -+ case OPCODE_BREAK: -+ PRINTF1 (ctxt, DBG_TPROC, "PC %x : BREAK not allowed while open\n", pc); -+ goto failed; -+ -+ case OPCODE_SUSPEND: -+ PRINTF1 (ctxt, DBG_TPROC, "PC %x : SUSPEND not allowed while open\n", pc); -+ goto failed; -+ -+ case OPCODE_WAIT: -+ PRINTF1 (ctxt, DBG_TPROC, "PC %x : WAIT not allowed while open\n", pc); -+ goto failed; -+ -+ default: -+ PRINTF2 (ctxt, DBG_TPROC, "PC %x : unknown class 2 instr %x\n", pc, instr); -+ goto failed; -+ } -+ } -+ break; -+ } -+ -+ case OPCODE_CLASS_3: -+ { -+ if ((instr & OPCODE_IMM) != 0) -+ addr = REGISTER_VALUE (trap, INSTR_RS1(instr)) + INSTR_IMM(instr); -+ else -+ addr = (REGISTER_VALUE (trap, INSTR_RS1(instr)) + -+ REGISTER_VALUE (trap, INSTR_RS2(instr))); -+ -+ switch (instr & OPCODE_MASK) -+ { -+ case OPCODE_LD: -+ PRINTF3 (ctxt, DBG_TPROC, "PC %x : LD [%x], r%d\n", pc, addr, INSTR_RD(instr)); -+ -+ ASSIGN_REGISTER (trap, INSTR_RD(instr), ELAN3_OP_LOAD32 (ctxt, addr)); -+ break; -+ -+ case OPCODE_LDD: -+ case OPCODE_LDBLOCK16: -+ case OPCODE_LDBLOCK32: -+ case OPCODE_LDBLOCK64: -+ PRINTF2 (ctxt, DBG_TPROC, "PC %x : LDBLOCKx @ %x is not possible while output open\n", pc, addr); -+ goto failed; -+ -+ case OPCODE_ST: -+ PRINTF2 (ctxt, DBG_TPROC, "PC %x : ST @ %x\n", pc, addr); -+ -+ ELAN3_OP_STORE32 (ctxt, addr, REGISTER_VALUE (trap, INSTR_RD(instr))); -+ break; -+ -+ case OPCODE_STD: -+ case OPCODE_STBLOCK16: -+ case OPCODE_STBLOCK32: -+ case OPCODE_STBLOCK64: -+ PRINTF2 (ctxt, DBG_TPROC, "PC %x : STD @ %x is not posisble while output open\n", pc, addr); -+ goto failed; -+ -+ case OPCODE_SWAP: -+ PRINTF2 (ctxt, DBG_TPROC, "PC %x : SWAP @ %x is not posible while output open\n", pc, addr); -+ goto failed; -+ -+ default: -+ PRINTF2 (ctxt, DBG_TPROC, "PC %x : unknown class 3 instr %x\n", pc, instr); -+ goto failed; -+ } -+ break; -+ }} -+ -+ pc = npc; -+ npc += 4; -+ } -+ -+found_close: -+ ELAN3_OP_END_FAULT_CHECK (ctxt); -+ -+ PRINTF1 (ctxt, DBG_TPROC, "PC %x : c_close\n", pc); -+ -+ /* -+ * Found the new pc, and have the close instruction in *instr -+ */ -+ ASSIGN_REGISTER (trap, INSTR_RD(instr), PAckVal); -+ -+ /* -+ * Move to instruction after close. -+ */ -+ trap->pc = npc; -+ -+ /* Insert the value of Z and N from the close inst */ -+ trap->npc = (npc + 4) | ((PAckVal == E3_PAckOk) ? 
1 : -+ (PAckVal == E3_PAckTestFail) ? 2 : 0); -+ -+ return (ESUCCESS); -+} -+ -+E3_uint32 -+ALU (ELAN3_CTXT *ctxt, -+ E3_uint32 fcode, E3_uint32 X, E3_uint32 Y, -+ E3_uint32 *Z, E3_uint32 *N, E3_uint32 *C, E3_uint32 *V) -+{ -+ E3_uint32 XMSB, YMSB, ZMSB, Cprime; -+ E3_uint32 Yprime; -+ E3_uint32 Result=0; -+ -+ Yprime = ((fcode >> 2) & 1) ? ~Y : Y; -+ Cprime = ((fcode >> 2) & 1) ^ (*C & ((fcode >> 3) & 1)); -+ XMSB = (X >> 31) & 1; -+ YMSB = (Yprime >> 31) & 1; -+ /* mul or div */ -+ if ((fcode & 0xa) == 0xa) -+ { -+ PRINTF0 (ctxt, DBG_TPROC, "ALU: tried a multiply or a divide\n"); -+ return (0); -+ } -+ -+ switch (fcode & 3) -+ { -+ /*ADD */ -+ case 0: -+ Result = X + Yprime + Cprime ; -+ if ((fcode & 0x10) == 0) -+ return (Result); -+ -+ ZMSB = Result >> 31; -+ *V = ((XMSB & YMSB & ~ZMSB) | (~XMSB &~YMSB & ZMSB)); -+ *C = ((fcode >> 2) & 1) ^ ( (XMSB & YMSB) | (~ZMSB & (XMSB | YMSB))); -+ break; -+ -+ /*AND */ -+ case 1: -+ Result = X & Yprime ; -+ if ((fcode & 0x10) == 0) -+ return (Result); -+ -+ *V = 0; -+ *C = 0; -+ break; -+ -+ /*OR */ -+ case 2: -+ Result = X | Yprime ; -+ if ((fcode & 0x10) == 0) -+ return (Result); -+ -+ *V = 0; -+ *C = 0; -+ break; -+ -+ /*XOR */ -+ case 3: -+ Result = X ^ Yprime ; -+ if ((fcode & 0x10) == 0) -+ return (Result); -+ -+ *V = 0; -+ *C = 0; -+ break; -+ } -+ -+ *Z = (Result == 0) ? 1 : 0; -+ *N = (Result >> 31) & 1; -+ -+ return (Result); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan3/tproc_linux.c linux-2.6.9/drivers/net/qsnet/elan3/tproc_linux.c ---- clean/drivers/net/qsnet/elan3/tproc_linux.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan3/tproc_linux.c 2005-05-31 06:29:07.000000000 -0400 -@@ -0,0 +1,223 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. 
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ident "$Id: tproc_linux.c,v 1.22.2.1 2005/05/31 10:29:07 addy Exp $"
-+/* $Source: /cvs/master/quadrics/elan3mod/elan3/os/tproc_linux.c,v $*/
-+
-+#include
-+#include
-+
-+#include
-+#include
-+
-+#ifdef NO_ABI
-+#include
-+extern asmlinkage long sys_open(const char *, int, int);
-+extern asmlinkage ssize_t sys_write(unsigned int, const char *, size_t);
-+extern asmlinkage ssize_t sys_read(unsigned int, char *, size_t);
-+extern asmlinkage off_t sys_lseek(unsigned int, off_t, unsigned int);
-+extern asmlinkage long sys_poll(struct pollfd *, unsigned int, long);
-+extern asmlinkage long sys_kill(int, int);
-+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
-+# include
-+#else
-+# include
-+#endif
-+
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+
-+/*
-+ * NOTE: system calls from the kernel on Linux differ between alpha and i386:
-+ *       on alpha they return -errno on failure
-+ *       on i386 they return -1 on failure and set errno
-+ */
-+
-+static void
-+ReturnSyscall (THREAD_TRAP *trap, unsigned long rc, int *skip)
-+{
-+    if (rc >= (unsigned long) (-130))
-+    {
-+        trap->pc |= PSR_C_BIT;		/* set carry to indicate failure */
-+
-+        trap->Registers[REG_OUTS+(0^WordEndianFlip)] = -rc;
-+    }
-+    else
-+    {
-+        trap->pc &= ~PSR_C_BIT;		/* clear carry to indicate success */
-+        trap->Registers[REG_OUTS+(0^WordEndianFlip)] = rc;
-+    }
-+    trap->Registers[REG_OUTS+(1^WordEndianFlip)] = 0;
-+    *skip = 1;
-+}
-+
-+static void
-+dump_regs(ELAN3_CTXT *ctxt, THREAD_TRAP *trap)
-+{
-+    PRINTF (ctxt, DBG_TPROC, "   OUTS %08x %08x %08x %08x\n",
-+            trap->Registers[REG_OUTS+(0^WordEndianFlip)],
-+            trap->Registers[REG_OUTS+(1^WordEndianFlip)],
-+            trap->Registers[REG_OUTS+(2^WordEndianFlip)],
-+            trap->Registers[REG_OUTS+(3^WordEndianFlip)]);
-+    PRINTF (ctxt, DBG_TPROC, "        %08x %08x %08x %08x\n",
-+            trap->Registers[REG_OUTS+(4^WordEndianFlip)],
-+            trap->Registers[REG_OUTS+(5^WordEndianFlip)],
-+            trap->Registers[REG_OUTS+(6^WordEndianFlip)],
-+            trap->Registers[REG_OUTS+(7^WordEndianFlip)]);
-+}
-+
-+int
-+ThreadSyscall (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int *skip)
-+{
-+    int           code;
-+    caddr_t       maddr;
-+    struct file  *file;
-+    unsigned long rc;
-+    int           i;
-+    uintptr_t     av[6];
-+    uintptr_t     ptr;
-+
-+    PRINTF (ctxt, DBG_TPROC, "ThreadSyscall: PC %08x G1 %08x\n",
-+            trap->pc, trap->Registers[REG_GLOBALS+(1^WordEndianFlip)]);
-+    dump_regs(ctxt, trap);
-+
-+    code = trap->Registers[REG_GLOBALS+(1^WordEndianFlip)];
-+
-+    /* Copy the system call arguments from %o0-%o5 */
-+    for (i = 0; i < 6; i++)
-+        av[i] = trap->Registers[REG_OUTS+(i^WordEndianFlip)];
-+
-+    rc = (unsigned long) -EINVAL;
-+
-+    switch (code) {
-+#if defined(IOPROC_PATCH_APPLIED)
-+    case ELAN3_SYS_open:
-+        maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[0]);
-+        if (maddr != NULL)
-+            rc = sys_open((const char *)maddr, av[1], av[2]);
-+        break;
-+
-+    case ELAN3_SYS_close:
-+        rc = sys_close(av[0]);
-+        break;
-+
-+    case ELAN3_SYS_write:
-+        maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[1]);
-+        if (maddr != NULL)
-+            rc = sys_write(av[0], (const char *)maddr, av[2]);
-+        break;
-+
-+    case ELAN3_SYS_read:
-+        maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[1]);
-+        if (maddr != NULL)
-+            rc = sys_read(av[0], (char *)maddr, av[2]);
-+        break;
-+
-+    case ELAN3_SYS_poll:
-+        maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[0]);
-+        if (maddr != NULL)
-+            rc = sys_poll((struct pollfd *)maddr, av[1], av[2]);
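-+
-+        /* The same pattern applies to every pointer-taking call in this
-+         * switch: the Elan virtual address passed in the thread's %oN
-+         * register is first translated to a main-memory address, roughly
-+         * (sys_xxx standing for whichever in-kernel handler is invoked):
-+         *
-+         *     maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[n]);
-+         *     if (maddr != NULL)
-+         *         rc = sys_xxx (..., maddr, ...);
-+         *
-+         * and a failed translation leaves rc at its initial -EINVAL.
-+         */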
-+        break;
-+
-+    case ELAN3_SYS_lseek:
-+        rc = sys_lseek(av[0], av[1], av[2]);
-+        break;
-+
-+    case ELAN3_SYS_mmap:
-+        if ((E3_Addr) av[0] == (E3_Addr) 0)
-+            maddr = NULL;
-+        else if ((maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[0])) == NULL)
-+            break;
-+
-+        file = NULL;
-+        /* GNAT 5515: If *not* anonymous memory need to do fget */
-+        if ((av[3] & MAP_ANONYMOUS) == 0 && (file = fget (av[4])) == NULL)
-+        {
-+            rc = -EBADF;
-+            break;
-+        }
-+
-+        down_write (&current->mm->mmap_sem);
-+        ptr = do_mmap_pgoff (file, (unsigned long) maddr, av[1], av[2], av[3], av[5] >> PAGE_SHIFT);
-+        up_write (&current->mm->mmap_sem);
-+
-+        if (file)
-+            fput (file);
-+
-+        if (IS_ERR((void *) ptr))
-+            rc = PTR_ERR((void *) ptr);
-+        else
-+            rc = elan3mmu_elanaddr (ctxt->Elan3mmu, (caddr_t)ptr);
-+
-+        break;
-+
-+    case ELAN3_SYS_munmap:
-+        maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[0]);
-+
-+#ifdef AC
-+        if (maddr != NULL)
-+            rc = do_munmap(current->mm, (unsigned long) maddr, av[1], 1);
-+#else
-+        if (maddr != NULL)
-+            rc = do_munmap(current->mm, (unsigned long) maddr, av[1]);
-+#endif
-+        break;
-+
-+    case ELAN3_SYS_kill:
-+        rc = sys_kill(av[0], av[1]);
-+        break;
-+
-+    case ELAN3_SYS_getpid:
-+        rc = current->pid;
-+        break;
-+#else
-+
-+#warning "NO IOPROC patch applied - thread cannot perform system calls"
-+
-+#endif /* defined(IOPROC_PATCH_APPLIED) */
-+
-+    default:
-+        return EINVAL;
-+    }
-+    ReturnSyscall(trap, rc, skip);
-+    return ESUCCESS;
-+}
-+
-+
-+int
-+ThreadElancall (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int *skip)
-+{
-+    int ret = ESUCCESS;
-+
-+    PRINTF (ctxt, DBG_TPROC, "ThreadElancall: PC %08x\n", trap->pc);
-+    dump_regs(ctxt, trap);
-+
-+    /*
-+     * Elan system call 'type' is passed in o0
-+     */
-+    switch (trap->Registers[REG_OUTS+(0^WordEndianFlip)])
-+    {
-+    default:
-+        ret = EINVAL;
-+        break;
-+    }
-+    return ret;
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "stroustrup"
-+ * End:
-+ */
-diff -urN clean/drivers/net/qsnet/elan3/virtual_process.c linux-2.6.9/drivers/net/qsnet/elan3/virtual_process.c
---- clean/drivers/net/qsnet/elan3/virtual_process.c	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/elan3/virtual_process.c	2004-06-07 09:50:10.000000000 -0400
-@@ -0,0 +1,884 @@
-+/*
-+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: virtual_process.c,v 1.68 2004/06/07 13:50:10 mike Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/os/virtual_process.c,v $*/ -+ -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static ELAN3_VPSEG * -+InstallSegment (ELAN3_CTXT *ctxt, int process, int entries) -+{ -+ ELAN3_VPSEG **prevSeg, *seg; -+ int lastTop = -1; -+ int top = process + entries-1; -+ -+ ASSERT (krwlock_is_write_locked (&ctxt->VpLock)); -+ -+ for (prevSeg = &ctxt->VpSegs; (seg = (*prevSeg)) != NULL; prevSeg = &seg->Next) -+ { -+ int thisTop = seg->Process + seg->Entries - 1; -+ -+ if (process < seg->Process && (process <= lastTop || top >= seg->Process)) -+ { -+ /* -+ * Overlaps with last segment, or this one -+ */ -+ return (NULL); -+ } -+ if (seg->Process > process) -+ break; -+ -+ lastTop = thisTop; -+ } -+ -+ KMEM_ZALLOC (seg, ELAN3_VPSEG *, sizeof (ELAN3_VPSEG), TRUE); -+ -+ if (seg == (ELAN3_VPSEG *) NULL) -+ return (NULL); -+ -+ seg->Process = process; -+ seg->Entries = entries; -+ -+ -+ PRINTF2 (ctxt, DBG_VP, "InstallSegment: add seg %p before %p\n", seg, *prevSeg); -+ -+ seg->Next = *prevSeg; -+ *prevSeg = seg; -+ -+ return (seg); -+} -+ -+static int -+RemoveSegment (ELAN3_CTXT *ctxt, ELAN3_VPSEG *seg) -+{ -+ ELAN3_VPSEG **prevSeg, *thisSeg; -+ -+ ASSERT (krwlock_is_write_locked (&ctxt->VpLock)); -+ -+ for (prevSeg = &ctxt->VpSegs; (thisSeg = (*prevSeg)) != NULL; prevSeg = &thisSeg->Next) -+ { -+ if (thisSeg == seg) -+ break; -+ } -+ -+ if (thisSeg == (ELAN3_VPSEG *) NULL) -+ return (EINVAL); -+ -+ -+ PRINTF2 (ctxt, DBG_VP, "RemoveSegment: remove seg %p next %p\n", thisSeg, thisSeg->Next); -+ -+ *prevSeg = thisSeg->Next; -+ -+ KMEM_FREE ((caddr_t) seg, sizeof (ELAN3_VPSEG)); -+ -+ return (ESUCCESS); -+} -+ -+static ELAN3_VPSEG * -+FindSegment (ELAN3_CTXT *ctxt, int low, int high) -+{ -+ ELAN3_VPSEG *seg; -+ -+ ASSERT(krwlock_is_locked (&ctxt->VpLock)); -+ -+ for (seg = ctxt->VpSegs; seg; seg = seg->Next) -+ { -+ if (seg->Process <= low && (seg->Process + seg->Entries) > high) -+ return (seg); -+ } -+ -+ return ((ELAN3_VPSEG *) NULL); -+} -+ -+ELAN_LOCATION -+ProcessToLocation (ELAN3_CTXT *ctxt, ELAN3_VPSEG *seg, int process, ELAN_CAPABILITY *cap) -+{ -+ ELAN_LOCATION location; -+ int nnodes,nctxs; -+ int node,ctx,i; -+ -+ ASSERT(krwlock_is_locked (&ctxt->VpLock)); -+ -+ location.loc_node = ELAN3_INVALID_NODE; -+ location.loc_context = -1; -+ -+ PRINTF3 (ctxt, DBG_VP, "ProcessToLocation: process %d seg %p cap %p\n", process, seg, cap); -+ -+ if (seg == NULL) -+ seg = FindSegment (ctxt, process, process); -+ -+ if (!seg || (seg->Type != ELAN3_VPSEG_P2P)) -+ return (location); -+ -+ cap = &seg->SegCapability; -+ nnodes = ELAN_CAP_NUM_NODES (cap); -+ nctxs = ELAN_CAP_NUM_CONTEXTS (cap); -+ -+ switch (seg->SegCapability.cap_type & ELAN_CAP_TYPE_MASK) -+ { -+ case ELAN_CAP_TYPE_BLOCK: -+ { -+ int entries = ELAN_CAP_ENTRIES(cap); -+ -+ for (node = 0, i = 0; node < nnodes && i < entries; node++) -+ { -+ for (ctx = 0; ctx < nctxs && i < entries; ctx++) -+ { -+ if (( seg->SegCapability.cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->SegCapability.cap_bitmap, ctx + (node * nctxs))) -+ { -+ if (i++ == (process - seg->Process)) -+ { -+ location.loc_node = seg->SegCapability.cap_lownode + node; -+ location.loc_context = seg->SegCapability.cap_lowcontext + ctx; -+ goto found; -+ } -+ } -+ } -+ } -+ break; -+ } -+ case 
ELAN_CAP_TYPE_CYCLIC: -+ { -+ int entries = ELAN_CAP_ENTRIES(cap); -+ -+ for (ctx = 0, i = 0; ctx < nctxs && i < entries; ctx++) -+ { -+ for (node = 0; node < nnodes && i < entries; node++) -+ { -+ if ((seg->SegCapability.cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->SegCapability.cap_bitmap, node + (ctx * nnodes))) -+ { -+ if (i++ == (process - seg->Process)) -+ { -+ location.loc_node = seg->SegCapability.cap_lownode + node; -+ location.loc_context = seg->SegCapability.cap_lowcontext + ctx; -+ goto found; -+ } -+ } -+ } -+ } -+ break; -+ } -+ default: -+ break; -+ } -+ -+ found: -+ -+ PRINTF3 (ctxt, DBG_VP, "ProcessToLocation: process %d -> Node %d Context %d\n", process, location.loc_node, location.loc_context); -+ -+ if (cap != NULL) -+ { -+ bcopy ((caddr_t) &seg->SegCapability, (caddr_t) cap, sizeof (ELAN_CAPABILITY)); -+ cap->cap_mycontext = location.loc_context; -+ } -+ -+ return (location); -+} -+ -+int -+LocationToProcess (ELAN3_CTXT *ctxt, ELAN3_VPSEG *seg, ELAN_LOCATION loc, ELAN_CAPABILITY *cap) -+{ -+ int nnodes,nctxs; -+ int node,ctx,i; -+ -+ if (seg == NULL) -+ return ELAN3_INVALID_PROCESS; -+ -+ if (!seg || (seg->Type != ELAN3_VPSEG_P2P)) -+ return ELAN3_INVALID_PROCESS; -+ -+ nnodes = cap->cap_highnode - cap->cap_lownode + 1; -+ nctxs = cap->cap_highcontext - cap->cap_lowcontext + 1; -+ -+ switch (cap->cap_type & ELAN_CAP_TYPE_MASK) -+ { -+ case ELAN_CAP_TYPE_BLOCK: -+ { -+ int entries = ELAN_CAP_ENTRIES(cap); -+ -+ for (node = 0, i = 0; node < nnodes && i < entries; node++) -+ { -+ for (ctx = 0; ctx < nctxs && i < entries; ctx++) -+ { -+ if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, ctx + (node * nctxs))) -+ { -+ if ((loc.loc_node == (cap->cap_lownode + node) ) -+ && (loc.loc_context == (cap->cap_lowcontext + ctx) )) -+ { -+ return (i + seg->Process); -+ } -+ i++; -+ } -+ } -+ } -+ break; -+ } -+ case ELAN_CAP_TYPE_CYCLIC: -+ { -+ int entries = ELAN_CAP_ENTRIES(cap); -+ -+ for (ctx = 0, i = 0; ctx < nctxs && i < entries; ctx++) -+ { -+ for (node = 0; node < nnodes && i < entries; node++) -+ { -+ if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, node + (ctx * nnodes))) -+ { -+ if ((loc.loc_node == (cap->cap_lownode + node) ) -+ && (loc.loc_context == (cap->cap_lowcontext + ctx) )) -+ { -+ return (i + seg->Process); -+ } -+ i++; -+ -+ } -+ } -+ } -+ break; -+ } -+ default: -+ break; -+ } -+ -+ return ELAN3_INVALID_PROCESS; -+} -+ -+int -+elan3_addvp (ELAN3_CTXT *ctxt, int process, ELAN_CAPABILITY *cap) -+{ -+ ELAN3_DEV *dev = ctxt->Device; -+ ELAN_POSITION *pos = &ctxt->Position; -+ ELAN3_VPSEG *seg; -+ int i; -+ int nodeOff; -+ int ctxOff; -+ int nnodes; -+ int nctxs; -+ E3_uint16 flits[MAX_FLITS]; -+ int nflits; -+ int entries; -+ -+ PRINTF2 (ctxt, DBG_VP, "elan3_addvp: %d -> %s\n", process, CapabilityString (cap)); -+ -+ entries = ELAN_CAP_ENTRIES(cap); -+ if (entries <= 0 || (process + entries) > ELAN3_MAX_VPS) -+ return (EINVAL); -+ -+ /* -+ * Scan the virtual process segment list, to add this entry, and ensure that -+ * the ranges don't overlap. -+ */ -+ krwlock_write (&ctxt->VpLock); -+ -+ /* check cap. 
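-+     * In outline (matching the switch below): ELAN_CAP_OK means the
-+     * capability is usable as-is; ELAN_CAP_RMS means it must additionally
-+     * pass elan_validate_map(cap, cap); any other result fails the call
-+     * with EINVAL after dropping VpLock.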
*/ -+ switch (elan3_validate_cap (ctxt->Device, cap, ELAN_USER_P2P)) -+ { -+ case ELAN_CAP_OK: -+ /* nothing */ -+ break; -+ -+ case ELAN_CAP_RMS: -+ if ( elan_validate_map(cap, cap) != ESUCCESS) -+ { -+ krwlock_done (&ctxt->VpLock); -+ return (EINVAL); -+ } -+ break; -+ -+ default: -+ krwlock_done (&ctxt->VpLock); -+ return (EINVAL); -+ } -+ -+ if ((seg = InstallSegment (ctxt, process, entries)) == NULL) -+ { -+ PRINTF0 (ctxt, DBG_VP, "elan3_addvp: failed to find a seg\n"); -+ krwlock_done (&ctxt->VpLock); -+ return (EINVAL); -+ } -+ -+ seg->Type = ELAN3_VPSEG_P2P; -+ seg->SegCapability = *cap; -+ seg->SegCapability.cap_mycontext = ELAN_CAP_UNINITIALISED; -+ -+ PRINTF3 (ctxt, DBG_VP, "elan3_addvp: segment type %x %d %d\n", -+ seg->SegCapability.cap_type, seg->Process, entries); -+ -+ -+ nnodes = cap->cap_highnode - cap->cap_lownode + 1; -+ nctxs = cap->cap_highcontext - cap->cap_lowcontext + 1; -+ -+ /* position not determined, so cannot load any routes, the hwtest -+ * process must explicitly set it's own routes */ -+ -+ if (!(cap->cap_type & ELAN_CAP_TYPE_HWTEST) && (pos->pos_mode != ELAN_POS_UNKNOWN)) -+ { -+ switch (cap->cap_type & ELAN_CAP_TYPE_MASK) -+ { -+ case ELAN_CAP_TYPE_BLOCK: -+ for (nodeOff = 0, i = 0; nodeOff < nnodes && i < entries; nodeOff++) -+ { -+ for (ctxOff = 0; ctxOff < nctxs && i < entries; ctxOff++) -+ { -+ if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, ctxOff + (nodeOff * nctxs))) -+ { -+ /* Don't load a route if there's no switch and trying to talk to myself */ -+ if (pos->pos_mode == ELAN_POS_MODE_SWITCHED || -+ (pos->pos_mode == ELAN_POS_MODE_LOOPBACK && cap->cap_lownode + nodeOff == pos->pos_nodeid) || -+ (pos->pos_mode == ELAN_POS_MODE_BACKTOBACK && cap->cap_lownode + nodeOff != pos->pos_nodeid)) -+ { -+ PRINTF3 (ctxt, DBG_VP, "elan3_addvp: virtual process %d -> node %d context %d\n", -+ seg->Process + i, cap->cap_lownode +nodeOff, cap->cap_lowcontext +ctxOff); -+ -+ nflits = GenerateRoute (pos, flits, cap->cap_lownode + nodeOff, cap->cap_lownode + nodeOff, -+ DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY); -+ -+ -+ -+ LoadRoute (dev, ctxt->RouteTable, seg->Process+i, cap->cap_lowcontext + ctxOff, nflits, flits); -+ } -+ -+ i++; -+ } -+ } -+ } -+ break; -+ -+ case ELAN_CAP_TYPE_CYCLIC: -+ for (ctxOff = 0, i = 0; ctxOff < nctxs && i < entries; ctxOff++) -+ { -+ for (nodeOff = 0; nodeOff < nnodes && i < entries; nodeOff++) -+ { -+ if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, nodeOff + (ctxOff * nnodes))) -+ { -+ /* Don't load a route if there's no switch and trying to talk to myself */ -+ if (pos->pos_mode == ELAN_POS_MODE_SWITCHED || -+ (pos->pos_mode == ELAN_POS_MODE_LOOPBACK && cap->cap_lownode + nodeOff == pos->pos_nodeid) || -+ (pos->pos_mode == ELAN_POS_MODE_BACKTOBACK && cap->cap_lownode + nodeOff != pos->pos_nodeid)) -+ { -+ PRINTF3 (ctxt, DBG_VP, "elan3_addvp: virtual process %d -> node %d context %d\n", -+ seg->Process + i, cap->cap_lownode + nodeOff, cap->cap_lowcontext +ctxOff); -+ -+ nflits = GenerateRoute (pos, flits, cap->cap_lownode + nodeOff, cap->cap_lownode + nodeOff, -+ DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY); -+ -+ -+ LoadRoute (dev, ctxt->RouteTable, seg->Process+i, cap->cap_lowcontext +ctxOff, nflits, flits); -+ } -+ i++; -+ } -+ } -+ } -+ break; -+ default: -+ break; -+ } -+ } -+ -+ krwlock_done (&ctxt->VpLock); -+ -+ return (ESUCCESS); -+} -+ -+int -+elan3_removevp (ELAN3_CTXT *ctxt, int process) -+{ -+ ELAN3_VPSEG *seg; -+ ELAN3_VPSEG *next; -+ int i; -+ -+ krwlock_write 
(&ctxt->VpLock); -+ -+ PRINTF1 (ctxt, DBG_VP, "elan3_removevp: remove process %d\n", process); -+ -+ if (process == ELAN3_INVALID_PROCESS) -+ seg = ctxt->VpSegs; -+ else -+ seg = FindSegment (ctxt, process, process); -+ -+ if (seg == (ELAN3_VPSEG *) NULL) -+ { -+ krwlock_done (&ctxt->VpLock); -+ return (EINVAL); -+ } -+ -+ do { -+ PRINTF3 (ctxt, DBG_VP, "elan3_removevp: segment is %p [%x,%x]\n", -+ seg, seg->Process, seg->Process+seg->Entries); -+ -+ for (i = 0; i < seg->Entries; i++) -+ ClearRoute (ctxt->Device, ctxt->RouteTable, seg->Process+i); -+ -+ /* get Next pointer value before structure is free'd */ -+ next = seg->Next; -+ RemoveSegment (ctxt, seg); -+ -+ } while (process == ELAN3_INVALID_PROCESS && (seg = next) != NULL); -+ -+ krwlock_done (&ctxt->VpLock); -+ -+ return (ESUCCESS); -+} -+ -+int -+elan3_addbcastvp (ELAN3_CTXT *ctxt, int process, int lowProc, int highProc) -+{ -+ ELAN_POSITION *pos = &ctxt->Position; -+ ELAN3_VPSEG *seg; -+ ELAN3_VPSEG *aseg; -+ int virtualProcess; -+ E3_uint64 routeValue; -+ -+ PRINTF3 (ctxt, DBG_VP, "elan3_addbcastvp: process %d [%d,%d]\n", process, lowProc, highProc); -+ -+ if (lowProc > highProc || pos->pos_mode != ELAN_POS_MODE_SWITCHED) -+ return (EINVAL); -+ -+ krwlock_write (&ctxt->VpLock); -+ -+ if ((aseg = FindSegment (ctxt, lowProc, highProc)) == NULL || (aseg->Type != ELAN3_VPSEG_P2P)) -+ { -+ PRINTF2 (ctxt, DBG_VP, "elan3_addbcastvp: process [%d,%d] does not map to p2p segment\n", lowProc, highProc); -+ krwlock_done (&ctxt->VpLock); -+ return (EINVAL); -+ } -+ -+ /* check aseg->SegCapability */ -+ switch (elan3_validate_cap (ctxt->Device, &aseg->SegCapability, ELAN_USER_BROADCAST)) -+ { -+ case ELAN_CAP_OK: -+ /* nothing */ -+ break; -+ -+ case ELAN_CAP_RMS: -+ if ( elan_validate_map(&ctxt->Capability, &aseg->SegCapability) != ESUCCESS ) -+ { -+ krwlock_done (&ctxt->VpLock); -+ return (EINVAL); -+ } -+ break; -+ -+ default: -+ krwlock_done (&ctxt->VpLock); -+ return (EINVAL); -+ } -+ -+ if ( ProcessToLocation (ctxt, aseg, lowProc, NULL).loc_context != -+ ProcessToLocation (ctxt, aseg, highProc, NULL).loc_context) -+ { -+ PRINTF2 (ctxt, DBG_VP, "elan3_addbcastvp: process [%d,%d] does not map to single context\n", lowProc, highProc); -+ krwlock_done (&ctxt->VpLock); -+ return (EINVAL); -+ } -+ -+ if ((seg = InstallSegment (ctxt, process, 1)) == NULL) -+ { -+ krwlock_done (&ctxt->VpLock); -+ return (EINVAL); -+ } -+ -+ seg->Type = ELAN3_VPSEG_BROADCAST; -+ seg->SegLowProc = lowProc; -+ seg->SegHighProc = highProc; -+ -+ PRINTF4 (ctxt, DBG_VP, "elan3_addbcastvp: installed seg %p Type %d LowProc %d HighProc %d\n", -+ seg, seg->Type, seg->SegLowProc, seg->SegHighProc); -+ -+ for (virtualProcess = lowProc; virtualProcess <= highProc; virtualProcess++) -+ { -+ if (virtualProcess < 0 || virtualProcess >= ctxt->RouteTable->Size) -+ routeValue = 0; -+ else -+ routeValue = elan3_sdram_readq ( ctxt->Device, ctxt->RouteTable->Table + virtualProcess * NBYTES_PER_SMALL_ROUTE); -+ -+ if (! 
(routeValue & ROUTE_VALID)) -+ { -+ PRINTF2 (ctxt, DBG_VP, "loadvp[%x]: broadcast %x not valid\n", -+ ctxt->Capability.cap_mycontext, virtualProcess); -+ break; -+ } -+ } -+ -+ if (virtualProcess > highProc) /* All vps now present */ -+ { /* so load up broadcast route */ -+ E3_uint16 flits[MAX_FLITS]; -+ ELAN_LOCATION low = ProcessToLocation (ctxt, aseg, lowProc, NULL); -+ ELAN_LOCATION high = ProcessToLocation (ctxt, aseg, highProc, NULL); -+ int nflits = GenerateRoute (pos, flits, low.loc_node, high.loc_node, DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY); -+ -+ PRINTF6 (ctxt, DBG_VP, "loadvp[%x]: broadcast %d -> %x.%x [%x.%x]\n", ctxt->Capability.cap_mycontext, -+ seg->Process, low.loc_node, high.loc_node, -+ low.loc_context, high.loc_context); -+ -+ LoadRoute ( ctxt->Device, ctxt->RouteTable, seg->Process, low.loc_context, nflits, flits); -+ } -+ -+ krwlock_done (&ctxt->VpLock); -+ -+ return (ESUCCESS); -+} -+ -+int -+elan3_process (ELAN3_CTXT *ctxt) -+{ -+ int res = ELAN3_INVALID_PROCESS; -+ ELAN3_VPSEG *seg; -+ ELAN_LOCATION loc; -+ -+ krwlock_write (&ctxt->VpLock); -+ -+ loc.loc_node = ctxt->Position.pos_nodeid; -+ loc.loc_context = ctxt->Capability.cap_mycontext; -+ -+ for (seg = ctxt->VpSegs ; seg; seg = seg->Next) -+ { -+ if (seg->Type == ELAN3_VPSEG_P2P && -+ seg->SegCapability.cap_lowcontext <= ctxt->Capability.cap_mycontext && -+ seg->SegCapability.cap_highcontext >= ctxt->Capability.cap_mycontext && -+ seg->SegCapability.cap_lownode <= ctxt->Position.pos_nodeid && -+ seg->SegCapability.cap_highnode >= ctxt->Position.pos_nodeid) -+ { -+ if ((res=LocationToProcess (ctxt,seg,loc,&ctxt->Capability)) != ELAN3_INVALID_PROCESS) -+ { -+ krwlock_done (&ctxt->VpLock); -+ return res; -+ } -+ } -+ } -+ -+ krwlock_done (&ctxt->VpLock); -+ -+ return (res); -+} -+ -+int -+elan3_check_route (ELAN3_CTXT *ctxt, int process, E3_uint16 *flits, E3_uint32 *routeError) -+{ -+ PRINTF5 (ctxt, DBG_VP, "elan3_check_route: vp=%d flits=%04x %04x %04x %04x\n", -+ process, flits[0], flits[1], flits[2], flits[3]); -+ PRINTF4 (ctxt, DBG_VP, " %04x %04x %04x %04x\n", -+ flits[4], flits[5], flits[6], flits[7]); -+ -+ krwlock_read (&ctxt->VpLock); -+ *routeError=elan3_route_check(ctxt,flits,ProcessToLocation (ctxt, NULL, process, NULL).loc_node); -+ krwlock_done (&ctxt->VpLock); -+ -+ return (ESUCCESS); /* the call is a success tho the errorcode may be set */ -+} -+ -+int -+elan3_load_route (ELAN3_CTXT *ctxt, int process, E3_uint16 *flits) -+{ -+ ELAN3_VPSEG *seg; -+ int res = 0; -+ int nflits; -+ int err; -+ -+ PRINTF5 (ctxt, DBG_VP, "elan3_load_route: vp=%d flits=%04x %04x %04x %04x\n", -+ process, flits[0], flits[1], flits[2], flits[3]); -+ PRINTF4 (ctxt, DBG_VP, " %04x %04x %04x %04x\n", -+ flits[4], flits[5], flits[6], flits[7]); -+ -+ krwlock_write (&ctxt->VpLock); -+ -+ /* check the route is valid */ -+ if (!(ctxt->Capability.cap_type & ELAN_CAP_TYPE_HWTEST)) -+ { -+ /* must have already attached to define my context number */ -+ if (ctxt->Capability.cap_mycontext == ELAN_CAP_UNINITIALISED) -+ { -+ krwlock_done (&ctxt->VpLock); -+ return (EINVAL); -+ } -+ -+ if ((err=elan3_route_check(ctxt,flits,ProcessToLocation (ctxt, NULL, process, NULL).loc_node)) != ELAN3_ROUTE_SUCCESS) -+ { -+ krwlock_done (&ctxt->VpLock); -+ return (EINVAL); -+ } -+ } -+ -+ if ((seg = FindSegment (ctxt, process, process)) == NULL || seg->Type != ELAN3_VPSEG_P2P) -+ { -+ krwlock_done (&ctxt->VpLock); -+ return (EINVAL); -+ } -+ -+ /* Calculate number of flits in this route */ -+ for (nflits = 0; nflits < MAX_FLITS && flits[nflits]; 
nflits++) -+ ; -+ -+ res = LoadRoute (ctxt->Device, ctxt->RouteTable, process, ProcessToLocation (ctxt, seg, process, NULL).loc_context, nflits, flits); -+ -+ krwlock_done (&ctxt->VpLock); -+ -+ return (res); -+} -+ -+int -+elan3_get_route (ELAN3_CTXT *ctxt, int process, E3_uint16 *flits) -+{ -+ ELAN3_VPSEG *seg; -+ int res = 0; -+ -+ PRINTF1 (ctxt, DBG_VP, "elan3_get_route: vp=%d \n", process); -+ -+ krwlock_write (&ctxt->VpLock); -+ -+ if (ctxt->RouteTable == NULL) /* is there a route table */ -+ { -+ krwlock_done (&ctxt->VpLock); -+ return (EINVAL); -+ } -+ -+ if ((seg = FindSegment (ctxt, process, process)) != NULL && seg->Type != ELAN3_VPSEG_P2P) -+ { -+ krwlock_done (&ctxt->VpLock); -+ return (EINVAL); -+ } -+ -+ if (seg == NULL) -+ { -+ krwlock_done (&ctxt->VpLock); -+ return (EINVAL); -+ } -+ -+ res = GetRoute (ctxt->Device, ctxt->RouteTable, process, flits); -+ -+ krwlock_done (&ctxt->VpLock); -+ -+ return (res); -+} -+ -+int -+elan3_reset_route (ELAN3_CTXT *ctxt, int process) -+{ -+ E3_uint16 flits[MAX_FLITS]; -+ -+ PRINTF1 (ctxt, DBG_VP, "elan3_reset_route: vp=%d \n", process); -+ -+ GenerateRoute (&ctxt->Position, flits, process, process, DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY); -+ -+ return elan3_load_route(ctxt,process,flits); -+} -+ -+int -+ResolveVirtualProcess (ELAN3_CTXT *ctxt, int process) -+{ -+ E3_uint16 flits[MAX_FLITS]; -+ ELAN3_DEV *dev = ctxt->Device; -+ int res = ESUCCESS; -+ ELAN3_VPSEG *seg; -+ ELAN3_VPSEG *aseg; -+ E3_uint64 routeValue; -+ -+ krwlock_read (&ctxt->VpLock); -+ -+ PRINTF1 (ctxt, DBG_VP, "ResolveVirtualProcess: vp=%d \n", process); -+ -+ if (ctxt->RouteTable == NULL || process < 0 || process >= ctxt->RouteTable->Size) -+ { -+ krwlock_done (&ctxt->VpLock); -+ return (EINVAL); -+ } -+ -+ if (! (seg = FindSegment (ctxt, process, process))) -+ { -+ PRINTF1 (ctxt, DBG_VP, "ResolveVirtualProcess: cannot find segment for virtual process %d\n", process); -+ krwlock_done (&ctxt->VpLock); -+ return (EINVAL); -+ } -+ -+ /* check cap. */ -+ switch (elan3_validate_cap (ctxt->Device, &seg->SegCapability, ((seg->Type == ELAN3_VPSEG_P2P) ? ELAN_USER_P2P : ELAN_USER_BROADCAST))) -+ { -+ case ELAN_CAP_OK: -+ /* nothing */ -+ break; -+ -+ case ELAN_CAP_RMS: -+ if ( elan_validate_map(&ctxt->Capability, &seg->SegCapability) != ESUCCESS) -+ { -+ krwlock_done (&ctxt->VpLock); -+ return (EINVAL); -+ } -+ break; -+ -+ default: -+ krwlock_done (&ctxt->VpLock); -+ return (EINVAL); -+ } -+ -+ BumpUserStat (ctxt, LoadVirtualProcess); -+ -+ routeValue = elan3_sdram_readq (dev, ctxt->RouteTable->Table + process * NBYTES_PER_SMALL_ROUTE); -+ if (routeValue & ROUTE_VALID) /* Virtual process already */ -+ { /* loaded */ -+ krwlock_done (&ctxt->VpLock); -+ return (ESUCCESS); -+ } -+ -+ switch (seg->Type) -+ { -+ case ELAN3_VPSEG_P2P: -+ switch (seg->SegCapability.cap_type & ELAN_CAP_TYPE_MASK) -+ { -+ case ELAN_CAP_TYPE_BLOCK: -+ case ELAN_CAP_TYPE_CYCLIC: -+ if ((res = elan_validate_map (&ctxt->Capability,&seg->SegCapability)) == ESUCCESS && -+ (res = GetRoute(dev, ctxt->RouteTable ,process, flits)) == ESUCCESS) -+ { -+ if (elan3_route_check(ctxt, flits, ProcessToLocation (ctxt, seg, process, NULL).loc_node)) -+ res = EINVAL; -+ else -+ ValidateRoute(dev, ctxt->RouteTable, process); -+ } -+ break; -+ default: -+ res = EINVAL; -+ break; -+ } -+ break; -+ -+ case ELAN3_VPSEG_BROADCAST: -+ /* Find the segment that this broadcast range spans. 
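-+	 * A broadcast entry is only usable when [SegLowProc, SegHighProc]
-+	 * lies inside a single point-to-point segment whose capability is
-+	 * marked ELAN_CAP_TYPE_BROADCASTABLE; the stored route is then
-+	 * checked against the low and high node of that range, roughly:
-+	 *
-+	 *     elan3_route_broadcast_check (ctxt, flits,
-+	 *                                  lowNode.loc_node, highNode.loc_node);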
*/
-+        aseg = FindSegment (ctxt, seg->SegLowProc, seg->SegHighProc);
-+
-+        if (aseg == NULL || (aseg->Type != ELAN3_VPSEG_P2P) || !(aseg->SegCapability.cap_type & ELAN_CAP_TYPE_BROADCASTABLE))
-+        {
-+            PRINTF2 (ctxt, DBG_VP, "resolveVirtualProcess: %d -> EINVAL (%s)\n", process,
-+                     (aseg == NULL ? "no segment" : ((seg->Type != ELAN3_VPSEG_P2P) ? "not point to point" :
-+                     "not broadcastable")));
-+            res = EINVAL;
-+            break;
-+        }
-+
-+        switch (aseg->SegCapability.cap_type & ELAN_CAP_TYPE_MASK)
-+        {
-+        case ELAN_CAP_TYPE_BLOCK:
-+        case ELAN_CAP_TYPE_CYCLIC:
-+        {
-+            ELAN_LOCATION lowNode  = ProcessToLocation (ctxt,aseg,seg->SegLowProc , NULL);
-+            ELAN_LOCATION highNode = ProcessToLocation (ctxt,aseg,seg->SegHighProc , NULL);
-+
-+
-+            if ((res = elan_validate_map (&ctxt->Capability,&aseg->SegCapability)) == ESUCCESS &&
-+                (res=GetRoute(dev, ctxt->RouteTable ,process, flits)) == ESUCCESS)
-+            {
-+                if (elan3_route_broadcast_check(ctxt,flits, lowNode.loc_node , highNode.loc_node ) != ELAN3_ROUTE_SUCCESS )
-+                    res = EINVAL;
-+                else
-+                    ValidateRoute(dev, ctxt->RouteTable, process);
-+            }
-+            break;
-+        }
-+
-+        default:
-+            res = EINVAL;
-+            break;
-+        }
-+        break;				/* don't fall into the outer default and clobber res */
-+
-+    default:
-+        res = EINVAL;
-+        break;
-+    }
-+
-+    krwlock_done (&ctxt->VpLock);
-+    return (res);
-+}
-+
-+void
-+UnloadVirtualProcess (ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap)
-+{
-+    ELAN3_DEV       *dev = ctxt->Device;
-+    ELAN3_VPSEG     *seg;
-+    ELAN_CAPABILITY *scap;
-+    int              i;
-+
-+    for (seg = ctxt->VpSegs; seg; seg = seg->Next)
-+    {
-+        switch (seg->Type)
-+        {
-+        case ELAN3_VPSEG_P2P:
-+            scap = &seg->SegCapability;
-+
-+            if (cap == NULL || ELAN_CAP_MATCH (scap, cap))
-+            {
-+                PRINTF2 (ctxt, DBG_VP, "unloadvp: segment [%x.%x]\n",
-+                         seg->Process, seg->Process + seg->Entries-1);
-+
-+                for (i = 0; i < seg->Entries; i++)
-+                    InvalidateRoute (dev, ctxt->RouteTable, seg->Process+i);
-+            }
-+            break;
-+
-+        case ELAN3_VPSEG_BROADCAST:
-+            for (i = 0; i < seg->Entries; i++)
-+            {
-+                ELAN3_VPSEG *aseg = FindSegment (ctxt, seg->SegLowProc, seg->SegHighProc);
-+
-+                if (aseg != NULL && ELAN_CAP_MATCH(&aseg->SegCapability, cap))
-+                {
-+                    PRINTF1 (ctxt, DBG_VP, "unloadvp: broadcast vp %d\n", seg->Process);
-+
-+                    InvalidateRoute (dev, ctxt->RouteTable, seg->Process+i);
-+                }
-+            }
-+        }
-+    }
-+}
-+
-+caddr_t
-+CapabilityString (ELAN_CAPABILITY *cap)
-+{
-+#define CAPSTR_LEN 200
-+#define NCAPSTRS   4
-+    static char       space[CAPSTR_LEN*NCAPSTRS];
-+    static int        bufnum;
-+    static spinlock_t lock;
-+    static int        lockinitialised;
-+    int               num;
-+    unsigned long     flags;
-+
-+    if (!
lockinitialised) -+ { -+ spin_lock_init (&lock); -+ lockinitialised = 1; -+ } -+ -+ spin_lock_irqsave (&lock, flags); -+ -+ if ((num = ++bufnum) == NCAPSTRS) -+ num = bufnum = 0; -+ spin_unlock_irqrestore (&lock, flags); -+ -+ sprintf (space + (num * CAPSTR_LEN), "%4x %4x %4x %4x %4x %4x %4x [%x.%x.%x.%x]", cap->cap_type, -+ cap->cap_lownode, cap->cap_highnode, -+ cap->cap_lowcontext, cap->cap_mycontext, cap->cap_highcontext, ELAN_CAP_ENTRIES(cap), -+ cap->cap_userkey.key_values[0], cap->cap_userkey.key_values[1], -+ cap->cap_userkey.key_values[2], cap->cap_userkey.key_values[3]); -+ -+ return (space + (num * CAPSTR_LEN)); -+} -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan4/debug.c linux-2.6.9/drivers/net/qsnet/elan4/debug.c ---- clean/drivers/net/qsnet/elan4/debug.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan4/debug.c 2005-03-23 06:06:15.000000000 -0500 -@@ -0,0 +1,146 @@ -+/* -+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: debug.c,v 1.17 2005/03/23 11:06:15 david Exp $" -+/* $Source: /cvs/master/quadrics/elan4mod/debug.c,v $*/ -+ -+#include -+ -+#include -+#include -+ -+unsigned elan4_debug = 0; -+unsigned elan4_debug_toconsole = 0; -+unsigned elan4_debug_tobuffer = DBG_ALL; -+ -+unsigned elan4_debug_display_ctxt; -+unsigned elan4_debug_ignore_ctxt; -+unsigned elan4_debug_ignore_type; -+ -+void -+elan4_debug_init() -+{ -+ if ((elan4_debug & elan4_debug_tobuffer) != 0) -+ qsnet_debug_alloc(); -+} -+ -+void -+elan4_debug_fini() -+{ -+} -+ -+void -+elan4_debugf (void *type, int mode, char *fmt,...) 
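-+/* A note on the interface (summary of the body below): 'type' is
-+ * overloaded - a value above DBG_NTYPES is taken to be an ELAN4_CTXT
-+ * pointer, so the per-context display/ignore filters apply, while small
-+ * values such as DBG_DEVICE or DBG_USER select a subsystem prefix and
-+ * DBG_BUFFER/DBG_CONSOLE force the destination.  A sketch of a call
-+ * site (the mode bit shown is hypothetical):
-+ *
-+ *     elan4_debugf (DBG_DEVICE, DBG_SOME_BIT, "intmask=%x\n", intmask);
-+ */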
-+{ -+ char prefix[128]; -+ int where = 0; -+ va_list ap; -+ -+ if ((mode & elan4_debug_tobuffer) != 0 || type == DBG_BUFFER) -+ where |= QSNET_DEBUG_BUFFER; -+ if ((mode & elan4_debug_toconsole) != 0 || type == DBG_CONSOLE) -+ where |= QSNET_DEBUG_CONSOLE; -+ -+ if (where == 0) -+ return; -+ -+ if ((unsigned long) type > DBG_NTYPES) -+ { -+ ELAN4_CTXT *ctxt = (ELAN4_CTXT *) type; -+ -+ if (elan4_debug_display_ctxt && ctxt->ctxt_num != elan4_debug_display_ctxt) -+ return; -+ if (elan4_debug_ignore_ctxt && ctxt->ctxt_num == elan4_debug_ignore_ctxt) -+ return; -+ -+ sprintf (prefix, "[%08ld.%04d] elan4 (%03x) ", lbolt, current->pid, ctxt->ctxt_num); -+ } -+ else if ((unsigned long) type == (int) DBG_CONSOLE) -+ prefix[0] = '\0'; -+ else -+ { -+ char *what; -+ -+ if (elan4_debug_ignore_type & (1 << ((unsigned long) type))) -+ return; -+ -+ switch ((unsigned long) type) -+ { -+ case (int) DBG_DEVICE: what = "dev"; break; -+ case (int) DBG_USER: what = "usr"; break; -+ default: what = NULL; break; -+ } -+ -+ if (what) -+ sprintf (prefix, "[%08ld.%04d] elan4 [%s] ", lbolt, current->pid, what); -+ else -+ sprintf (prefix, "[%08ld.%04d] elan4 [%3d] ", lbolt, current->pid, (int)(long)type); -+ } -+ -+ va_start(ap,fmt); -+ qsnet_vdebugf (where, prefix, fmt, ap); -+ va_end (ap); -+} -+ -+int -+elan4_assfail (ELAN4_CTXT *ctxt, const char *ex, const char *func, const char *file, const int line) -+{ -+ qsnet_debugf (QSNET_DEBUG_BUFFER, "elan%d: assertion failure: %s, function: %s, file %s, line: %d\n", -+ ctxt->ctxt_dev->dev_instance, ex, func, file, line); -+ -+ printk (KERN_EMERG "elan%d: assertion failure: %s, function: %s, file %s, line: %d\n", -+ ctxt->ctxt_dev->dev_instance, ex, func, file, line); -+ -+ if (panicstr) -+ return 0; -+ -+ if (assfail_mode & 1) /* return to BUG() */ -+ return 1; -+ -+ if (assfail_mode & 2) -+ panic ("elan%d: assertion failure: %s, function: %s, file %s, line: %d\n", -+ ctxt->ctxt_dev->dev_instance, ex, func, file, line); -+ -+ if (assfail_mode & 4) -+ elan4_debug = 0; -+ -+ return 0; -+ -+} -+ -+int -+elan4_debug_trigger (ELAN4_CTXT *ctxt, const char *func, const char *file, const int line, const char *fmt, ...) -+{ -+ va_list ap; -+ -+ va_start (ap, fmt); -+ qsnet_vdebugf (QSNET_DEBUG_CONSOLE|QSNET_DEBUG_BUFFER, "", fmt, ap); -+ va_end (ap); -+ -+ printk (KERN_EMERG "elan%d: debug trigger: function: %s, file %s, line: %d\n", ctxt->ctxt_dev->dev_instance, func, file, line); -+ -+ if (panicstr) -+ return 0; -+ -+ if (assfail_mode & 1) /* return to BUG() */ -+ return 1; -+ -+ if (assfail_mode & 2) -+ panic ("elan%d: debug trigger: function: %s, file %s, line: %d\n", ctxt->ctxt_dev->dev_instance, func, file, line); -+ -+ if (assfail_mode & 4) -+ elan4_debug = 0; -+ -+ return 0; -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan4/device.c linux-2.6.9/drivers/net/qsnet/elan4/device.c ---- clean/drivers/net/qsnet/elan4/device.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan4/device.c 2005-08-09 05:57:04.000000000 -0400 -@@ -0,0 +1,3127 @@ -+/* -+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: device.c,v 1.106.2.5 2005/08/09 09:57:04 mike Exp $" -+/* $Source: /cvs/master/quadrics/elan4mod/device.c,v $*/ -+ -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+/* allow this code to compile against an Eagle elanmod */ -+#ifdef __ELANMOD_DEVICE_H -+#define ELAN_DEV_OPS ELANMOD_DEV_OPS -+#define ELAN_DEV_OPS_VERSION ELANMOD_DEV_OPS_VERSION -+#define elan_dev_register elanmod_dev_register -+#define elan_dev_deregister elanmod_dev_deregister -+#endif -+ -+/* XXXX configurational defines */ -+ -+#if defined (CONFIG_MPSAS) -+#define HASH_0_SIZE_VAL (12 + 6) -+#define HASH_1_SIZE_VAL (2 + 6) -+#define CTXT_TABLE_SHIFT 8 -+#define LN2_MAX_CQS 8 /* 256 */ -+#else -+#define HASH_0_SIZE_VAL (13 + 6) -+#define HASH_1_SIZE_VAL (2 + 6) -+#define CTXT_TABLE_SHIFT 12 -+#define LN2_MAX_CQS 10 /* 1024 */ -+#endif -+ -+unsigned int elan4_hash_0_size_val = HASH_0_SIZE_VAL; -+unsigned int elan4_hash_1_size_val = HASH_1_SIZE_VAL; -+unsigned int elan4_ctxt_table_shift = CTXT_TABLE_SHIFT; -+unsigned int elan4_ln2_max_cqs = LN2_MAX_CQS; -+unsigned int elan4_dmaq_highpri_size = 2; /* 8192 entries */ -+unsigned int elan4_threadq_highpri_size = 1; /* 1024 entries */ -+unsigned int elan4_dmaq_lowpri_size = 2; /* 8192 entries */ -+unsigned int elan4_threadq_lowpri_size = 1; /* 1024 entries */ -+unsigned int elan4_interruptq_size = 0; /* 1024 entries */ -+unsigned int elan4_mainint_punt_loops = 1; -+unsigned int elan4_mainint_resched_ticks = 0; -+unsigned int elan4_linkport_lock = 0xbe0fcafe; /* default link port lock */ -+unsigned int elan4_eccerr_recheck = 1; -+ -+static int -+elan4_op_get_position (void *arg, ELAN_POSITION *ptr) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *)arg; -+ ELAN_POSITION pos; -+ -+ elan4_get_position (dev, &pos); -+ -+ return copyout (&pos, ptr, sizeof (ELAN_POSITION)); -+} -+ -+static int -+elan4_op_set_position (void *arg, unsigned short nodeid, unsigned short numnodes) -+{ -+ /* XXXXX -+ -+ ELAN4_DEV *dev = (ELAN4_DEV *) arg; -+ -+ compute_position (&pos, nodeid, numnode, num_down_links_value); -+ -+ return elan4_set_position (dev, pos); -+ */ -+ return EINVAL; -+} -+ -+ELAN_DEV_OPS elan4_dev_ops = -+{ -+ elan4_op_get_position, -+ elan4_op_set_position, -+ -+ ELAN_DEV_OPS_VERSION -+}; -+ -+static E4_uint32 -+elan4_read_filter (ELAN4_DEV *dev, unsigned networkctx) -+{ -+ return (elan4_sdram_readl (dev, dev->dev_ctxtable + (networkctx * sizeof (E4_ContextControlBlock)) + -+ offsetof (E4_ContextControlBlock, Filter))); -+} -+ -+static void -+elan4_write_filter (ELAN4_DEV *dev, unsigned networkctx, E4_uint32 value) -+{ -+ elan4_sdram_writel (dev, (dev->dev_ctxtable + (networkctx * sizeof (E4_ContextControlBlock)) + -+ offsetof (E4_ContextControlBlock, Filter)), value); -+ pioflush_sdram(dev); -+} -+ -+void -+elan4_set_schedstatus (ELAN4_DEV *dev, E4_uint32 intreg) -+{ -+ E4_uint32 setbits = 0; -+ E4_uint32 intmask = 0; -+ E4_uint32 haltmask; -+ E4_uint32 next_sched; -+ E4_uint32 next_intmask; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->dev_intmask_lock, flags); -+ -+ haltmask = (dev->dev_haltop_mask | dev->dev_haltop_active); -+ -+ if ((haltmask & INT_DProcHalted) || dev->dev_halt_all_count || dev->dev_halt_dproc_count) -+ setbits |= SCH_DProcHalt; -+ -+ if ((haltmask & INT_TProcHalted) || dev->dev_halt_all_count || dev->dev_halt_tproc_count) -+ setbits |= SCH_TProcHalt; -+ -+ if ((haltmask & INT_CProcHalted) || 
dev->dev_halt_all_count || dev->dev_halt_cproc_count) -+ setbits |= SCH_CProcHalt; -+ -+ if ((haltmask & INT_DiscardingLowPri) || dev->dev_discard_all_count || dev->dev_discard_lowpri_count) -+ setbits |= SCH_DiscardLowPriInput; -+ -+ if ((haltmask & INT_DiscardingHighPri) || dev->dev_discard_all_count || dev->dev_discard_highpri_count) -+ setbits |= SCH_DiscardHighPriInput; -+ -+ if (dev->dev_halt_lowpri_count) -+ setbits |= SCH_StopLowPriQueues; -+ -+ if (haltmask & INT_DProcHalted) intmask |= INT_DProcHalted; -+ if (haltmask & INT_TProcHalted) intmask |= INT_TProcHalted; -+ if (haltmask & INT_CProcHalted) intmask |= INT_CProcHalted; -+ if (haltmask & INT_DiscardingLowPri) intmask |= INT_DiscardingLowPri; -+ if (haltmask & INT_DiscardingHighPri) intmask |= INT_DiscardingHighPri; -+ -+ next_intmask = (dev->dev_intmask & ~(INT_Halted | INT_Discarding)) | (intmask & ~intreg); -+ next_sched = (dev->dev_schedstatus & ~(SCH_Halt | SCH_Discard)) | setbits; -+ -+ PRINTF5 (DBG_DEVICE, DBG_REGISTER, "elan4_set_schedstatus: haltmask=%x setbits=%x intmask=%x next_sched=%x next_intmask=%x\n", -+ haltmask, setbits, intmask, next_sched, next_intmask); -+ -+ CHANGE_INT_MASK (dev, next_intmask); -+ CHANGE_SCHED_STATUS (dev, next_sched); -+ -+ spin_unlock_irqrestore (&dev->dev_intmask_lock, flags); -+} -+ -+int -+elan4_route2str (E4_VirtualProcessEntry *route, char *routeStr) -+{ -+ int part = 0; -+ int shift; -+ int broadcast; -+ E4_uint64 value; -+ char *ptr = routeStr; -+ int b; -+ -+ /* unpack first */ -+ value = route->Values[part] & 0x7f; -+ if ( (value & 0x78) == 0) { -+ /* empty route */ -+ strcpy(routeStr,"Invalid lead route"); -+ return (-EINVAL); -+ } -+ -+ if ( value & 0x40 ) { -+ /* broad cast */ -+ strcpy(routeStr,"Broadcast"); -+ return (-EINVAL); -+ } else { -+ switch ((value & 0x30) >> 4) { -+ case 0: { *ptr++ = '0' + (value & 0x7); break; } -+ case 1: { *ptr++ = 'M'; break; } -+ case 2: { *ptr++ = 'U'; break; } -+ case 3: { *ptr++ = 'A'; break; } -+ } -+ } -+ -+ shift = 16; -+ broadcast = 0; -+ while ( 1 ) { -+ b = (route->Values[part] >> shift) & 0xf; -+ -+ if ( broadcast ) { -+ /* about to pick up the second byte of a broadcast pair */ -+ broadcast = 0; -+ } else { -+ if ( b & 0x8) { -+ /* output link */ -+ *ptr++ = '0' + (b & 0x7); -+ } else { -+ if ( b & 0x4) { -+ /* broad cast */ -+ broadcast = 1; -+ } else { -+ switch ( b & 0x3 ) { -+ case 0: { *ptr++ = 0 ; return (0); break; } -+ case 1: { *ptr++ = 'M'; break; } -+ case 2: { *ptr++ = 'U'; break; } -+ case 3: { *ptr++ = 'A'; break; } -+ } -+ } -+ } -+ } -+ -+ shift += 4; -+ if ( part != 0 ) { -+ if ( shift > 36) { -+ /* too far, now in the crc value */ -+ strcpy(routeStr,"Invalid route length"); -+ return (-EINVAL); -+ } -+ } else { -+ if ( shift >= 64) { -+ /* move to the next 64 bits */ -+ part = 1; -+ shift = 2; -+ } -+ } -+ } -+ -+ /* never reached */ -+ return (-EINVAL); -+} -+ -+static int elan4_hardware_lock_count = 0; -+ -+void -+elan4_hardware_lock_check(ELAN4_DEV *dev, char *from) -+{ -+ -+ int reg = read_reg32 (dev, CommandSchedDataPort[2]); -+ -+ /* dont spam too much */ -+ if ( elan4_hardware_lock_count++ > 10) return; -+ -+ printk ("elan%d: %s timed out intmask=0x%x InterruptReg=0x%x (%d)\n", dev->dev_instance, from, dev->dev_intmask, read_reg32 (dev, InterruptReg), elan4_hardware_lock_count); -+ -+ /* an 0xF in either and we need to output more */ -+ if ((reg & 0xf0) || ( reg & 0x0f)) { -+ ELAN4_ROUTE_RINGBUF *ringbuf; -+ char routestr[33]; -+ -+ printk ("elan%d: CommandSchedDataPort[0] 0x%016x 0x%016x 0x%016x 
0x%016x\n",
-+		dev->dev_instance,
-+		read_reg32 (dev, CommandSchedDataPort[0]),
-+		read_reg32 (dev, CommandSchedDataPort[1]),
-+		reg,
-+		read_reg32 (dev, CommandSchedDataPort[3])
-+	    );
-+	/* dump out /proc/qsnet/elan4/deviceN/stats/cproctimeoutroutes */
-+	printk ("elan%d: cat of /proc/qsnet/elan4/device%d/stats/cproctimeoutroutes\n", dev->dev_instance, dev->dev_instance);
-+
-+	ringbuf = &dev->dev_cproc_timeout_routes;
-+
-+	if (!ringbuf)
-+	    printk ("elan%d: No stats available\n", dev->dev_instance);
-+	else
-+	{
-+	    int start;
-+	    int end;
-+	    int i;
-+
-+	    memset(&routestr, 0, 33);
-+
-+	    start = ringbuf->start;
-+	    end   = ringbuf->end;
-+
-+	    if (end < start)
-+		end = DEV_STASH_ROUTE_COUNT;
-+
-+	    for (i=start; i<end; i++)
-+	    {
-+		elan4_route2str (&ringbuf->routes[i], routestr);
-+		printk ( "elan%d: Route %llx %llx->%s\n", dev->dev_instance, (long long)ringbuf->routes[i].Values[0], (long long)ringbuf->routes[i].Values[1], routestr);
-+	    }
-+
-+	    if (ringbuf->end < start)
-+	    {
-+		start = 0;
-+		end = ringbuf->end;
-+		for (i=start; i<end; i++)
-+		{
-+		    elan4_route2str (&ringbuf->routes[i], routestr);
-+		    printk ( "elan%d: Route %llx %llx->%s\n", dev->dev_instance, (long long)ringbuf->routes[i].Values[0], (long long)ringbuf->routes[i].Values[1], routestr);
-+		}
-+	    }
-+
-+	}
-+    }
-+}
-+static void
-+dev_haltop_timer_func (unsigned long arg)
-+{
-+    ELAN4_DEV *dev = (ELAN4_DEV *) arg;
-+
-+    elan4_hardware_lock_check(dev,"haltop");
-+}
-+
-+void
-+elan4_queue_haltop (ELAN4_DEV *dev, ELAN4_HALTOP *op)
-+{
-+    unsigned long flags;
-+
-+    spin_lock_irqsave (&dev->dev_haltop_lock, flags);
-+
-+    /* add to the end of the halt operations list */
-+    list_add_tail (&op->op_link, &dev->dev_haltop_list);
-+
-+    if ((dev->dev_haltop_mask & op->op_mask) != op->op_mask)
-+    {
-+	dev->dev_haltop_mask |= op->op_mask;
-+
-+	elan4_set_schedstatus (dev, 0);
-+    }
-+
-+    mod_timer (&dev->dev_haltop_timer, (jiffies + (HZ*10)));	/* 10 seconds */
-+
-+    spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
-+}
-+
-+void
-+elan4_queue_intop (ELAN4_DEV *dev, ELAN4_CQ *cq, ELAN4_INTOP *op)
-+{
-+    unsigned long flags;
-+
-+    spin_lock_irqsave (&dev->dev_intop_lock, flags);
-+
-+    op->op_cookie = INTOP_ONESHOT | ((dev->dev_intop_cookie++) & INTOP_VALUE_MASK);
-+
-+    list_add_tail (&op->op_link, &dev->dev_intop_list);
-+
-+    writeq ((op->op_cookie << E4_MAIN_INT_SHIFT) | INTERRUPT_CMD, (void *)(cq->cq_mapping));
-+
-+    spin_unlock_irqrestore (&dev->dev_intop_lock, flags);
-+}
-+
-+void
-+elan4_register_intop (ELAN4_DEV *dev, ELAN4_INTOP *op)
-+{
-+    unsigned long flags;
-+
-+    spin_lock_irqsave (&dev->dev_intop_lock, flags);
-+
-+    op->op_cookie = INTOP_PERSISTENT | ((dev->dev_intop_cookie++) & INTOP_VALUE_MASK);
-+
-+    list_add_tail (&op->op_link, &dev->dev_intop_list);
-+
-+    spin_unlock_irqrestore (&dev->dev_intop_lock, flags);
-+}
-+
-+void
-+elan4_deregister_intop (ELAN4_DEV *dev, ELAN4_INTOP *op)
-+{
-+    unsigned long flags;
-+
-+    spin_lock_irqsave (&dev->dev_intop_lock, flags);
-+    list_del (&op->op_link);
-+    spin_unlock_irqrestore (&dev->dev_intop_lock, flags);
-+}
-+
-+static __inline__ void
-+__issue_dma_flushop_cmd (ELAN4_DEV *dev, ELAN4_CQ *cq)
-+{
-+    E4_uint64 eventaddr = dev->dev_tproc_space + 64;
-+
-+    writeq (WAIT_EVENT_CMD | eventaddr, (void *)(cq->cq_mapping));
-+    writeq (0, (void *)(cq->cq_mapping));
-+    writeq (0, (void *)(cq->cq_mapping));
-+    writeq (0, (void *)(cq->cq_mapping));
-+
-+    writeq (DMA_ShMemWrite | RUN_DMA_CMD, (void *)(cq->cq_mapping));
-+    writeq (0 /* cookie */,   (void *)(cq->cq_mapping));
-+    writeq (0 /* vproc */,    (void *)(cq->cq_mapping));
-+    writeq (0 /* srcAddr */,  (void *)(cq->cq_mapping));
-+    writeq (0
-+ writeq (0 /* srcEvent */, (void *)(cq->cq_mapping));
-+ writeq (0 /* dstEvent */, (void *)(cq->cq_mapping));
-+ writeq (SET_EVENT_CMD, (void *)(cq->cq_mapping));
-+}
-+
-+static void
-+handle_dma_flushops_intop (ELAN4_DEV *dev, void *arg)
-+{
-+ unsigned int hipri = ((unsigned long) arg & 1);
-+ E4_uint64 status = dev->dev_dma_flushop[hipri].status;
-+ ELAN4_CQ *cq = dev->dev_dma_flushop[hipri].cq;
-+ sdramaddr_t cqdesc = dev->dev_cqaddr + (elan4_cq2num(cq) * sizeof (E4_CommandQueueDesc));
-+ E4_uint64 queuePtrs = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs));
-+ E4_uint32 completedPtr = CQ_CompletedPtr(queuePtrs);
-+ E4_uint32 size = CQ_Size ((queuePtrs >> CQ_SizeShift) & CQ_SizeMask);
-+ unsigned long flags;
-+
-+ /*
-+ * Since we're called from a main interrupt which was issued through the appropriate
-+ * flushcq the command queue descriptor for dma flushing can no longer be in the
-+ * insert cache, nor can it be in the extractor (as it's trapped), hence it is
-+ * safe to modify the completed pointer
-+ */
-+
-+ spin_lock_irqsave (&dev->dev_haltop_lock, flags);
-+
-+ ASSERT (status != 0);
-+
-+ /* skip over either the DMA/SETEVENT or just the SETEVENT depending on the trap type */
-+ if (CPROC_TrapType (status) == CommandProcDmaQueueOverflow)
-+ completedPtr = (completedPtr & ~(size-1)) | ((completedPtr + 64) & (size - 1));
-+ else
-+ completedPtr = (completedPtr & ~(size-1)) | ((completedPtr + 8) & (size - 1));
-+
-+ elan4_sdram_writel (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs) + 4,
-+ ((queuePtrs >> 32) & ~CQ_PtrOffsetMask) | (completedPtr & CQ_PtrOffsetMask));
-+
-+ elan4_restartcq (dev, dev->dev_dma_flushop[hipri].cq);
-+
-+ if (! list_empty (&dev->dev_dma_flushop[hipri].list))
-+ __issue_dma_flushop_cmd (dev, dev->dev_dma_flushop[hipri].cq);
-+
-+ dev->dev_dma_flushop[hipri].status = 0;
-+
-+ spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
-+
-+}
-+
-+static void
-+handle_dma_flushops (ELAN4_DEV *dev, E4_uint64 status, int cqnum)
-+{
-+ unsigned int hipri = (cqnum == elan4_cq2num(dev->dev_dma_flushop[1].cq) ? 1 : 0);
-+ ELAN4_CQ *cq = dev->dev_dma_flushop[hipri].cq;
-+ ELAN4_CQ *flushq = dev->dev_flush_cq[elan4_cq2num(cq) & (COMMAND_INSERTER_CACHE_ENTRIES-1)];
-+ struct list_head *ops;
-+ unsigned long flags;
-+ int qfull, count;
-+ E4_uint64 queuePtrs;
-+ LIST_HEAD(list);
-+
-+ spin_lock_irqsave (&dev->dev_haltop_lock, flags);
-+
-+ ASSERT (cqnum == elan4_cq2num (dev->dev_dma_flushop[hipri].cq));
-+ ASSERT (! list_empty (&dev->dev_dma_flushop[hipri].list));
-+ ASSERT (dev->dev_dma_flushop[hipri].status == 0);
-+
-+ /* remove the whole list */
-+ ops = dev->dev_dma_flushop[hipri].list.next;
-+
-+ list_del_init (&dev->dev_dma_flushop[hipri].list);
-+
-+ /* and add it to our local list */
-+ list_add_tail (&list, ops);
-+
-+ /* now determine whether the queue was full - since it cannot be empty,
-+ * if the front and back pointers are the same then it is full */
-+ queuePtrs = hipri ? read_reg64 (dev, DProcHighPriPtrs) : read_reg64 (dev, DProcLowPriPtrs);
-+ qfull = (E4_QueueFrontPointer (queuePtrs) == E4_QueueBackPointer (queuePtrs));
-+
-+ if (CPROC_TrapType(status) == CommandProcDmaQueueOverflow && !qfull)
-+ printk (" ******* queue overflow trap - but queue not full\n");
-+
-+ if (qfull && CPROC_TrapType(status) != CommandProcDmaQueueOverflow)
-+ printk (" ****** queue full - but not overflow trap : %llx %llx %x\n",
-+ read_reg64 (dev, DProcLowPriPtrs), read_reg64 (dev, DProcHighPriPtrs), CPROC_TrapType(status));
-+
-+ /* Store the status register; this also indicates that the intop is pending */
-+ dev->dev_dma_flushop[hipri].status = status;
-+
-+ spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
-+
-+ /* Issue a main interrupt command to the appropriate flush command queue,
-+ * which will then safely update the completed pointer to skip over the
-+ * command which has trapped, and also prevents any new commands being
-+ * issued to the command queue.
-+ */
-+ dev->dev_dma_flushop[hipri].intop.op_function = handle_dma_flushops_intop;
-+ dev->dev_dma_flushop[hipri].intop.op_arg = (void *) (unsigned long) hipri;
-+
-+ elan4_queue_intop (dev, flushq, &dev->dev_dma_flushop[hipri].intop);
-+
-+ /* now execute all operations */
-+ for (count = 0; ! list_empty (&list); count++)
-+ {
-+ ELAN4_DMA_FLUSHOP *op = list_entry (list.next, ELAN4_DMA_FLUSHOP, op_link);
-+
-+ list_del (&op->op_link);
-+
-+ (*op->op_function) (dev, op->op_arg, qfull);
-+ }
-+
-+ /* finally release the "reasons" for halting */
-+ spin_lock_irqsave (&dev->dev_haltop_lock, flags);
-+ if ((dev->dev_halt_dproc_count -= count) == 0)
-+ elan4_set_schedstatus (dev, 0);
-+ spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
-+
-+ return;
-+}
-+
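-+/* elan4_queue_dma_flushop: DMA queue flushing works by halting the DMA
-+ * processor (dev_halt_dproc_count) and then issuing a WAIT_EVENT/RUN_DMA
-+ * sequence through the flush command queue; because the processor is halted
-+ * the command traps (CommandProcDmaQueueOverflow when the run queue is
-+ * full, a permission trap otherwise) and handle_dma_flushops() then runs
-+ * each queued operation with an indication of whether the queue was full. */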
-+void
-+elan4_queue_dma_flushop (ELAN4_DEV *dev, ELAN4_DMA_FLUSHOP *op, int hipri)
-+{
-+ unsigned long flags;
-+
-+ spin_lock_irqsave (&dev->dev_haltop_lock, flags);
-+
-+ if (dev->dev_halt_dproc_count++ == 0) /* ensure that the DMA processor cannot */
-+ elan4_set_schedstatus (dev, 0); /* execute the DMA we issue. */
-+
-+ if (list_empty (&dev->dev_dma_flushop[hipri].list) && dev->dev_dma_flushop[hipri].status == 0)
-+ __issue_dma_flushop_cmd (dev, dev->dev_dma_flushop[hipri].cq);
-+
-+ list_add_tail (&op->op_link, &dev->dev_dma_flushop[hipri].list);
-+
-+ spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
-+}
-+
-+static void
-+enable_elan_errors (void *arg)
-+{
-+ ELAN4_DEV *dev = (ELAN4_DEV *) arg;
-+
-+ ENABLE_INT_MASK (dev, INT_ErrorInterrupts);
-+}
-+
-+#define ERROR_DISABLE_PERIOD (hz/2)
-+#define ERROR_SAMPLE_PERIOD (hz/10)
-+#define ERROR_LIMIT (100)
-+
-+static __inline__ void
-+check_error_rate (ELAN4_DEV *dev)
-+{
-+ if (dev->dev_error_time == (lbolt/ERROR_SAMPLE_PERIOD))
-+ {
-+ if (++dev->dev_errors_per_period >= ERROR_LIMIT && (dev->dev_intmask & INT_ErrorInterrupts))
-+ {
-+ DISABLE_INT_MASK (dev, INT_ErrorInterrupts);
-+
-+ schedule_timer_fn (&dev->dev_error_timeoutid, enable_elan_errors, (void *) dev, ERROR_DISABLE_PERIOD);
-+ }
-+ }
-+ else
-+ {
-+ dev->dev_error_time = (lbolt/ERROR_SAMPLE_PERIOD);
-+ dev->dev_errors_per_period = 0;
-+ }
-+}
-+
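-+/* handle_mainints: consume entries from the main interrupt queue and
-+ * dispatch each cookie to the owning context's op_interrupt handler.
-+ * When called from the interrupt handler "nintr" bounds how many entries
-+ * may be processed before punting to the helper thread; when called from
-+ * the thread "nticks" bounds how long it may run before rescheduling.
-+ * Returns 1 if it punted, 0 once the queue has been drained. */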
-+static __inline__ int
-+handle_mainints (ELAN4_DEV *dev, int nticks, int nintr)
-+{
-+ E4_uint32 nfptr = dev->dev_interruptq_nfptr;
-+ E4_uint32 bptr = read_reg32 (dev, MainIntQueuePtrs.s.Back);
-+ E4_uint32 qsize = E4_QueueSize(elan4_interruptq_size);
-+ E4_uint32 qmask = qsize - 1;
-+ long tlim = lbolt + nticks;
-+ int done = 0;
-+ unsigned long flags;
-+
-+ do {
-+ int todo = ((bptr - nfptr) & qmask) / E4_MainIntEntrySize;
-+
-+ ASSERT (todo > 0);
-+
-+ PRINTF4 (DBG_DEVICE, DBG_MAININT, "handle_mainints: fptr %x nfptr %x bptr %x : %d todo\n",
-+ read_reg32 (dev, MainIntQueuePtrs.s.Front), nfptr, bptr, todo);
-+
-+ if (nintr >= 0 && (done + todo) > nintr) /* punt because too many to do in interrupt */
-+ {
-+ PRINTF4 (DBG_DEVICE, DBG_MAININT, "handle_mainints: punting (done %d todo %d) (bptr %x fptr %x)\n",
-+ done, todo, bptr, read_reg32 (dev, MainIntQueuePtrs.s.Front));
-+
-+ return 1;
-+ }
-+
-+ BucketDevStat (dev, s_mainints, todo, MainIntBuckets);
-+
-+ /* consume all the entries in the queue which we think are there */
-+ do {
-+ E4_uint64 value = elan4_sdram_readq (dev, nfptr);
-+ ELAN4_CTXT *ctxt = elan4_localctxt (dev, E4_MAIN_INT_CTX (value));
-+ E4_uint32 fptr = nfptr;
-+
-+ PRINTF2 (DBG_DEVICE, DBG_MAININT, "handle_mainints: process cookie %llx - write fptr=%x\n", value, nfptr);
-+
-+ if (ctxt == NULL)
-+ ELAN4_DEBUG_TRIGGER (&dev->dev_ctxt, "elan4:handle_mainints: context %d invalid\n", E4_MAIN_INT_CTX (value));
-+ else
-+ ctxt->ctxt_ops->op_interrupt (ctxt, E4_MAIN_INT_COOKIE(value));
-+
-+ /* compute the next queue front pointer, before updating the front pointer
-+ * since we need to ensure that elan4_queue_mainintop doesn't see the queue
-+ * as being empty if an extra interrupt is queued in between */
-+ dev->dev_interruptq_nfptr = nfptr = (nfptr & ~qmask) | ((nfptr + sizeof (E4_uint64)) & qmask);
-+
-+ /* update the queue front pointer, doing this will clear the
-+ * interrupt for *all* interrupt cookies which have previously
-+ * been added to the queue */
-+ write_reg32 (dev, MainIntQueuePtrs.s.Front, E4_QueueFrontValue (fptr, elan4_interruptq_size));
-+ pioflush_reg (dev);
-+ } while (bptr != nfptr);
-+
-+ /* re-sample the back pointer and if it's different from the previous
-+ * queue front pointer, then the queue has something on it again */
-+ done += todo;
-+
-+ if ((nticks > 0 && ((int) (lbolt - tlim)) > 0)) /* been executing for too long in thread */
-+ return 1;
-+
-+ bptr = read_reg32 (dev, MainIntQueuePtrs.s.Back);
-+
-+ PRINTF3 (DBG_DEVICE, DBG_MAININT, "handle_mainints: resample : fptr %x nfptr %x bptr %x\n",
-+ read_reg32 (dev, MainIntQueuePtrs.s.Front), nfptr, bptr);
-+
-+ /* at this point we've made some space in the interrupt queue,
-+ * so check to see if we've got anything to restart */
-+ spin_lock_irqsave (&dev->dev_mainint_lock, flags);
-+ while (! list_empty (&dev->dev_interruptq_list))
-+ {
-+ ELAN4_INTOP *op = list_entry (dev->dev_interruptq_list.next, ELAN4_INTOP, op_link);
-+
-+ list_del (&op->op_link);
-+
-+ op->op_function (dev, op->op_arg);
-+ }
-+ spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
-+
-+ } while (bptr != nfptr);
-+
-+ return 0;
-+}
-+
-+static void
-+elan4_mainint_thread (ELAN4_DEV *dev)
-+{
-+ unsigned long flags;
-+
-+ kernel_thread_init ("elan4_mainint");
-+
-+ spin_lock_irqsave (&dev->dev_mainint_lock, flags);
-+ for (;;)
-+ {
-+ if (dev->dev_stop_threads)
-+ break;
-+
-+ if (! (dev->dev_intmask & INT_MainInterrupt))
-+ {
-+ spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
-+
-+ if (handle_mainints (dev, elan4_mainint_resched_ticks, -1))
-+ BumpDevStat (dev, s_mainint_rescheds);
-+
-+ spin_lock_irqsave (&dev->dev_mainint_lock, flags);
-+ ENABLE_INT_MASK (dev, INT_MainInterrupt);
-+ }
-+
-+ kcondvar_wait (&dev->dev_mainint_wait, &dev->dev_mainint_lock, &flags);
-+ }
-+
-+ dev->dev_mainint_stopped = 1;
-+ kcondvar_wakeupall (&dev->dev_mainint_wait, &dev->dev_mainint_lock);
-+
-+ spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
-+
-+ kernel_thread_exit();
-+}
-+
-+void
-+elan4_queue_mainintop (ELAN4_DEV *dev, ELAN4_INTOP *op)
-+{
-+ unsigned long flags;
-+
-+ spin_lock_irqsave (&dev->dev_mainint_lock, flags);
-+ if (dev->dev_interruptq_nfptr == read_reg32 (dev, MainIntQueuePtrs.s.Back))
-+ op->op_function (dev, op->op_arg);
-+ else
-+ list_add_tail (&op->op_link, &dev->dev_interruptq_list);
-+ spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
-+}
-+
-+static __inline__ E4_uint32
-+handle_cproc_trap (ELAN4_DEV *dev)
-+{
-+ E4_uint32 cqptr = read_reg32 (dev, CommandControl.CommandQueueDescsBase) & E4_QueueDescPtrMask;
-+ unsigned cqnum = ((cqptr - dev->dev_cqaddr) / sizeof (E4_CommandQueueDesc));
-+ sdramaddr_t cqdesc = dev->dev_cqaddr + (cqnum * sizeof (E4_CommandQueueDesc));
-+ E4_uint64 control = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control));
-+ E4_uint64 status = read_reg64 (dev, CProcStatus);
-+ ELAN4_CTXT *ctxt = elan4_localctxt (dev, CQ_Context (control));
-+
-+ PRINTF4 (DBG_DEVICE, DBG_INTR, "handle_cproc_trap: cqnum=%d status=%016llx control=%016llx TrapType=%x\n",
-+ cqnum, status, control, CPROC_TrapType (status));
-+ PRINTF4 (DBG_DEVICE, DBG_INTR, " %016llx %016llx %016llx %016llx\n",
-+ elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs)),
-+ elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_HoldingValue)),
-+ elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_AckBuffers)),
-+ elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control)));
-+
-+ BumpDevStat (dev, s_cproc_traps);
-+
-+ if (ctxt == NULL)
-+ ELAN4_DEBUG_TRIGGER (&dev->dev_ctxt, "elan4:handle_cproc_trap: context %d is invalid\n", CQ_Context (control));
-+ else
-+ ctxt->ctxt_ops->op_cproc_trap (ctxt, status, cqnum);
-+
-+ return (CPROC_TrapType (status) == CommandProcWaitTrap ?
SCH_RestartCProc | SCH_RestartEProc : SCH_RestartCProc); -+} -+ -+static __inline__ E4_uint32 -+handle_dproc_trap (ELAN4_DEV *dev, int unit) -+{ -+ E4_uint64 status = (unit == 0) ? read_reg64 (dev, DProc0Status) : read_reg64 (dev, DProc1Status); -+ E4_uint32 restart = (unit == 0) ? SCH_RestartDma0Proc : SCH_RestartDma1Proc; -+ ELAN4_CTXT *ctxt = elan4_localctxt (dev, DPROC_Context (status)); -+ -+ PRINTF3 (DBG_DEVICE, DBG_INTR, "handle_dproc_trap: unit %d context %d%s\n", unit, DPROC_Context(status), -+ DPROC_PrefetcherFault(status) ? " (prefetcher)" : ""); -+ -+ if (DPROC_PrefetcherFault (status)) -+ restart |= SCH_RestartDmaPrefetchProc; -+ -+ BumpDevStat (dev, s_dproc_traps); -+ -+ if (ctxt == NULL) -+ ELAN4_DEBUG_TRIGGER (&dev->dev_ctxt, "elan4:handle_dproc_trap: context %d is invalid\n", DPROC_Context (status)); -+ else -+ ctxt->ctxt_ops->op_dproc_trap (ctxt, status, unit); -+ -+ return (restart); -+} -+ -+static __inline__ E4_uint32 -+handle_eproc_trap (ELAN4_DEV *dev) -+{ -+ E4_uint64 status = read_reg64 (dev, EProcStatus); -+ ELAN4_CTXT *ctxt = elan4_localctxt (dev, EPROC_Context (status)); -+ -+ BumpDevStat (dev, s_eproc_traps); -+ -+ if (ctxt == NULL) -+ ELAN4_DEBUG_TRIGGER (&dev->dev_ctxt, "elan4:handle_eproc_trap: context %d is invalid\n", EPROC_Context (status)); -+ else -+ ctxt->ctxt_ops->op_eproc_trap (ctxt, status); -+ -+ return (SCH_RestartEProc); -+} -+ -+static __inline__ E4_uint32 -+handle_tproc_trap (ELAN4_DEV *dev) -+{ -+ E4_uint64 status = read_reg64 (dev, TProcStatus); -+ ELAN4_CTXT *ctxt = elan4_localctxt (dev, TPROC_Context (status)); -+ -+ BumpDevStat (dev, s_tproc_traps); -+ -+ if (ctxt == NULL) -+ ELAN4_DEBUG_TRIGGER (&dev->dev_ctxt, "elan4:handle_tproc_trap: context %d is invalid\n", TPROC_Context (status)); -+ else -+ ctxt->ctxt_ops->op_tproc_trap (ctxt, status); -+ -+ return (SCH_RestartTProc); -+} -+ -+static __inline__ void -+handle_haltints (ELAN4_DEV *dev, E4_uint32 intreg) -+{ -+ struct list_head list = LIST_HEAD_INIT(list); -+ E4_uint32 mask = 0; -+ E4_uint32 active = 0; -+ struct list_head *entry; -+ struct list_head *next; -+ unsigned long flags; -+ -+ BumpDevStat (dev, s_haltints); -+ -+ spin_lock_irqsave (&dev->dev_haltop_lock, flags); -+ -+ list_for_each_safe (entry, next, &dev->dev_haltop_list) { -+ ELAN4_HALTOP *op = list_entry (entry, ELAN4_HALTOP, op_link); -+ -+ PRINTF (DBG_DEVICE, DBG_INTR, "handle_haltints: op=%p op_mask=%x intreg=%x\n", op, op->op_mask, intreg); -+ -+ if ((op->op_mask & intreg) != op->op_mask) -+ mask |= op->op_mask; -+ else -+ { -+ list_del (&op->op_link); /* remove from list */ -+ list_add_tail (&op->op_link, &list); /* add to local list */ -+ -+ active |= op->op_mask; -+ } -+ } -+ -+ ASSERT (dev->dev_haltop_mask == (mask | active)); -+ -+ dev->dev_haltop_mask = mask; -+ -+ if (list_empty (&dev->dev_haltop_list)) { -+ del_timer(&dev->dev_haltop_timer); -+ } -+ -+ if (list_empty (&list)) -+ elan4_set_schedstatus (dev, intreg); -+ else -+ { -+ dev->dev_haltop_active = active; -+ spin_unlock_irqrestore (&dev->dev_haltop_lock, flags); -+ -+ while (! 
list_empty (&list))
-+ {
-+ ELAN4_HALTOP *op = list_entry (list.next, ELAN4_HALTOP, op_link);
-+
-+ list_del (&op->op_link);
-+
-+ (*op->op_function) (dev, op->op_arg);
-+ }
-+
-+ spin_lock_irqsave (&dev->dev_haltop_lock, flags);
-+ dev->dev_haltop_active = 0;
-+
-+ elan4_set_schedstatus (dev, 0);
-+ }
-+
-+ spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
-+}
-+
-+static __inline__ E4_uint32
-+handle_iproc_trap (ELAN4_DEV *dev, unsigned unit)
-+{
-+ sdramaddr_t hdroff = dev->dev_inputtraparea + offsetof (E4_IprocTrapState, TrHeader[0][unit]);
-+ E4_uint64 status = elan4_sdram_readq (dev, hdroff + offsetof (E4_IprocTrapHeader, IProcStatusCntxAndTrType));
-+ E4_uint32 filter = elan4_read_filter (dev, IPROC_NetworkContext (status));
-+ ELAN4_CTXT *ctxt = elan4_localctxt (dev, filter & E4_FILTER_CONTEXT_MASK);
-+
-+ /*
-+ * The context is not valid in the following case:
-+ * an ack has not been sent AND bad CRC/bad length.
-+ *
-+ * NOTE TransCRCStatus and BadLength only valid if NOT an EopTrap.
-+ */
-+ ASSERT ((IPROC_GoodAckSent (status) & (1 << IPROC_InputterChan (status))) || IPROC_EOPTrap (status) ||
-+ (IPROC_TransCRCStatus (status) == CRC_STATUS_GOOD && !IPROC_BadLength (status)));
-+
-+ BumpDevStat (dev, s_iproc_traps);
-+
-+ if (ctxt == NULL)
-+ {
-+ ELAN4_DEBUG_TRIGGER (&dev->dev_ctxt, "elan4:handle_iproc_trap: network %d context %d (%x) is invalid\n", IPROC_NetworkContext (status),
-+ filter & E4_FILTER_CONTEXT_MASK, filter);
-+
-+ elan4_write_filter (dev, IPROC_NetworkContext (status), E4_FILTER_DISCARD_ALL);
-+ }
-+ else
-+ ctxt->ctxt_ops->op_iproc_trap (ctxt, status, unit);
-+
-+ return (SCH_RestartCh0LowPriInput << unit);
-+}
-+
-+void
-+handle_pcimemerr (ELAN4_DEV *dev)
-+{
-+ elan4_pcierror (dev);
-+
-+ check_error_rate (dev);
-+}
-+
-+void
-+handle_sdramint (ELAN4_DEV *dev)
-+{
-+ E4_uint64 status = read_reg64 (dev, SDRamECCStatus);
-+ E4_uint64 ConfigRegValue = read_reg64 (dev, SDRamConfigReg);
-+ char errstr[200];
-+ int i;
-+ int Found = 0;
-+
-+ PRINTF0 (DBG_DEVICE, DBG_INTR, "handle_sdramint\n");
-+
-+ printk ("elan%d: ECC Error %s status=%llx\n",
-+ dev->dev_instance, elan4_sdramerr2str (dev, status, ConfigRegValue, errstr), (long long)status);
-+
-+ if (!ECC_UncorrectableErr(status) && !ECC_MultUncorrectErrs(status))
-+ printk ("elan%d: ECC error data=%016llx\n", dev->dev_instance, elan4_sdram_readq (dev, ECC_Addr(status)));
-+
-+ if (ECC_CorrectableErr (status))
-+ BumpDevStat (dev, s_correctable_errors);
-+ if (ECC_MultCorrectErrs (status))
-+ BumpDevStat (dev, s_multiple_errors);
-+
-+ if (ECC_UncorrectableErr(status))
-+ panic ("elan%d: uncorrectable ECC error\n", dev->dev_instance);
-+ if (ECC_MultUncorrectErrs(status))
-+ panic ("elan%d: multiple uncorrectable ECC errors\n", dev->dev_instance);
-+
-+ PULSE_SYSCONTROL (dev, CONT_CLEAR_SDRAM_ERROR);
-+
-+ /*
-+ * Now try to test for a read/write error type.
-+ * This can only be done if it was a correctable error, as an uncorrectable error might lock up the node.
-+ * It should not be attempted if the data is in the dcache, because fetching again would not generate an
-+ * error even if the problem was a read, and flushing the cache line would fix a write problem.
-+ * Reading the same location again should cause a new error if the problem was caused by a bad write.
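-+ *
-+ * The outcome is recorded in bits [53:52] of the status value stashed
-+ * below: 01 => write error, 10 => read error, 11 => line was already in
-+ * the dcache (recheck skipped), 00 => recheck not attempted.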
-+ */
-+ if (elan4_eccerr_recheck &&
-+ (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA) &&
-+ ECC_CorrectableErr(status) && !ECC_UncorrectableErr(status))
-+ {
-+ E4_uint64 status2;
-+ E4_uint64 Addr = ECC_Addr(status) & ~(E4_CACHELINE_SIZE-1);
-+ E4_uint32 SetIndex = (Addr >> 6) & ~(E4_NumCacheLines-1);
-+ int InCache = 0;
-+
-+ /* check the cache tags to see if the data has been read into a cache line. */
-+ for (i=0; i<E4_NumCacheSets; i++)
-+ if ((__elan4_readq (dev, dev->dev_regs + offsetof(E4_Registers, Tags.Tags[i][SetIndex].Value)) & 0x7fffe000) == (Addr & 0x7fffe000))
-+ {
-+ InCache = 1;
-+ break;
-+ }
-+
-+ if (InCache == 0)
-+ {
-+ printk ("elan%d: checking if ECC error was read or write\n", dev->dev_instance);
-+
-+ /* Now read and throw away the answer. A read of a word will schedule a block read of sdram */
-+ elan4_sdram_readq (dev, Addr);
-+ status2 = read_reg64 (dev, SDRamECCStatus);
-+ if ((Addr == (ECC_Addr(status2) & ~(E4_CACHELINE_SIZE-1))) && ECC_CorrectableErr(status2)) // Write error.
-+ {
-+ status = (status & ~0x0030000000000000ULL) | 0x0010000000000000ULL;
-+ PULSE_SYSCONTROL (dev, CONT_CLEAR_SDRAM_ERROR);
-+ }
-+ else
-+ status = (status & ~0x0030000000000000ULL) | 0x0020000000000000ULL;
-+ }
-+ else
-+ status = status | 0x0030000000000000ULL;
-+ }
-+ else
-+ status &= ~0x0030000000000000ULL;
-+
-+ /* search for this error already being logged */
-+ for (i = sizeof (dev->dev_sdramerrs)/sizeof (dev->dev_sdramerrs[0]) - 1; i >= 0; i--)
-+ if ((dev->dev_sdramerrs[i].EccStatus == status) && (dev->dev_sdramerrs[i].ConfigReg == ConfigRegValue))
-+ {
-+ Found = 1;
-+ dev->dev_sdramerrs[i].ErrorCount += 1; // Keep a count.
-+ break;
-+ }
-+
-+ /* stash the status for /proc */
-+ if (!Found)
-+ {
-+ for (i = sizeof (dev->dev_sdramerrs)/sizeof (dev->dev_sdramerrs[0]) - 1; i > 0; i--)
-+ dev->dev_sdramerrs[i] = dev->dev_sdramerrs[i-1];
-+ dev->dev_sdramerrs[0].EccStatus = status;
-+ dev->dev_sdramerrs[0].ConfigReg = ConfigRegValue;
-+ dev->dev_sdramerrs[0].ErrorCount = 1; // First error
-+ }
-+
-+ check_error_rate (dev);
-+}
-+
-+static void
-+clear_linkerr_led (void *arg)
-+{
-+ ELAN4_DEV *dev = (ELAN4_DEV *) arg;
-+
-+ write_i2c (dev, I2cStatus, read_i2c (dev, I2cStatus) | I2cCntl_ClearLinkError);
-+}
-+
-+void
-+handle_linkerror (ELAN4_DEV *dev)
-+{
-+ E4_uint32 LinkState;
-+ E4_uint32 CurrState = read_reg32 (dev, LinkControlReg);
-+
-+ /* Set for reading errors.
*/ -+ write_reg32 (dev, LinkControlReg, -+ (CurrState = CurrState & ~((LCONT_TEST_CONTROL_MASK << LCONT_TEST_CONTROL_SHIFT) | -+ (LCONT_TEST_VALUE_MASK << LCONT_TEST_VALUE_SHIFT)))); -+ LinkState = LCONT_LINK_STATE(CurrState = read_reg32 (dev, LinkControlReg)); -+ -+#ifdef DEBUG -+ { -+ E4_uint8 ErrorMsg[256], DataErrorVal[64]; -+ -+ strcpy (ErrorMsg, "handle_linkerror:"); -+ if (LinkState & LS_LockError) strcat (ErrorMsg, " LockError"); -+ if (LinkState & LS_DeskewError) strcat (ErrorMsg, " DeskewError"); -+ if (LinkState & LS_PhaseError) strcat (ErrorMsg, " PhaseError"); -+ if (LinkState & LS_DataError) -+ { -+ E4_uint32 error[4]; -+ E4_uint32 i; -+ strcat (ErrorMsg, " DataError"); -+ /* Errors */ -+ for(i = LRS_ErrorVal8to0; i <= LRS_ErrorVal35to27; i++) -+ { -+ write_reg32 (dev, LinkControlReg, -+ CurrState | LCONT_TEST_VALUE(i) | (LCONT_READ_STATE << LCONT_TEST_CONTROL_SHIFT)); -+ error[i - LRS_ErrorVal8to0] = LCONT_LINK_STATE(read_reg32 (dev, LinkControlReg)); -+ } -+ sprintf (DataErrorVal, " Link State Error Val: %09llx %03x %03x %03x %03x", -+ (unsigned long long) ((error[0] & 0x1ffUL) | ((error[1] & 0x1ffUL) << 9) | -+ ((error[2] & 0x1ffUL) << 18) | ((error[3] & 0x1ffUL) << 27)), -+ error[3], error[2], error[1], error[0]); -+ strcat (ErrorMsg, DataErrorVal); -+ } -+ if (LinkState & LS_FifoOvFlow0) strcat (ErrorMsg, " FifoOvFlow0"); -+ if (LinkState & LS_FifoOvFlow1) strcat (ErrorMsg, " FifoOvFlow1"); -+ if (LinkState & LS_Mod45Changed) strcat (ErrorMsg, " Mod45Changed"); -+ if (LinkState & LS_PAckNotSeenError) strcat (ErrorMsg, " PAckNotSeenError"); -+ strcat (ErrorMsg, "\n"); -+ PRINTF0 (DBG_DEVICE, DBG_INTR, ErrorMsg); -+ } -+#endif -+ -+ BumpDevStat (dev, s_link_errors); -+ -+ if (LinkState & LS_LockError) BumpDevStat (dev, s_lock_errors); -+ if (LinkState & LS_DeskewError) BumpDevStat (dev, s_deskew_errors); -+ if (LinkState & LS_PhaseError) BumpDevStat (dev, s_phase_errors); -+ if (LinkState & LS_DataError) BumpDevStat (dev, s_data_errors); -+ if (LinkState & LS_FifoOvFlow0) BumpDevStat (dev, s_fifo_overflow0); -+ if (LinkState & LS_FifoOvFlow1) BumpDevStat (dev, s_fifo_overflow1); -+ if (LinkState & LS_Mod45Changed) BumpDevStat (dev, s_mod45changed); -+ if (LinkState & LS_PAckNotSeenError) BumpDevStat (dev, s_pack_not_seen); -+ -+ PULSE_SCHED_RESTART (dev, SCH_ClearLinkErrorInt); -+ -+ /* schedule a timer to clear the link error LED, so that it stays on -+ * for a second for every link error that occurs */ -+ if (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA && !timer_fn_queued (&dev->dev_linkerr_timeoutid)) -+ schedule_timer_fn (&dev->dev_linkerr_timeoutid, clear_linkerr_led, (void *) dev, HZ); -+ -+ /* -+ * Signal the link error to the switch by -+ * enabling the INT_LinkPortKeyFail bit. -+ * Always clear the error bit as the switch -+ * might have produced a spurious "ack" ... -+ */ -+ PULSE_SYSCONTROL (dev, CONT_CLEAR_LINKPORT_INT); -+ -+ if (dev->dev_linkerr_signalled == 0) -+ dev->dev_linkerr_signalled = 1; -+ else -+ dev->dev_linkerr_signalled = 2; -+ -+ ENABLE_INT_MASK (dev, INT_LinkPortKeyFail); -+ -+ check_error_rate (dev); -+} -+ -+void -+handle_linkportkeyfail (ELAN4_DEV *dev) -+{ -+ PRINTF0 (DBG_DEVICE, DBG_INTR, "handle_linkportkeyfail\n"); -+ -+ PULSE_SYSCONTROL (dev, CONT_CLEAR_LINKPORT_INT); -+ -+ if (! 
dev->dev_linkerr_signalled)
-+ {
-+ /* Hmmm - they're not playing ball */
-+ BumpDevStat (dev, s_linkport_keyfail);
-+
-+ DISABLE_INT_MASK (dev, INT_LinkPortKeyFail);
-+ }
-+ else
-+ {
-+ /* If more link errors have occurred since we
-+ * signalled the error, then leave it signalled. */
-+ if (--dev->dev_linkerr_signalled == 0)
-+ DISABLE_INT_MASK (dev, INT_LinkPortKeyFail);
-+ }
-+}
-+
-+
-+static __inline__ void
-+__elan4_4msi0 (ELAN4_DEV *dev, E4_uint32 intreg, E4_uint32 intmask)
-+{
-+ unsigned long flags;
-+
-+ if (intreg & intmask & INT_MainInterrupt)
-+ {
-+ DISABLE_INT_MASK (dev, INT_MainInterrupt);
-+
-+ if (handle_mainints (dev, -1, elan4_mainint_punt_loops) == 0)
-+ ENABLE_INT_MASK (dev, INT_MainInterrupt);
-+ else
-+ {
-+ BumpDevStat (dev, s_mainint_punts);
-+
-+ spin_lock_irqsave (&dev->dev_mainint_lock, flags);
-+ kcondvar_wakeupone (&dev->dev_mainint_wait, &dev->dev_mainint_lock);
-+ spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
-+ }
-+ }
-+}
-+
-+static __inline__ void
-+__elan4_4msi1 (ELAN4_DEV *dev, E4_uint32 intreg, E4_uint32 intmask)
-+{
-+ E4_uint32 restart = 0;
-+
-+ PRINTF1 (DBG_DEVICE, DBG_INTR, "__elan4_4msi1: %x\n", intreg);
-+
-+ spin_lock (&dev->dev_trap_lock);
-+
-+ if (intreg & intmask & INT_CProc)
-+ restart |= handle_cproc_trap (dev);
-+ if (intreg & intmask & INT_EProc)
-+ restart |= handle_eproc_trap (dev);
-+ if (intreg & intmask & INT_Dma0Proc)
-+ restart |= handle_dproc_trap (dev, 0);
-+ if (intreg & intmask & INT_Dma1Proc)
-+ restart |= handle_dproc_trap (dev, 1);
-+ if (intreg & intmask & INT_TProc)
-+ restart |= handle_tproc_trap (dev);
-+
-+ PULSE_SCHED_RESTART (dev, restart);
-+
-+ spin_unlock (&dev->dev_trap_lock);
-+
-+ if (intreg & (INT_Halted|INT_Discarding))
-+ handle_haltints (dev, intreg);
-+}
-+
-+static __inline__ void
-+__elan4_4msi2 (ELAN4_DEV *dev, E4_uint32 intreg, E4_uint32 intmask)
-+{
-+ E4_uint32 restart = 0;
-+
-+ PRINTF1 (DBG_DEVICE, DBG_INTR, "__elan4_4msi2: %x\n", intreg);
-+
-+ spin_lock (&dev->dev_trap_lock);
-+ if (intreg & intmask & INT_IProcCh0LowPri)
-+ restart |= handle_iproc_trap (dev, 0);
-+
-+ if (intreg & intmask & INT_IProcCh1LowPri)
-+ restart |= handle_iproc_trap (dev, 1);
-+
-+ if (intreg & intmask & INT_IProcCh0HighPri)
-+ restart |= handle_iproc_trap (dev, 2);
-+
-+ if (intreg & intmask & INT_IProcCh1HighPri)
-+ restart |= handle_iproc_trap (dev, 3);
-+
-+ PULSE_SCHED_RESTART (dev, restart);
-+
-+ spin_unlock (&dev->dev_trap_lock);
-+}
-+
-+static __inline__ void
-+__elan4_4msi3 (ELAN4_DEV *dev, E4_uint32 intreg, E4_uint32 intmask)
-+{
-+ PRINTF1 (DBG_DEVICE, DBG_INTR, "__elan4_4msi3: %x\n", intreg);
-+
-+ if (intreg & intmask & INT_PciMemErr)
-+ handle_pcimemerr (dev);
-+
-+ if (intreg & intmask & INT_SDRamInt)
-+ handle_sdramint (dev);
-+
-+ if (intreg & intmask & INT_LinkError)
-+ handle_linkerror (dev);
-+
-+ if (intreg & intmask & INT_LinkPortKeyFail)
-+ handle_linkportkeyfail (dev);
-+}
-+
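-+/* elan4_1msi0: top-level interrupt dispatch - demultiplexes the interrupt
-+ * register across the four MSI service groups and loops until no enabled
-+ * interrupt remains asserted.  Returns 0 for a shared-line miss and 1 if
-+ * any interrupt was handled. */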
-+int
-+elan4_1msi0 (ELAN4_DEV *dev)
-+{
-+ E4_uint32 intmask = dev->dev_intmask;
-+ E4_uint32 intreg;
-+
-+ if (intmask == 0 || ((intreg = read_reg32 (dev, InterruptReg)) & intmask) == 0)
-+ return (0);
-+
-+ BumpDevStat (dev, s_interrupts);
-+
-+ do {
-+ PRINTF1 (DBG_DEVICE, DBG_INTR, "elan4_1msi0: %x\n", intreg);
-+
-+ if (intreg & intmask & INT_MSI0)
-+ __elan4_4msi0(dev, intreg, intmask);
-+ if (intreg & intmask & INT_MSI1)
-+ __elan4_4msi1(dev, intreg, intmask);
-+ if (intreg & intmask & INT_MSI2)
-+ __elan4_4msi2(dev, intreg, intmask);
-+ if (intreg & intmask & INT_MSI3)
-+ __elan4_4msi3(dev, intreg, intmask);
-+
-+ if (intreg & INT_LinkPortKeyFail)
-+ handle_linkportkeyfail (dev);
-+
-+ /* must ensure that the read of the interrupt mask
-+ * completes before the read of the interrupt register
-+ * since the main interrupt thread clears its interrupt
-+ * and then re-enables it in the interrupt mask. */
-+ intmask = dev->dev_intmask;
-+ mb();
-+ intreg = read_reg32 (dev, InterruptReg);
-+
-+ } while ((intreg & intmask) != 0);
-+
-+ return (1);
-+}
-+
-+/* local context management */
-+int
-+elan4_insertctxt (ELAN4_DEV *dev, ELAN4_CTXT *ctxt, ELAN4_TRAP_OPS *ops)
-+{
-+ unsigned long flags;
-+ int tbl;
-+ int i;
-+
-+ ctxt->ctxt_dev = dev;
-+ ctxt->ctxt_ops = ops;
-+ ctxt->ctxt_features = dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES];
-+
-+ INIT_LIST_HEAD (&ctxt->ctxt_cqalist);
-+ spin_lock_init (&ctxt->ctxt_mmulock);
-+
-+ for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
-+ {
-+ ctxt->shuffle_needed[tbl] = 0;
-+ for(i=0; i < sizeof (ctxt->shuffle[tbl])/sizeof (ctxt->shuffle[tbl][0]); i++)
-+ ctxt->shuffle[tbl][i] = -1;
-+
-+ KMEM_ZALLOC (ctxt->ctxt_mmuhash[tbl], ELAN4_HASH_ENTRY **, dev->dev_hashsize[tbl] * sizeof (ELAN4_HASH_ENTRY *), 1);
-+
-+ if (ctxt->ctxt_mmuhash[tbl] == NULL)
-+ {
-+ if (tbl != 0)
-+ KMEM_FREE (ctxt->ctxt_mmuhash[0], dev->dev_hashsize[0] * sizeof (ELAN4_HASH_ENTRY *));
-+ spin_lock_destroy (&ctxt->ctxt_mmulock);
-+ return (-ENOMEM);
-+ }
-+ }
-+
-+ spin_lock_irqsave (&dev->dev_ctxt_lock, flags);
-+
-+ if ((ctxt->ctxt_num = bt_freebit (dev->dev_ctxmap, (1 << dev->dev_ctxtableshift))) >= 0)
-+ {
-+ /* chain onto the lists of all contexts */
-+ list_add (&ctxt->ctxt_link, &dev->dev_ctxt_list);
-+
-+ BT_SET (dev->dev_ctxmap, ctxt->ctxt_num);
-+ }
-+
-+ spin_unlock_irqrestore (&dev->dev_ctxt_lock, flags);
-+
-+ if (ctxt->ctxt_num >= 0)
-+ proc_insertctxt(dev, ctxt);
-+
-+ return (ctxt->ctxt_num < 0 ?
-ENOMEM : 0); -+} -+ -+void -+elan4_removectxt (ELAN4_DEV *dev, ELAN4_CTXT *ctxt) -+{ -+ unsigned long flags; -+ int tbl; -+ -+ proc_removectxt(dev, ctxt); -+ -+ /* remove from list of contexts */ -+ spin_lock_irqsave (&dev->dev_ctxt_lock, flags); -+ -+ list_del (&ctxt->ctxt_link); -+ -+ BT_CLEAR (dev->dev_ctxmap, ctxt->ctxt_num); -+ -+ spin_unlock_irqrestore (&dev->dev_ctxt_lock, flags); -+ -+ spin_lock_destroy (&ctxt->ctxt_info_lock); -+ -+ for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++) -+ KMEM_FREE (ctxt->ctxt_mmuhash[tbl], dev->dev_hashsize[tbl] * sizeof (ELAN4_HASH_ENTRY *)); -+ -+ spin_lock_destroy (&ctxt->ctxt_mmulock); -+} -+ -+ELAN4_CTXT * -+elan4_localctxt (ELAN4_DEV *dev, unsigned num) -+{ -+ struct list_head *entry; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->dev_ctxt_lock, flags); -+ -+ list_for_each (entry, &dev->dev_ctxt_list) { -+ ELAN4_CTXT *ctxt = list_entry (entry, ELAN4_CTXT, ctxt_link); -+ -+ if (ctxt->ctxt_num == num) -+ { -+ spin_unlock_irqrestore (&dev->dev_ctxt_lock, flags); -+ return (ctxt); -+ } -+ } -+ spin_unlock_irqrestore (&dev->dev_ctxt_lock, flags); -+ -+ return ((ELAN4_CTXT *) NULL); -+} -+ -+ELAN4_CTXT * -+elan4_networkctxt (ELAN4_DEV *dev, unsigned num) -+{ -+ E4_uint32 filter = elan4_read_filter (dev, num); -+ -+ if ((filter & E4_FILTER_CONTEXT_MASK) == INVALID_CONTEXT) -+ return NULL; -+ else -+ return elan4_localctxt (dev, filter & E4_FILTER_CONTEXT_MASK); -+} -+ -+/* network context management */ -+int -+elan4_attach_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum) -+{ -+ ELAN4_DEV *dev = ctxt->ctxt_dev; -+ int res = 0; -+ E4_uint32 filter; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->dev_ctxt_lock, flags); -+ -+ filter = elan4_read_filter (dev, ctxnum); -+ if ((filter & E4_FILTER_CONTEXT_MASK) != INVALID_CONTEXT) -+ { -+ PRINTF2 (ctxt, DBG_NETWORK_CTX, "elan4_attach_filter: ctx=%d filter=%x -> EBUSY\n", ctxnum, filter); -+ res = -EBUSY; -+ } -+ else -+ { -+ PRINTF1 (ctxt, DBG_NETWORK_CTX, "elan4_attach_filter: ctx=%d - SUCCESS\n", ctxnum); -+ -+ elan4_write_filter (dev, ctxnum, ctxt->ctxt_num | E4_FILTER_DISCARD_ALL); -+ PULSE_SCHED_RESTART (dev, SCH_ContextFilterFlush); -+ } -+ spin_unlock_irqrestore (&dev->dev_ctxt_lock, flags); -+ -+ return (res); -+} -+ -+void -+elan4_detach_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum) -+{ -+ ELAN4_DEV *dev = ctxt->ctxt_dev; -+ -+ PRINTF1 (ctxt, DBG_NETWORK_CTX, "elan4_detach_filter: detach from network context %d\n", ctxnum); -+ -+ elan4_write_filter (dev, ctxnum, INVALID_CONTEXT | E4_FILTER_DISCARD_ALL); -+ PULSE_SCHED_RESTART (dev, SCH_ContextFilterFlush); -+} -+ -+void -+elan4_set_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum, E4_uint32 state) -+{ -+ ELAN4_DEV *dev = ctxt->ctxt_dev; -+ -+ PRINTF6 (ctxt, DBG_NETWORK_CTX, "elan4_set_filter: set filter state %x for network context %d <%s%s%s%s>\n", state, ctxnum, -+ (state & E4_FILTER_DISCARD_ALL) ? "discard," : "", -+ (state & E4_FILTER_ACKOK_ALL) ? "ack-ok," : "", -+ (state & E4_FILTER_HIGH_PRI) ? "high-pri," : "", -+ (state & E4_FILTER_STATS) ? "stats," : ""); -+ -+ elan4_write_filter (dev, ctxnum, ctxt->ctxt_num | state); -+ PULSE_SCHED_RESTART (dev, SCH_ContextFilterFlush); -+} -+ -+void -+elan4_set_routetable (ELAN4_CTXT *ctxt, ELAN4_ROUTE_TABLE *tbl) -+{ -+ ELAN4_DEV *dev = ctxt->ctxt_dev; -+ E4_uint32 value = tbl ? 
(E4_VPT_VALID | E4_VPT_VALUE(tbl->tbl_entries, tbl->tbl_size)) : 0;
-+
-+ /* and insert into the vp table */
-+ elan4_sdram_writel (dev, (dev->dev_ctxtable + (ctxt->ctxt_num * sizeof (E4_ContextControlBlock)) +
-+ offsetof (E4_ContextControlBlock, VirtualProcessTable)), value);
-+ pioflush_sdram(dev);
-+
-+ PULSE_SYSCONTROL (dev, CONT_ROUTE_FLUSH);
-+}
-+
-+/* command queue management */
-+ELAN4_CQA *
-+elan4_getcqa (ELAN4_CTXT *ctxt, unsigned int idx)
-+{
-+ ELAN4_DEV *dev = ctxt->ctxt_dev;
-+ struct list_head *el;
-+
-+ spin_lock (&dev->dev_cqlock);
-+ list_for_each (el, &ctxt->ctxt_cqalist) {
-+ ELAN4_CQA *cqa = list_entry (el, ELAN4_CQA, cqa_link);
-+
-+ if (cqa->cqa_idx == idx)
-+ {
-+ cqa->cqa_ref++;
-+
-+ spin_unlock (&dev->dev_cqlock);
-+ return cqa;
-+ }
-+ }
-+ spin_unlock (&dev->dev_cqlock);
-+ return NULL;
-+}
-+
-+void
-+elan4_putcqa (ELAN4_CTXT *ctxt, unsigned int idx)
-+{
-+ ELAN4_DEV *dev = ctxt->ctxt_dev;
-+ struct list_head *el, *nel;
-+
-+ spin_lock (&dev->dev_cqlock);
-+ list_for_each_safe (el, nel, &ctxt->ctxt_cqalist) {
-+ ELAN4_CQA *cqa = list_entry (el, ELAN4_CQA, cqa_link);
-+
-+ if (cqa->cqa_idx == idx)
-+ {
-+ if (--cqa->cqa_ref || bt_lowbit (cqa->cqa_bitmap, ELAN4_CQ_PER_CQA) != -1)
-+ spin_unlock (&dev->dev_cqlock);
-+ else
-+ {
-+ list_del (&cqa->cqa_link);
-+
-+ BT_CLEAR (ctxt->ctxt_cqamap, cqa->cqa_idx);
-+ BT_CLEAR (dev->dev_cqamap, cqa->cqa_cqnum/ELAN4_CQ_PER_CQA);
-+ spin_unlock (&dev->dev_cqlock);
-+
-+ KMEM_FREE (cqa, sizeof (ELAN4_CQA));
-+ }
-+ return;
-+ }
-+ }
-+ spin_unlock (&dev->dev_cqlock);
-+
-+ printk ("elan4_putcqa: idx %d not found\n", idx);
-+ BUG();
-+}
-+
-+static ELAN4_CQ *
-+elan4_getcq (ELAN4_CTXT *ctxt, unsigned int type)
-+{
-+ ELAN4_DEV *dev = ctxt->ctxt_dev;
-+ ELAN4_CQA *cqa;
-+ struct list_head *el;
-+ int cidx, didx;
-+
-+ spin_lock (&dev->dev_cqlock);
-+ list_for_each (el, &ctxt->ctxt_cqalist) {
-+ cqa = list_entry (el, ELAN4_CQA, cqa_link);
-+
-+ if (cqa->cqa_type == type && (cidx = bt_freebit (cqa->cqa_bitmap, ELAN4_CQ_PER_CQA)) >= 0)
-+ {
-+ BT_SET (cqa->cqa_bitmap, cidx);
-+
-+ spin_unlock (&dev->dev_cqlock);
-+ return &cqa->cqa_cq[cidx];
-+ }
-+ }
-+ spin_unlock (&dev->dev_cqlock);
-+
-+ /* allocate a new cqa and its chunk of command queue descriptors */
-+ KMEM_ZALLOC (cqa, ELAN4_CQA *, sizeof (ELAN4_CQA), 1);
-+ if (cqa == NULL)
-+ return NULL;
-+
-+ spin_lock (&dev->dev_cqlock);
-+ cidx = bt_freebit (ctxt->ctxt_cqamap, ELAN4_MAX_CQA);
-+
-+ /* On architectures which have MTRR registers for write-combining
-+ * the top command queues from dev->dev_cqreorder upwards are
-+ * used for reordered queues. Without MTRR registers any page
-+ * sized group can use write combining through the ptes.
*/ -+ if (dev->dev_cqreorder == 0) -+ didx = bt_freebit (dev->dev_cqamap, dev->dev_cqcount/ELAN4_CQ_PER_CQA); -+ else -+ { -+ if ((type & CQ_Reorder) != 0) -+ didx = bt_nextbit (dev->dev_cqamap, dev->dev_cqcount/ELAN4_CQ_PER_CQA, (dev->dev_cqreorder/ELAN4_CQ_PER_CQA) - 1, 0); -+ else -+ didx = bt_freebit (dev->dev_cqamap, dev->dev_cqreorder/ELAN4_CQ_PER_CQA); -+ } -+ -+ if (cidx < 0 || didx < 0) -+ { -+ spin_unlock (&dev->dev_cqlock); -+ KMEM_FREE (cqa, sizeof (ELAN4_CQA)); -+ return NULL; -+ } -+ -+ BT_SET (ctxt->ctxt_cqamap, cidx); -+ BT_SET (dev->dev_cqamap, didx); -+ -+ cqa->cqa_idx = cidx; -+ cqa->cqa_type = type; -+ cqa->cqa_cqnum = (didx * ELAN4_CQ_PER_CQA); -+ -+ list_add_tail (&cqa->cqa_link, &ctxt->ctxt_cqalist); -+ -+ /* initialise the cqa struct */ -+ for (cidx = 0; cidx < ELAN4_CQ_PER_CQA; cidx++) -+ { -+ cqa->cqa_cq[cidx].cq_idx = cidx; -+ cqa->cqa_cq[cidx].cq_cqa = cqa; -+ } -+ -+ /* no mappings yet */ -+ cqa->cqa_ref = 0; -+ -+ /* we're going to return entry zero */ -+ BT_SET (cqa->cqa_bitmap, 0); -+ spin_unlock (&dev->dev_cqlock); -+ -+ return &cqa->cqa_cq[0]; -+} -+ -+static void -+elan4_putcq (ELAN4_CTXT *ctxt, ELAN4_CQ *cq) -+{ -+ ELAN4_DEV *dev = ctxt->ctxt_dev; -+ ELAN4_CQA *cqa = cq->cq_cqa; -+ -+ spin_lock (&dev->dev_cqlock); -+ -+ BT_CLEAR (cqa->cqa_bitmap, cq->cq_idx); -+ -+ if (bt_lowbit (cqa->cqa_bitmap, ELAN4_CQ_PER_CQA) != -1 || cqa->cqa_ref) -+ spin_unlock (&dev->dev_cqlock); -+ else -+ { -+ list_del (&cqa->cqa_link); -+ -+ BT_CLEAR (ctxt->ctxt_cqamap, cqa->cqa_idx); -+ BT_CLEAR (dev->dev_cqamap, cqa->cqa_cqnum/ELAN4_CQ_PER_CQA); -+ spin_unlock (&dev->dev_cqlock); -+ -+ KMEM_FREE (cqa, sizeof (ELAN4_CQA)); -+ } -+} -+ -+ELAN4_CQ * -+elan4_alloccq (ELAN4_CTXT *ctxt, unsigned cqsize, unsigned perm, unsigned cqtype) -+{ -+ ELAN4_DEV *dev = ctxt->ctxt_dev; -+ ELAN4_CQ *cq; -+ int cqnum; -+ sdramaddr_t cqdesc; -+ unsigned offset; -+ E4_uint64 value; -+ -+ if ((cq = elan4_getcq (ctxt, cqtype)) == NULL) -+ return NULL; -+ -+ cqnum = elan4_cq2num(cq); -+ -+ cq->cq_space = elan4_sdram_alloc (dev, CQ_Size(cqsize)); -+ if (cq->cq_space == (virtaddr_t) 0) -+ { -+ elan4_putcq (ctxt, cq); -+ return (NULL); -+ } -+ -+ cq->cq_size = cqsize; -+ cq->cq_perm = perm; -+ -+ /* and finally initialise the command queue descriptor */ -+ cqdesc = dev->dev_cqaddr + (cqnum * sizeof (E4_CommandQueueDesc)); -+ -+ value = CQ_QueuePtrsValue (cqsize, cq->cq_space, cq->cq_space); -+ if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA) -+ value |= ((cqtype & CQ_Priority) ? CQ_RevA_Priority : 0); -+ else -+ value |= (((cqtype & CQ_Priority) ? CQ_RevB_Priority : 0) | -+ ((cqtype & CQ_Reorder) ? 
CQ_RevB_ReorderingQueue : CQ_RevB_32bitWriteQueue));
-+
-+ elan4_sdram_writeq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs), value);
-+ elan4_sdram_writeq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_HoldingValue), 0);
-+ elan4_sdram_writeq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_AckBuffers), 0);
-+ elan4_sdram_writeq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control), CQ_ControlValue (ctxt->ctxt_num, 2, perm));
-+ pioflush_sdram (dev);
-+
-+ offset = (cqnum + dev->dev_cqoffset) * CQ_CommandMappingSize;
-+
-+ cq->cq_mapping = elan4_map_device (dev, ELAN4_BAR_REGISTERS, (offset & ~(PAGE_SIZE-1)),
-+ PAGE_SIZE, &cq->cq_handle) + (offset & (PAGE_SIZE-1));
-+#ifdef CONFIG_MPSAS
-+ if (ctxt == &dev->dev_ctxt)
-+ return (cq);
-+#endif
-+
-+ elan4_sdram_flushcache (dev, cq->cq_space, CQ_Size(cqsize));
-+
-+ return (cq);
-+}
-+
-+void
-+elan4_freecq (ELAN4_CTXT *ctxt, ELAN4_CQ *cq)
-+{
-+ ELAN4_DEV *dev = ctxt->ctxt_dev;
-+ unsigned offset = (elan4_cq2num(cq) + dev->dev_cqoffset) * CQ_CommandMappingSize;
-+
-+ elan4_flushcq (dev, cq);
-+
-+ elan4_unmap_device (dev, cq->cq_mapping - (offset & (PAGE_SIZE-1)), PAGE_SIZE, &cq->cq_handle);
-+ elan4_sdram_free (dev, cq->cq_space, CQ_Size (cq->cq_size));
-+
-+ elan4_putcq (ctxt, cq);
-+}
-+
-+void
-+elan4_restartcq (ELAN4_DEV *dev, ELAN4_CQ *cq)
-+{
-+ sdramaddr_t cqdesc = dev->dev_cqaddr + (elan4_cq2num(cq) * sizeof (E4_CommandQueueDesc));
-+ int hipri;
-+ unsigned long flags;
-+
-+ PRINTF1 (DBG_DEVICE, DBG_CPROC, "restartcq: restarting cq %p\n", cq);
-+
-+ spin_lock_irqsave (&dev->dev_requeue_lock, flags);
-+
-+ while (read_reg32 (dev, CommandControl.CommandRequeuePtr) & E4_CommandRequeueBusy)
-+ ;
-+
-+ if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
-+ hipri = (elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs)) & CQ_RevA_Priority) != 0;
-+ else
-+ hipri = (elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs)) & CQ_RevB_Priority) != 0;
-+
-+ if (hipri)
-+ {
-+ PRINTF1 (DBG_DEVICE, DBG_CPROC, "restartcq: restart cq %d as high pri\n", elan4_cq2num(cq));
-+ write_reg32 (dev, CommandControl.CommandRequeuePtr, cqdesc | E4_CommandRequeueHighPri);
-+ }
-+ else
-+ {
-+ PRINTF1 (DBG_DEVICE, DBG_CPROC, "restartcq: restart cq %d as low pri\n", elan4_cq2num(cq));
-+ write_reg32 (dev, CommandControl.CommandRequeuePtr, cqdesc);
-+ }
-+ pioflush_reg (dev);
-+
-+ spin_unlock_irqrestore (&dev->dev_requeue_lock, flags);
-+}
-+
-+static void
-+flushcq_intop (ELAN4_DEV *dev, void *arg)
-+{
-+ unsigned long flags;
-+
-+ spin_lock_irqsave (&dev->dev_flush_lock, flags);
-+ dev->dev_flush_finished |= (1 << (unsigned long) arg);
-+ kcondvar_wakeupall (&dev->dev_flush_wait, &dev->dev_flush_lock);
-+ spin_unlock_irqrestore (&dev->dev_flush_lock, flags);
-+}
-+
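-+/* elan4_flushcq: dev_flush_finished acts as a one-bit mutex per flush
-+ * queue - claim the bit, issue an interrupt command through the flush
-+ * queue and wait for flushcq_intop() to hand it back; once that interrupt
-+ * has been delivered the target cq's descriptor can no longer be held in
-+ * the command inserter cache, since the flush queue shares its cache slot. */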
-+void
-+elan4_flushcq (ELAN4_DEV *dev, ELAN4_CQ *cq)
-+{
-+ int flushqnum = elan4_cq2num(cq) & (COMMAND_INSERTER_CACHE_ENTRIES-1);
-+ ELAN4_CQ *flushq = dev->dev_flush_cq[flushqnum];
-+ unsigned long flags;
-+
-+ PRINTF (DBG_DEVICE, DBG_FLUSH, "elan4_flushcq: cqnum=%d\n", elan4_cq2num(cq));
-+
-+ spin_lock_irqsave (&dev->dev_flush_lock, flags);
-+
-+ while (! (dev->dev_flush_finished & (1 << flushqnum)))
-+ kcondvar_wait (&dev->dev_flush_wait, &dev->dev_flush_lock, &flags);
-+
-+ dev->dev_flush_finished &= ~(1 << flushqnum);
-+
-+ dev->dev_flush_op[flushqnum].op_function = flushcq_intop;
-+ dev->dev_flush_op[flushqnum].op_arg = (void *) (unsigned long) flushqnum;
-+
-+ elan4_queue_intop (dev, flushq, &dev->dev_flush_op[flushqnum]);
-+
-+ while (! (dev->dev_flush_finished & (1 << flushqnum)))
-+ kcondvar_wait (&dev->dev_flush_wait, &dev->dev_flush_lock, &flags);
-+
-+ spin_unlock_irqrestore (&dev->dev_flush_lock, flags);
-+}
-+
-+void
-+elan4_updatecq (ELAN4_DEV *dev, ELAN4_CQ *cq, unsigned perm, unsigned restart)
-+{
-+ sdramaddr_t cqdesc = dev->dev_cqaddr + (elan4_cq2num(cq) * sizeof (E4_CommandQueueDesc));
-+ E4_uint32 control = elan4_sdram_readl (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control));
-+
-+ /* Write the command queue's control word, but ensure that the ChannelNotCompleted fields
-+ * are not modified. We use this to just alter the RestartCount/Permissions fields */
-+
-+ elan4_sdram_writel (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control),
-+ CQ_ControlValue (CQ_Context (control), restart ? restart : CQ_RestartCount (control), perm));
-+}
-+
-+/* instruction cache flush */
-+static __inline__ void
-+elan4_flush_icache_locked (ELAN4_DEV *dev)
-+{
-+ int i, j;
-+
-+ PRINTF0 (DBG_DEVICE, DBG_FLUSH, "elan4_flush_icache_locked: flushing icache\n");
-+
-+ for (i = 0; i < (E4_ICacheLines/E4_ICachePortSize); i++)
-+ {
-+ write_reg64 (dev, ICachePort_Cntl_Addr, i << E4_ICacheTagAddrShift);
-+ for (j = 0; j < E4_ICachePortSize; j++)
-+ write_reg64 (dev, ICachePort[j], E4_InvalidTagValue);
-+ }
-+
-+ /*
-+ * Initialise the top of the ICache Set0 with an instruction which will
-+ * cause a known trap fingerprint so that the application can identify it
-+ * and ignore the trap.
-+ */
-+ write_reg64 (dev, ICachePort_Cntl_Addr, E4_ICacheFixupOffset | E4_AccessICacheRams);
-+
-+ /* Errata 24: must ensure that the DCache is flushed after loading
-+ * code for the thread processor. */
-+ if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
-+ elan4_sdram_flushcache (dev, 0, E4_CacheSize);
-+
-+ pioflush_reg (dev);
-+}
-+
-+static void
-+device_iflush_haltop (ELAN4_DEV *dev, void *arg)
-+{
-+ unsigned long flags;
-+
-+ spin_lock_irqsave (&dev->dev_flush_lock, flags);
-+
-+ elan4_flush_icache_locked (dev);
-+
-+ dev->dev_iflush_queued = 0;
-+
-+ kcondvar_wakeupall (&dev->dev_flush_wait, &dev->dev_flush_lock);
-+ spin_unlock_irqrestore (&dev->dev_flush_lock, flags);
-+}
-+
-+void
-+elan4_flush_icache_halted (ELAN4_CTXT *ctxt)
-+{
-+ ELAN4_DEV *dev = ctxt->ctxt_dev;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave (&dev->dev_flush_lock, flags);
-+
-+ elan4_flush_icache_locked (dev);
-+
-+ spin_unlock_irqrestore (&dev->dev_flush_lock, flags);
-+}
-+
-+void
-+elan4_flush_icache (ELAN4_CTXT *ctxt)
-+{
-+ ELAN4_DEV *dev = ctxt->ctxt_dev;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave (&dev->dev_flush_lock, flags);
-+
-+ PRINTF1 (DBG_DEVICE, DBG_FLUSH, "elan4_flush_icache: queued=%d\n", dev->dev_iflush_queued);
-+
-+ if (!
dev->dev_iflush_queued) -+ { -+ dev->dev_iflush_queued = 1; -+ -+ elan4_queue_haltop (dev, &dev->dev_iflush_haltop); -+ } -+ -+ while (dev->dev_iflush_queued) -+ kcondvar_wait (&dev->dev_flush_wait, &dev->dev_flush_lock, &flags); -+ -+ spin_unlock_irqrestore (&dev->dev_flush_lock, flags); -+} -+ -+/* device context operations */ -+static void -+device_cproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned cqnum) -+{ -+ ELAN4_DEV *dev = ctxt->ctxt_dev; -+ ELAN4_CPROC_TRAP *trap = &dev->dev_cproc_trap; -+ -+ elan4_extract_cproc_trap (dev, status, trap, cqnum); -+ -+ DBGCMD (DBG_DEVICE, DBG_FLUSH, elan4_display_cproc_trap (DBG_DEVICE, DBG_FLUSH, "device_cproc_trap", trap)); -+ -+ switch (CPROC_TrapType (trap->tr_status)) -+ { -+ case CommandProcInterruptQueueOverflow: -+ PRINTF (ctxt, DBG_FLUSH, "device_cproc_trap: cqnum=%d\n", cqnum); -+ -+ /* XXXX: we could either just hit restart (and hope) - or we could extract -+ * the event interrupt cookie out and "complete" the command before -+ * restarting it */ -+ elan4_restartcq (dev, dev->dev_flush_cq[cqnum]); -+ return; -+ -+ case CommandProcDmaQueueOverflow: -+ case CommandProcPermissionTrap: -+ handle_dma_flushops (dev, status, cqnum); -+ return; -+ -+ default: -+ printk ("device_cproc_trap: status=%llx control=%llx TrapType=%x cqnum=%d\n", (long long) trap->tr_status, -+ elan4_sdram_readq (dev, dev->dev_cqaddr + cqnum * sizeof (E4_CommandQueueDesc) + -+ offsetof (E4_CommandQueueDesc, CQ_Control)), -+ (int) CPROC_TrapType(trap->tr_status), cqnum); -+ ELAN4_DEBUG_TRIGGER (ctxt, "elan4:device_cproc_trap\n"); -+ } -+} -+ -+static void -+device_tproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status) -+{ -+ ELAN4_TPROC_TRAP trap; -+ -+ elan4_extract_tproc_trap (ctxt->ctxt_dev, status, &trap); -+ -+ elan4_display_tproc_trap (DBG_CONSOLE, DBG_TRAP, "device_tproc_trap", &trap); -+ ELAN4_DEBUG_TRIGGER (ctxt, "elan4:device_tproc_trap\n"); -+} -+ -+static void -+device_dproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit) -+{ -+ ELAN4_DPROC_TRAP trap; -+ -+ elan4_extract_dproc_trap (ctxt->ctxt_dev, status, &trap, unit); -+ -+ elan4_display_dproc_trap (DBG_CONSOLE, DBG_TRAP, "device_dproc_trap", &trap); -+ -+ ELAN4_DEBUG_TRIGGER (ctxt, "elan4:device_dproc_trap\n"); -+} -+ -+static void -+device_interrupt (ELAN4_CTXT *ctxt, E4_uint64 cookie) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) ctxt; -+ struct list_head *el,*nel; -+ unsigned long flags; -+ -+ PRINTF (ctxt, DBG_FLUSH, "device_interrupt: cookie=%llx\n", cookie); -+ -+ spin_lock_irqsave (&dev->dev_intop_lock, flags); -+ list_for_each_safe (el, nel, &dev->dev_intop_list) { -+ ELAN4_INTOP *op = list_entry (el, ELAN4_INTOP, op_link); -+ -+ if (op->op_cookie == cookie) -+ { -+ if ((op->op_cookie & INTOP_TYPE_MASK) == INTOP_ONESHOT) -+ list_del (&op->op_link); -+ -+ spin_unlock_irqrestore (&dev->dev_intop_lock, flags); -+ -+ (*op->op_function)(dev, op->op_arg); -+ return; -+ } -+ } -+ spin_unlock_irqrestore (&dev->dev_intop_lock, flags); -+ -+ panic ("device_interrupt: interrupt cookie %llx not found\n", (long long)cookie); -+} -+ -+static void -+device_iproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit) -+{ -+ ELAN4_DEV *dev = ctxt->ctxt_dev; -+ ELAN4_IPROC_TRAP *trap = &dev->dev_iproc_trap; -+ -+ elan4_extract_iproc_trap (dev, status, trap, unit); -+ elan4_inspect_iproc_trap (trap); -+ -+ DBGCMD (ctxt, DBG_IPROC, elan4_display_iproc_trap (ctxt, DBG_IPROC, "device_iproc_trap", trap)); -+ -+ if (elan4_neterr_iproc_trap (dev, trap)) -+ return; -+ -+ elan4_display_iproc_trap (DBG_CONSOLE, DBG_TRAP, 
"device_iproc_trap", trap); -+ panic ("device_iproc_trap: unexpected trap\n"); -+} -+ -+static void -+device_needs_shuffle (ELAN4_CTXT *ctxt, int tbl, int hashidx) -+{ -+ /* XXXX currently this doesnt need to do anything -+ as the chains have only 2 entries */ -+} -+ -+ELAN4_TRAP_OPS device_trap_ops = -+{ -+ NULL, -+ device_cproc_trap, -+ device_dproc_trap, -+ device_tproc_trap, -+ device_iproc_trap, -+ device_interrupt, -+ NULL, -+ device_needs_shuffle, -+}; -+ -+/* -+ * elan4_initialise_device -+ * initialise the ELAN4_DEV struct - spinlocks,cvs etc. -+ * map the registers, sdram etc -+ */ -+int -+elan4_initialise_device (ELAN4_DEV *dev) -+{ -+ int i, bit; -+ -+ if (elan4_mainint_resched_ticks == 0) -+ elan4_mainint_resched_ticks = (hz/4); -+ -+ /* map the registers */ -+ switch (dev->dev_devinfo.dev_revision_id) -+ { -+ case PCI_REVISION_ID_ELAN4_REVA: -+ dev->dev_regs = elan4_map_device (dev, ELAN4_BAR_REGISTERS, ELAN4_REVA_REG_OFFSET, ELAN4_REG_SIZE, &dev->dev_regs_handle); -+ -+ dev->dev_rom = elan4_map_device (dev, ELAN4_BAR_REGISTERS, ELAN4_REVA_EBUS_OFFSET + ELAN4_REVA_EBUS_ROM_OFFSET, -+ ELAN4_REVA_EBUS_ROM_SIZE, &dev->dev_rom_handle); -+ break; -+ -+ case PCI_REVISION_ID_ELAN4_REVB: -+ dev->dev_regs = elan4_map_device (dev, ELAN4_BAR_REGISTERS, ELAN4_REVB_REG_OFFSET, ELAN4_REG_SIZE, &dev->dev_regs_handle); -+ dev->dev_rom = (ioaddr_t) 0; -+ dev->dev_i2c = elan4_map_device (dev, ELAN4_BAR_REGISTERS, ELAN4_REVB_I2C_OFFSET, ELAN4_REVB_I2C_SIZE, &dev->dev_i2c_handle); -+ break; -+ -+ default: -+ return -EINVAL; -+ } -+ -+ /* XXXX: parse the ebus rom to determine the sdram configuration */ -+ { -+ extern long long sdram_cfg; -+ -+ if (sdram_cfg == 0) -+ dev->dev_sdram_cfg = SDRAM_STARTUP_VALUE; -+ else -+ dev->dev_sdram_cfg = sdram_cfg; -+ } -+ -+ for (bit = 0; ((1 << bit) & elan4_resource_len (dev, ELAN4_BAR_SDRAM)) == 0; bit++) -+ ; -+ -+ switch ((dev->dev_sdram_cfg >> SDRAM_RamSize_SH) & 3) -+ { -+ case 0: /* 64Mbit, 128Mbit, 256Mbit, 512Mbit or 1Gbit (16-bit output) */ -+ dev->dev_sdram_numbanks = 4; bit -= 2; -+ for (i = 0; i < dev->dev_sdram_numbanks; i++) -+ { -+ dev->dev_sdram_banks[i].b_base = (i << bit); -+ dev->dev_sdram_banks[i].b_size = (1 << bit); -+ } -+ break; -+ -+ case 1: /* 64Mbit, 128Mbit, 256Mbit or 512Mbit (8-bit output) */ -+ dev->dev_sdram_numbanks = 4; bit -= 2; -+ for (i = 0; i < dev->dev_sdram_numbanks; i++) -+ { -+ dev->dev_sdram_banks[i].b_base = ((i & 2) << (bit)) | ((i & 1) << (bit-1)); -+ dev->dev_sdram_banks[i].b_size = (1 << bit); -+ } -+ break; -+ -+ case 2: /* 2Gbit (16-bit output) or 1Gbit (8-bit output) */ -+ dev->dev_sdram_numbanks = 2; bit--; -+ for (i = 0; i < dev->dev_sdram_numbanks; i++) -+ { -+ dev->dev_sdram_banks[i].b_base = (i << bit); -+ dev->dev_sdram_banks[i].b_size = (1 << bit); -+ } -+ break; -+ -+ case 3: /* 4Gbit (16-bit output) or 2Gbit (8-bit output) */ -+ dev->dev_sdram_numbanks = 1; -+ dev->dev_sdram_banks[0].b_base = 0; -+ dev->dev_sdram_banks[0].b_size = (1 << bit); -+ break; -+ } -+ -+ elan4_sdram_init (dev); -+ -+ /* initialise locks for classes of interrupts */ -+ spin_lock_init (&dev->dev_trap_lock); -+ spin_lock_init (&dev->dev_intop_lock); -+ spin_lock_init (&dev->dev_haltop_lock); -+ spin_lock_init (&dev->dev_mainint_lock); -+ -+ init_timer (&dev->dev_haltop_timer); -+ dev->dev_haltop_timer.function = dev_haltop_timer_func; -+ dev->dev_haltop_timer.data = (unsigned long) dev; -+ -+ /* initialise other locks */ -+ spin_lock_init (&dev->dev_i2c_lock); -+ -+ spin_lock_init (&dev->dev_mmulock); -+ spin_lock_init 
-+ spin_lock_init (&dev->dev_cqlock);
-+ spin_lock_init (&dev->dev_ctxlock);
-+
-+ spin_lock_init (&dev->dev_intmask_lock);
-+ spin_lock_init (&dev->dev_syscontrol_lock);
-+
-+ spin_lock_init (&dev->dev_ctxt_lock);
-+ spin_lock_init (&dev->dev_flush_lock);
-+ spin_lock_init (&dev->dev_requeue_lock);
-+
-+ kmutex_init (&dev->dev_lock);
-+
-+ kcondvar_init (&dev->dev_mainint_wait);
-+ kcondvar_init (&dev->dev_flush_wait);
-+
-+ /* initialise lists */
-+ INIT_LIST_HEAD (&dev->dev_ctxt_list);
-+ INIT_LIST_HEAD (&dev->dev_intop_list);
-+ INIT_LIST_HEAD (&dev->dev_interruptq_list);
-+ INIT_LIST_HEAD (&dev->dev_hc_list);
-+ INIT_LIST_HEAD (&dev->dev_haltop_list);
-+ INIT_LIST_HEAD (&dev->dev_dma_flushop[0].list);
-+ INIT_LIST_HEAD (&dev->dev_dma_flushop[1].list);
-+
-+ dev->dev_state = ELAN4_STATE_STOPPED;
-+
-+ return (0);
-+}
-+
-+void
-+elan4_finalise_device (ELAN4_DEV *dev)
-+{
-+ kcondvar_destroy (&dev->dev_flush_wait);
-+ kcondvar_destroy (&dev->dev_mainint_wait);
-+
-+ kmutex_destroy (&dev->dev_lock);
-+
-+ spin_lock_destroy (&dev->dev_requeue_lock);
-+ spin_lock_destroy (&dev->dev_flush_lock);
-+ spin_lock_destroy (&dev->dev_ctxt_lock);
-+
-+ spin_lock_destroy (&dev->dev_syscontrol_lock);
-+ spin_lock_destroy (&dev->dev_intmask_lock);
-+
-+ spin_lock_destroy (&dev->dev_ctxlock);
-+ spin_lock_destroy (&dev->dev_cqlock);
-+ spin_lock_destroy (&dev->dev_mmulock);
-+
-+ spin_lock_destroy (&dev->dev_i2c_lock);
-+
-+ spin_lock_destroy (&dev->dev_mainint_lock);
-+ spin_lock_destroy (&dev->dev_haltop_lock);
-+ spin_lock_destroy (&dev->dev_intop_lock);
-+ spin_lock_destroy (&dev->dev_trap_lock);
-+
-+ del_timer_sync (&dev->dev_haltop_timer);
-+
-+ while (! list_empty (&dev->dev_hc_list))
-+ {
-+ ELAN4_HASH_CHUNK *hc = list_entry (dev->dev_hc_list.next, ELAN4_HASH_CHUNK, hc_link);
-+
-+ list_del (&hc->hc_link);
-+
-+ KMEM_FREE(hc, sizeof (ELAN4_HASH_CHUNK));
-+ }
-+
-+ elan4_sdram_fini (dev);
-+
-+ switch (dev->dev_devinfo.dev_revision_id)
-+ {
-+ case PCI_REVISION_ID_ELAN4_REVA:
-+ elan4_unmap_device (dev, dev->dev_rom, ELAN4_REVA_EBUS_ROM_SIZE, &dev->dev_rom_handle);
-+ elan4_unmap_device (dev, dev->dev_regs, ELAN4_REG_SIZE, &dev->dev_regs_handle);
-+ break;
-+ case PCI_REVISION_ID_ELAN4_REVB:
-+ elan4_unmap_device (dev, dev->dev_i2c, ELAN4_REVB_I2C_SIZE, &dev->dev_i2c_handle);
-+ elan4_unmap_device (dev, dev->dev_regs, ELAN4_REG_SIZE, &dev->dev_regs_handle);
-+ break;
-+ }
-+}
-+
-+static int
-+measure_sysclk (ELAN4_DEV *dev)
-+{
-+ E4_uint64 val0, val1;
-+ E4_uint32 ticks, ns;
-+
-+ write_ureg64 (dev, StatCont, STP_SYS_CLOCK_RATE0);
-+
-+ val0 = read_ureg64 (dev, StatCounts[0]);
-+ udelay (1000);
-+ val1 = read_ureg64 (dev, StatCounts[0]);
-+
-+ ticks = ((val1 >> 32) - (val0 >> 32));
-+ ns = ((val1 & 0xffffffff) - (val0 & 0xffffffff));
-+
-+ return (ticks / (ns / 1000));
-+}
-+
-+static void
-+initialise_cache (ELAN4_DEV *dev)
-+{
-+ register int set, line;
-+
-+ /* Initialise the cache to "map" the bottom of sdram - we will use
-+ * this space for cache flushing, so require the cache to be set
-+ * up so that cachelines for this are in the correct set.
-+ *
-+ * XXXX: for MPSAS we set bit 28, to ensure that any access to
-+ * sdram causes the line to be filled first to expunge any
-+ * Xs. */
-+ for (set = 0; set < E4_NumCacheSets; set++)
-+ for (line = 0; line < E4_NumCacheLines; line++)
-+ write_tag (dev, Tags[set][line], (((E4_uint64) set) << 29) | (1 << 28) | (line << 16));
-+}
-+
-+#ifndef CONFIG_MPSAS
-+static void
-+initialise_cache_tags (ELAN4_DEV *dev, unsigned addr)
-+{
-+ register int set, line;
-+
-+ /* Initialise the whole cache to hold sdram at "addr" as direct mapped */
-+
-+ for (set = 0; set < E4_NumCacheSets; set++)
-+ for (line = 0; line < E4_NumCacheLines; line++)
-+ write_tag (dev, Tags[set][line], addr | (set << 13) | (1 << 11));
-+}
-+
-+static void
-+initialise_ecc (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank)
-+{
-+ register int i, addr;
-+
-+ if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
-+ {
-+ initialise_cache_tags (dev, E4_CacheSize);
-+ for (addr = 0; addr < bank->b_size; addr += E4_CacheSize)
-+ {
-+ for (i = 0; i < E4_CacheSize; i += sizeof (E4_uint64))
-+ writeq (0xbeec000000000000ull | addr | i, (void *)(bank->b_ioaddr + addr + i));
-+ initialise_cache_tags (dev, addr);
-+ }
-+ }
-+ else
-+ {
-+ /* Write the whole of this bank of sdram. */
-+ for (addr = 0; addr < bank->b_size; addr += sizeof (E4_uint64))
-+ writeq (0xbeec000000000000ull | addr, (void *)(bank->b_ioaddr + addr));
-+
-+ /* Now flush the top of the bank back out of the cache */
-+ for (addr = 0; addr < E4_CacheSize; addr += sizeof (E4_uint64))
-+ writeq (0xbeec000000000000ull | addr, (void *)(bank->b_ioaddr + addr));
-+
-+ /* Now read the top value of sdram to guarantee the write has occurred before the ECC is enabled */
-+ __elan4_readq (dev, bank->b_ioaddr + bank->b_size - sizeof (E4_uint64));
-+ }
-+}
-+#endif
-+
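-+/* do_initdma: used under MPSAS simulation only - issue a DMA which is
-+ * expected to trap, then restart the DMA processors; this appears to be
-+ * done purely to initialise the DMA processor state before any real
-+ * traffic is started. */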
read_reg64 (dev, DProc0Status) : read_reg64 (dev, DProc1Status); -+ -+ if (DPROC_PrefetcherFault (status) || (DPROC_TrapType(status) != DmaProcFailCountError && DPROC_TrapType(status) != DmaProcPacketAckError)) -+ { -+ printk ("elan: bad dma trap, status = %lx\n", (long)status); -+ panic ("elan: bad dma trap\n"); -+ } -+ -+ PULSE_SCHED_RESTART (dev, SCH_RestartDma0Proc | SCH_RestartDma1Proc | SCH_RestartDmaPrefetchProc); -+ -+ elan4_detach _filter (&dev->dev_ctxt, 0); -+ -+ ENABLE_INT_MASK (dev, INT_Dma0Proc | INT_Dma1Proc); -+#undef VIRTUAL_ADDRESS -+} -+#endif -+ -+static int -+ebus_read_vpd (ELAN4_DEV *dev, unsigned char *data, unsigned int nob) -+{ -+ unsigned int pci_data_ptr; -+ unsigned int vpd_ptr; -+ register int i; -+ -+ if (read_ebus_rom (dev, 0) != 0x55 || read_ebus_rom (dev, 1) != 0xaa) -+ { -+ printk ("elan%d: invalid rom signature in ebus rom\n", dev->dev_instance); -+ return -EINVAL; -+ } -+ -+ pci_data_ptr = (read_ebus_rom (dev, 0x19) << 8) | read_ebus_rom (dev, 0x18); -+ -+ /* check the pci data structure */ -+ if (read_ebus_rom (dev, pci_data_ptr + 0) != 'P' || -+ read_ebus_rom (dev, pci_data_ptr + 1) != 'C' || -+ read_ebus_rom (dev, pci_data_ptr + 2) != 'I' || -+ read_ebus_rom (dev, pci_data_ptr + 3) != 'R') -+ { -+ printk ("elan%d: invalid pci data structure in ebus rom\n", dev->dev_instance); -+ return -EINVAL; -+ } -+ -+ /* extract the VPD pointer */ -+ vpd_ptr = (read_ebus_rom (dev, pci_data_ptr + 9) << 8) | read_ebus_rom (dev, pci_data_ptr + 8); -+ -+ if (vpd_ptr == 0) -+ { -+ printk ("elan%d: no vital product data in ebus rom\n", dev->dev_instance); -+ return -EINVAL; -+ } -+ -+ /* read the vpd data */ -+ for (i = 0; i < nob; i++) -+ data[i] = read_ebus_rom (dev, vpd_ptr + i); -+ -+ return 0; -+} -+ -+int -+elan4_read_vpd (ELAN4_DEV *dev, unsigned char *tag, unsigned char *result) -+{ -+ unsigned char vpd[I2C_ELAN_EEPROM_VPD_SIZE]; -+ unsigned char *ptr = vpd; -+ unsigned int finished = 0; -+ unsigned char *lim; -+ unsigned char name[3]; -+ unsigned char value[256]; -+ unsigned char type; -+ unsigned int len, len2; -+ register int i; -+ -+ if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA) -+ { -+ if (ebus_read_vpd (dev, vpd, I2C_ELAN_EEPROM_VPD_SIZE) < 0) -+ { -+ PRINTF1 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, unable to read serial number from EBUS rom\n", dev->dev_instance); -+ return -EINVAL ; -+ } -+ } -+ else -+ { -+ if (i2c_read_rom (dev, I2C_ELAN_EEPROM_VPD_BASEADDR, I2C_ELAN_EEPROM_VPD_SIZE, vpd) < 0) -+ { -+ PRINTF1 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, unable to read serial number from I2C rom\n", dev->dev_instance); -+ return -EINVAL; -+ } -+ } -+ -+ result[0] = 0; -+ while (! 
finished) -+ { -+ type = *ptr++; -+ -+ if (type & LARGE_RESOURCE_BIT) -+ { -+ len = *(ptr++); -+ len += *(ptr++) << 8; -+ -+ switch (type & ~LARGE_RESOURCE_BIT) -+ { -+ case LARGE_RESOURCE_STRING: -+ case LARGE_RESOURCE_VENDOR_DEFINED: -+ ptr += len; -+ break; -+ -+ case LARGE_RESOURCE_VITAL_PRODUCT_DATA: -+ for (lim = ptr + len; ptr < lim; ) -+ { -+ name[0] = *ptr++; -+ name[1] = *ptr++; -+ name[2] = '\0'; -+ len2 = *ptr++; -+ -+ for (i = 0; i < len2 && ptr < lim; i++) -+ value[i] = *ptr++; -+ value[i] = '\0'; -+ -+ PRINTF3 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, %s: $s\n", dev->dev_instance, name, value); -+ -+ if (tag != NULL) -+ { /* looking for just one tag */ -+ if (!strcmp (name, tag)) -+ strcpy(result, value); -+ } -+ else -+ { /* get all tags */ -+ strcat(result,name); -+ strcat(result,": "); -+ strcat(result,value); -+ strcat(result,"\n"); -+ } -+ } -+ break; -+ -+ default: -+ PRINTF2 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, unknown large resource %x\n", dev->dev_instance, type); -+ finished = 1; -+ break; -+ } -+ } -+ else -+ { -+ len = type & 0x7; -+ -+ switch (type >> 3) -+ { -+ case SMALL_RESOURCE_COMPATIBLE_DEVICE_ID: -+ ptr += len; -+ break; -+ -+ case SMALL_RESOURCE_VENDOR_DEFINED: -+ ptr += len; -+ break; -+ -+ case SMALL_RESOURCE_END_TAG: -+ finished = 1; -+ break; -+ -+ default: -+ PRINTF2 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, unknown small resource %x\n", dev->dev_instance, type >> 3); -+ finished = 1; -+ break; -+ } -+ } -+ } -+ -+ if ( result[0] == 0 ) { -+ if ( tag != 0 ) -+ PRINTF2 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, failed to find tag %s\n", dev->dev_instance, tag); -+ else -+ PRINTF1 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, failed to find any tags\n", dev->dev_instance); -+ return -EINVAL; -+ } -+ -+ return (0); -+} -+ -+int -+elan4_start_device (ELAN4_DEV *dev) -+{ -+ E4_VirtualProcessEntry entry; -+ unsigned pagesizeval[2]; -+ unsigned hashsizeval[2]; -+ register int i, j, tbl, res; -+ unsigned attempts = 0; -+ E4_PAGE_SIZE_TABLE; -+ unsigned char serial[256]; -+ unsigned int sdram_factor = SDRAM_166_DLL_CORRECTION_FACTOR; -+ -+ PRINTF (DBG_DEVICE, DBG_ALL, "elan4_start_device: entered\n"); -+ -+ dev->dev_state = ELAN4_STATE_STARTING; -+ -+ tryagain: -+ /* Initialise the pci config space */ -+ if ((res = elan4_pciinit (dev)) < 0) -+ return (res); -+ -+ /* Display the serial number */ -+ if (elan4_read_vpd (dev, "SN", serial)) -+ printk("elan%d: SN: failed to read\n", dev->dev_instance); -+ else -+ printk("elan%d: SN: %s\n", dev->dev_instance, serial); -+ -+ /* initialise the interrupt mask to zero */ -+ SET_INT_MASK (dev, 0); -+ -+ /* Initialise the device registers */ -+ write_reg64 (dev, TlbLineValue, 0); -+ write_reg64 (dev, SysControlReg, 0); -+ -+ /* Initialise the SDRAM using the configuration value from the ROM */ -+ write_reg64 (dev, SDRamConfigReg, dev->dev_sdram_cfg | SDRAM_SETUP); -+ -+ /* Setup the linkport registers */ -+ write_reg64 (dev, LinkPortLock, elan4_linkport_lock); -+ -+ /* Setup the tick rates, start the clock, and init the stats registers */ -+ write_ureg32 (dev, ClockTickRate.s.TickRates, ELAN4_CLOCK_TICK_RATE); -+ write_ureg64 (dev, Clock, 0); -+ write_ureg32 (dev, InstCount.s.StatsCount, 0); -+ for (i = 0; i < 8; i++) -+ write_ureg32 (dev, StatCounts[i].s.StatsCount, 0); -+ -+ /* Initialise the Link Control register - disable the TLB prefetcher on RevB -+ * as it can cause very occasional data corruption. 
*/ -+ if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVB) -+ write_reg32 (dev, LinkControlReg, LCONT_EN_SYS_READS | LCONT_REVB_DISABLE_TLB_PREFETCH); -+ else -+ write_reg32 (dev, LinkControlReg, LCONT_EN_SYS_READS); -+ -+ /* Initialise the Link Control Settings to set the PLL Reference Value */ -+ write_reg32 (dev, LinkContSettings, -+ (elan4_mod45disable ? LCONT_MOD45_DISABLE : 0) | -+ (3 << LCONT_CONFIG_PHASE_SHIFT) | -+ ((elan4_pll_div & LCONT_PLL_REF_VAL_BITS_MASK) << LCONT_PLL_REF_VAL_BITS_SHIFT) | -+ (LCONT_VOD_360 << LCONT_LVDS_VOLTAGE_BITS_SHIFT) | -+ (LCONT_TERM_AUTO_OHM << LCONT_LVDS_TERMINATION_SHIFT)); -+ -+ /* Clear the link error LED on RevB and above */ -+ if (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA) -+ write_i2c (dev, I2cStatus, read_i2c (dev, I2cStatus) | I2cCntl_ClearLinkError); -+ -+ /* Compute the SysClk frequency and update the PLL if necessary */ -+ if (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA) -+ { -+ int mhz = measure_sysclk (dev); -+ -+ if (elan4_pll_cfg != 0 || mhz > 190 || mhz < 170) -+ printk ("elan%d: SysClk running at %d Mhz\n", dev->dev_instance, measure_sysclk (dev)); -+ else -+ { -+ sdram_factor = SDRAM_150_DLL_CORRECTION_FACTOR; -+ -+ elan4_updatepll (dev, ECTRL_SYS_CLOCK_RATIO_4_3); -+ -+ printk ("elan%d: SysClk now running at %d Mhz\n", dev->dev_instance, measure_sysclk (dev)); -+ } -+ } -+ -+ initialise_cache (dev); -+ -+ /* Initialise the MMU hash table parameters */ -+ /* Select the largest elan pagesize which is spanned by the -+ * system pagesize for mmu table 0*/ -+ for (i = 0; i < E4_PAGE_SIZE_TABLE_SIZE; i++) -+ if (PageSizeTable[i] > PAGE_SHIFT) -+ break; -+ -+ pagesizeval[0] = i - 1; -+ hashsizeval[0] = elan4_hash_0_size_val; -+ -+ /* Select a suitable elan pagesize to match any "large" page -+ * support that the OS provides. */ -+ pagesizeval[1] = PAGE_SIZE_4M; -+ hashsizeval[1] = elan4_hash_1_size_val; -+ -+ for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++) -+ { -+ dev->dev_pagesizeval[tbl] = pagesizeval[tbl]; -+ dev->dev_pageshift[tbl] = PageSizeTable[pagesizeval[tbl]]; -+ dev->dev_hashsize[tbl] = (1 << hashsizeval[tbl])/sizeof (E4_HashTableEntry); -+ dev->dev_rsvd_hashmask[tbl] = ((1 << (27 - dev->dev_pageshift[tbl]))-1) & ~((1 << hashsizeval[tbl])-1); -+ dev->dev_rsvd_hashval[tbl] = 0xFFFFFFFF; -+ } -+ -+ PRINTF2 (DBG_DEVICE, DBG_CONFIG, "elan4_start_device: pageshifts %d,%d\n", dev->dev_pageshift[0], -+ NUM_HASH_TABLES == 2 ? dev->dev_pageshift[1] : 0); -+ -+ /* Initialise the control register to the desired value */ -+ dev->dev_syscontrol = (CONT_EN_ALL_SETS | CONT_MMU_ENABLE | CONT_CACHE_ALL | CONT_2K_NOT_1K_DMA_PACKETS | -+ (pagesizeval[0] << CONT_TABLE0_PAGE_SIZE_SHIFT) | (hashsizeval[0] << CONT_TABLE0_MASK_SIZE_SHIFT)); -+ -+ if (NUM_HASH_TABLES == 2) -+ dev->dev_syscontrol |= CONT_TWO_HASH_TABLES | (pagesizeval[1] << CONT_TABLE1_PAGE_SIZE_SHIFT) | (hashsizeval[1] << CONT_TABLE1_MASK_SIZE_SHIFT); -+ -+ write_reg64 (dev, SysControlReg, dev->dev_syscontrol); -+ -+ /* use direct mapped pci writes during sdram initialisation, since for -+ * cache flushing to work, we need to ensure that the cacheflush page -+ * never gets lines into the incorrect cache set. 
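-+ *
-+ * (The direct mapping is switched back off below, once dev_cacheflush_space
-+ * has been allocated out of the bottom of sdram.)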
-+ */
-+ SET_SYSCONTROL (dev, dev_direct_map_pci_writes, CONT_DIRECT_MAP_PCI_WRITES);
-+
-+ if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVB)
-+ elan4_sdram_setup_delay_lines(dev, sdram_factor);
-+
-+ for (i = res = 0; i < dev->dev_sdram_numbanks; i++)
-+ if (dev->dev_sdram_banks[i].b_size)
-+ res |= elan4_sdram_init_bank (dev, &dev->dev_sdram_banks[i]);
-+
-+ if (! res)
-+ {
-+ if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVB && ++attempts < 5)
-+ {
-+ printk ("elan%d: sdram not working, resetting\n", dev->dev_instance);
-+ goto tryagain;
-+ }
-+
-+ printk ("elan%d: could not find any sdram banks\n", dev->dev_instance);
-+ goto failed;
-+ }
-+
-+#ifndef CONFIG_MPSAS
-+ PRINTF0 (DBG_DEVICE, DBG_CONFIG, "elan4_start_device: initialising for ECC\n");
-+
-+ for (i = 0 ; i < dev->dev_sdram_numbanks; i++)
-+ if (dev->dev_sdram_banks[i].b_ioaddr)
-+ initialise_ecc (dev, &dev->dev_sdram_banks[i]);
-+#endif
-+
-+ dev->dev_sdram_initial_ecc_val = read_reg64 (dev, SDRamECCStatus);
-+
-+ /* Now enable ECC after we've scrubbed the memory */
-+ write_reg64 (dev, SDRamConfigReg, dev->dev_sdram_cfg | SDRAM_ENABLE_ECC);
-+
-+ /* clear any errors, and flush the tlb/route cache */
-+ PULSE_SYSCONTROL (dev, CONT_TLB_FLUSH | CONT_ROUTE_FLUSH | CONT_CLEAR_LINKPORT_INT | CONT_CLEAR_SDRAM_ERROR);
-+
-+ write_ureg32 (dev, InstCount.s.StatsCount, 0);
-+
-+ /* Initialise the thread processor's register file */
-+ for (i = 0; i < 64; i++)
-+ write_reg64 (dev, TProcRegs[i], 0);
-+
-+ /* Initialise the thread processor's ICache tags */
-+ for (i = 0; i < (E4_ICacheLines/E4_ICachePortSize); i++)
-+ {
-+ write_reg64 (dev, ICachePort_Cntl_Addr, i << E4_ICacheTagAddrShift);
-+ for (j = 0; j < E4_ICachePortSize; j++)
-+ write_reg64 (dev, ICachePort[j], E4_InvalidTagValue);
-+ }
-+
-+ /*
-+ * Initialise the ICache with a sethi %hi(addr << 7), %r0
-+ * writing 8 64 bit values per loop of sethi %g0 values ending in 77 for something different??
-+ */
-+ for (i = 0; i < E4_ICacheSizeInBytes; i += (E4_ICachePortSize << 3))
-+ {
-+ write_reg64 (dev, ICachePort_Cntl_Addr, E4_AccessICacheRams | (i >> 3));
-+
-+ for (j = 0; j < E4_ICachePortSize; j++)
-+ write_reg64 (dev, ICachePort[j],
-+ (E4_uint64) (((E4_uint64)i << (4+7)) + ((E4_uint64)j << (1+7)) + (0x077)) |
-+ (E4_uint64) (((E4_uint64)i << (4+7+32)) + ((E4_uint64)j << (1+7+32)) + (0x0e7)) << 32);
-+ }
-+
-+ /*
-+ * Initialise the top of the ICache Set0 with an instruction which will
-+ * cause a known trap fingerprint so that the application can identify it
-+ * and ignore the trap.
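-+ * (E4_ICacheFixupInsn is replicated into every word of the fixup line
-+ * below, so the fingerprint should be seen whichever word is fetched.)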
-+ */
-+ write_reg64 (dev, ICachePort_Cntl_Addr, E4_ICacheFixupOffset | E4_AccessICacheRams);
-+ for (i = 0; i < E4_ICachePortSize; i++)
-+ write_reg64 (dev, ICachePort[i], E4_ICacheFixupInsn | (E4_ICacheFixupInsn << 32));
-+
-+ /* create the buddy allocator for SDRAM */
-+ for (i = 0; i < dev->dev_sdram_numbanks; i++)
-+ if (dev->dev_sdram_banks[i].b_ioaddr)
-+ elan4_sdram_add_bank (dev, &dev->dev_sdram_banks[i]);
-+
-+ dev->dev_ctxtableshift = elan4_ctxt_table_shift;
-+ dev->dev_cqcount = (1 << elan4_ln2_max_cqs);
-+ dev->dev_cqreorder = 0;
-+
-+ /* allocate the sdram for cache flushing whilst still in direct mapped mode */
-+ dev->dev_cacheflush_space = elan4_sdram_alloc (dev, E4_CacheSize);
-+
-+ /* and no longer need direct mapped pci writes */
-+ CLEAR_SYSCONTROL (dev, dev_direct_map_pci_writes, CONT_DIRECT_MAP_PCI_WRITES);
-+
-+ /* allocate the hash tables, command queues, context tables etc */
-+ PRINTF0 (DBG_DEVICE, DBG_CONFIG, "elan4_start_device: allocating hash tables, command queues, context tables\n");
-+
-+ dev->dev_comqlowpri = elan4_sdram_alloc (dev, (1 << COMMAND_RUN_QUEUE_BITS));
-+ dev->dev_comqhighpri = elan4_sdram_alloc (dev, (1 << COMMAND_RUN_QUEUE_BITS));
-+ dev->dev_cqaddr = elan4_sdram_alloc (dev, sizeof (E4_CommandQueueDesc) * dev->dev_cqcount);
-+ dev->dev_dmaqhighpri = elan4_sdram_alloc (dev, E4_QueueSize(elan4_dmaq_highpri_size));
-+ dev->dev_dmaqlowpri = elan4_sdram_alloc (dev, E4_QueueSize(elan4_dmaq_lowpri_size));
-+ dev->dev_threadqhighpri = elan4_sdram_alloc (dev, E4_QueueSize(elan4_threadq_highpri_size));
-+ dev->dev_threadqlowpri = elan4_sdram_alloc (dev, E4_QueueSize(elan4_threadq_lowpri_size));
-+ dev->dev_interruptq = elan4_sdram_alloc (dev, E4_QueueSize(elan4_interruptq_size));
-+
-+ dev->dev_ctxtable = elan4_sdram_alloc (dev, (1 << dev->dev_ctxtableshift) * sizeof (E4_ContextControlBlock));
-+ dev->dev_faultarea = elan4_sdram_alloc (dev, CUN_Entries * sizeof (E4_FaultSave));
-+ dev->dev_inputtraparea = elan4_sdram_alloc (dev, sizeof (E4_IprocTrapState));
-+
-+ dev->dev_sdrampages[0] = elan4_sdram_alloc (dev, SDRAM_PAGE_SIZE);
-+ dev->dev_sdrampages[1] = elan4_sdram_alloc (dev, SDRAM_PAGE_SIZE);
-+
-+ for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
-+ {
-+ dev->dev_hashtable[tbl] = elan4_sdram_alloc (dev, dev->dev_hashsize[tbl] * sizeof (E4_HashTableEntry));
-+#ifndef CONFIG_MPSAS
-+ /* Initialise hash tables to invalid (zero) */
-+ elan4_sdram_zeroq_sdram (dev, dev->dev_hashtable[tbl], dev->dev_hashsize[tbl] * sizeof (E4_HashTableEntry));
-+#endif
-+ }
-+
-+ /* Initialise all context filters to discard */
-+#ifdef CONFIG_MPSAS
-+ if (sas_memset_dev (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM, dev->dev_ctxtable,
-+ E4_FILTER_DISCARD_ALL, (1 << (dev->dev_ctxtableshift-1))) < 0)
-+ {
-+ for (i = 0; i < (1 << dev->dev_ctxtableshift); i++)
-+ elan4_write_filter (dev, i, E4_FILTER_DISCARD_ALL);
-+ }
-+#else
-+ for (i = 0; i < (1 << dev->dev_ctxtableshift); i++)
-+ elan4_write_filter (dev, i, E4_FILTER_DISCARD_ALL);
-+#endif
-+
-+ PRINTF4 (DBG_DEVICE, DBG_CONFIG, "elan4_start_device: hashtables %x,%x, %x,%x\n", dev->dev_hashtable[0],
-+ dev->dev_hashsize[0], dev->dev_hashtable[1], dev->dev_hashsize[1]);
-+
-+ /* install the hash table pointers */
-+ PRINTF0 (DBG_DEVICE, DBG_CONFIG, "elan4_start_device: initialise registers with table addresses\n");
-+ write_reg64 (dev, MmuTableBasePtrs, (((E4_uint64) dev->dev_hashtable[0]) | ((E4_uint64) dev->dev_hashtable[1]) << 32));
-+ write_reg64 (dev, MmuFaultAndRootCntxPtr, (((E4_uint64) dev->dev_ctxtableshift) |
-+ ((E4_uint64) dev->dev_ctxtable) |
-+ ((E4_uint64) dev->dev_faultarea) << 32));
-+ write_reg64 (dev, InputTrapAndFilter, (((E4_uint64) dev->dev_ctxtableshift) |
-+ ((E4_uint64) dev->dev_ctxtable) |
-+ ((E4_uint64) dev->dev_inputtraparea) << 32));
-+ /*
-+ * The run ptrs have this format: (Front << 32) | Back
-+ * The base for both the front and back uses the high bits of the back pointer,
-+ * so writing just the base value is good enough.
-+ */
-+ write_reg64 (dev, CommandLowPriRunPtrs, dev->dev_comqlowpri);
-+ write_reg64 (dev, CommandHighPriRunPtrs, dev->dev_comqhighpri);
-+
-+ /* Initialise the run queues */
-+ write_reg64 (dev, DProcHighPriPtrs, E4_QueueValue (dev->dev_dmaqhighpri, elan4_dmaq_highpri_size));
-+ write_reg64 (dev, DProcLowPriPtrs, E4_QueueValue (dev->dev_dmaqlowpri, elan4_dmaq_lowpri_size));
-+ write_reg64 (dev, TProcHighPriPtrs, E4_QueueValue (dev->dev_threadqhighpri, elan4_threadq_highpri_size));
-+ write_reg64 (dev, TProcLowPriPtrs, E4_QueueValue (dev->dev_threadqlowpri, elan4_threadq_lowpri_size));
-+
-+ /* Initialise the interrupt queue as "empty" - this is actually with one entry on it */
-+ write_reg64 (dev, MainIntQueuePtrs.Value, (((E4_uint64) E4_QueueFrontValue (dev->dev_interruptq, elan4_interruptq_size) << 32) |
-+ ((E4_uint64) E4_QueueBackPointer(dev->dev_interruptq + E4_MainIntEntrySize))));
-+
-+ dev->dev_interruptq_nfptr = dev->dev_interruptq + E4_MainIntEntrySize;
-+
-+ /*
-+ * Flush the context filter before dropping the Discard all bits in the schedule status register.
-+ * Also hit the SCH_RestartTProc to clear out X's from the trap state and
-+ * hit the SCH_RestartDmaPrefetchProc to clear out X's from the prev register.
-+ */
-+ PULSE_SCHED_RESTART (dev, SCH_ContextFilterFlush | SCH_RestartTProc | SCH_RestartDmaPrefetchProc);
-+
-+ /* setup the schedule status register. */
-+ SET_SCHED_STATUS (dev, SCH_CProcTimeout6p2us | SCH_DProcTimeslice512us);
-+
-+ /*
-+ * Now initialise the inserter cache.
-+ * Bit 31 of the first word of the descriptor is a valid bit. This must be cleared.
-+ * Bit 31 becomes a used bit in the descriptors in memory.
-+ */
-+ for (i = 0; i < COMMAND_INSERTER_CACHE_ENTRIES; i++)
-+ {
-+ write_reg32 (dev, CommandControl.CommandQueueDescsBase, i); /* select a cache line */
-+ write_reg64 (dev, CommandCacheTestPort, 0); /* Mark it invalid */
-+ }
-+
-+ /* Setup the pointer to the command descriptors */
-+ /* the table must be aligned on a CQ_CommandDescsAlignment boundary */
-+ /* since we've allocated a small table - we work out the offset of the */
-+ /* first entry in our table for mapping in the command ports later */
-+ dev->dev_cqoffset = (dev->dev_cqaddr & (CQ_CommandDescsAlignment-1)) / sizeof (E4_CommandQueueDesc);
-+
-+ write_reg32 (dev, CommandControl.CommandQueueDescsBase, (dev->dev_cqaddr & ~(CQ_CommandDescsAlignment-1)) | COM_ENABLE_DEQUEUE);
-+
-+ /* allocate the bitmaps for cq,ctxt allocation */
-+ KMEM_ZALLOC (dev->dev_cqamap, bitmap_t *, BT_BITOUL(dev->dev_cqcount/ELAN4_CQ_PER_CQA) * sizeof (bitmap_t), 1);
-+ KMEM_ZALLOC (dev->dev_ctxmap, bitmap_t *, BT_BITOUL(1 << dev->dev_ctxtableshift) * sizeof (bitmap_t), 1);
-+
-+ if (dev->dev_cqamap == NULL || dev->dev_ctxmap == NULL)
-+ goto failed;
-+
-+ /* Make every fourth context be invalid for ICache fixup.
-+ * Context 0 is also invalid - since it is used to indicate
-+ * an invalid tag.
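-+ * (For example, with elan4_ctxt_table_shift == 12 this would reserve
-+ * contexts 0, 4, 8, ..., 4092 in dev_ctxmap.)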
*/ -+ for (i = 0; i < (1 << dev->dev_ctxtableshift); i += 4) -+ BT_SET (dev->dev_ctxmap, i); -+ -+ /* initialise the halt operations */ -+ dev->dev_haltop_mask = 0; -+ dev->dev_haltop_active = 0; -+ -+ /* allocate the hash table shadow structures - and place all blocks on the free lists */ -+ for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++) -+ { -+ KMEM_ZALLOC (dev->dev_mmuhash[tbl], ELAN4_HASH_ENTRY *, dev->dev_hashsize[tbl] * sizeof (ELAN4_HASH_ENTRY), 1); -+ -+ if (dev->dev_mmuhash[tbl] == NULL) -+ goto failed; -+ -+ for (i = 0; i < dev->dev_hashsize[tbl]; i++) -+ dev->dev_mmuhash[tbl][i].he_entry = dev->dev_hashtable[tbl] + (i * sizeof (E4_HashTableEntry)); -+ } -+ -+ /* setup the interrupt mask register */ -+ SET_INT_MASK (dev, (INT_MSI0 | INT_MSI1 | INT_MSI2 | INT_MSI3) & ~(INT_Discarding | INT_Halted | INT_LinkPortKeyFail)); -+ -+ /* start a thread to handle excessive main interrupts */ -+ if (kernel_thread_create (elan4_mainint_thread, (caddr_t) dev) == NULL) -+ goto failed; -+ dev->dev_mainint_started = 1; -+ -+ /* install the device context - and allocate the first 16 command queues */ -+ if (elan4_insertctxt (dev, &dev->dev_ctxt, &device_trap_ops) != 0) -+ goto failed; -+ -+ /* Allocate command queues, one for each entry in the inserter cache, -+ * we'll use these queues to flush the insert cache */ -+ for (i = 0; i < COMMAND_INSERTER_CACHE_ENTRIES; i++) -+ { -+ if ((dev->dev_flush_cq[i] = elan4_alloccq (&dev->dev_ctxt, CQ_Size1K, CQ_DmaStartEnableBit | CQ_InterruptEnableBit, -+ CQ_Priority)) == NULL) -+ goto failed; -+ -+ ASSERT (elan4_cq2num(dev->dev_flush_cq[i]) == i); -+ -+ dev->dev_flush_finished |= (1 << i); -+ } -+ -+ /* Allocate command queues for dma halt operations */ -+ if ((dev->dev_dma_flushop[0].cq = elan4_alloccq (&dev->dev_ctxt, CQ_Size1K, CQ_DmaStartEnableBit|CQ_WaitEventEnableBit, 0)) == NULL || -+ (dev->dev_dma_flushop[1].cq = elan4_alloccq (&dev->dev_ctxt, CQ_Size1K, CQ_DmaStartEnableBit|CQ_WaitEventEnableBit, CQ_Priority)) == NULL) -+ goto failed; -+ -+#ifdef CONFIG_MPSAS -+ elan4_sdram_flushcache (dev, 0, E4_CacheSize); -+#endif -+ -+ /* initialise halt operation for flushing the icache */ -+ dev->dev_iflush_haltop.op_function = device_iflush_haltop; -+ dev->dev_iflush_haltop.op_arg = dev; -+ dev->dev_iflush_haltop.op_mask = INT_TProcHalted; -+ -+ /* Allocate a route table, and create a valid route for vp==0, this is used -+ * when a DMA is removed from the dma run queue */ -+ if ((dev->dev_routetable = elan4_alloc_routetable (dev, 0)) == NULL) -+ goto failed; -+ -+ elan4_set_routetable (&dev->dev_ctxt, dev->dev_routetable); -+ -+ entry.Values[0] = FIRST_MYLINK; -+ entry.Values[1] = 0; -+ -+ elan4_write_route (dev, dev->dev_routetable, 0, &entry); -+ -+ /* map the sdram pages into the elan */ -+ dev->dev_tproc_suspend = DEVICE_TPROC_SUSPEND_ADDR; -+ dev->dev_tproc_space = DEVICE_TPROC_SPACE_ADDR; -+ -+ -+ elan4mmu_pteload (&dev->dev_ctxt, 0, dev->dev_tproc_suspend, HE_TYPE_SDRAM, (dev->dev_sdrampages[0] >> PTE_PADDR_SHIFT) | PTE_SetPerm(PERM_LocExecute)); -+ elan4mmu_pteload (&dev->dev_ctxt, 0, dev->dev_tproc_space, HE_TYPE_SDRAM, (dev->dev_sdrampages[1] >> PTE_PADDR_SHIFT) | PTE_SetPerm(PERM_LocEventWrite)); -+ -+ /* and store the thread suspend sequence in it for use when a thread is removed from the run queue */ -+ elan4_sdram_writel (dev, dev->dev_sdrampages[0], DEVICE_TPROC_SUSPEND_INSTR); -+ -+ /* and initialise the dma flush event in sdrampage[1] */ -+ elan4_sdram_writeq (dev, dev->dev_sdrampages[1] + 64, E4_EVENT_INIT_VALUE (-32, E4_EVENT_WRITE, 
E4_EVENT_DTYPE_LONG, 0));
-+
-+#ifdef CONFIG_MPSAS
-+ do_initdma (dev);
-+#endif
-+
-+ if (!elan4_neterr_init (dev))
-+ goto failed;
-+
-+ elan4_configure_writecombining (dev);
-+
-+ /* finally register the device with elanmod for rms */
-+ dev->dev_idx = elan_dev_register (&dev->dev_devinfo, &elan4_dev_ops, (void *) dev);
-+
-+ dev->dev_state = ELAN4_STATE_STARTED;
-+
-+ return (0);
-+
-+ failed:
-+ printk ("elan%d: failed to start elan4 device - stopping\n", dev->dev_instance);
-+
-+ elan4_stop_device (dev);
-+ return (-ENOMEM);
-+}
-+
-+void
-+elan4_stop_device (ELAN4_DEV *dev)
-+{
-+ unsigned long flags;
-+ int i, tbl;
-+
-+ dev->dev_state = ELAN4_STATE_STOPPING;
-+
-+ elan_dev_deregister (&dev->dev_devinfo);
-+
-+ elan4_unconfigure_writecombining (dev);
-+
-+ elan4_neterr_destroy (dev);
-+
-+ if (dev->dev_tproc_suspend)
-+ elan4mmu_unload_range (&dev->dev_ctxt, 0, dev->dev_tproc_suspend, 1 << dev->dev_pageshift[0]);
-+
-+ if (dev->dev_tproc_space)
-+ elan4mmu_unload_range (&dev->dev_ctxt, 0, dev->dev_tproc_space, 1 << dev->dev_pageshift[0]);
-+
-+ if (dev->dev_routetable)
-+ {
-+ elan4_set_routetable (&dev->dev_ctxt, NULL);
-+ elan4_free_routetable (dev, dev->dev_routetable);
-+ }
-+
-+ for (i = 0; i < 2; i++)
-+ if (dev->dev_dma_flushop[i].cq)
-+ elan4_freecq (&dev->dev_ctxt, dev->dev_dma_flushop[i].cq);
-+
-+ /* free off the device context - and the insert cache flushing command queues */
-+ for (i = 0; i < COMMAND_INSERTER_CACHE_ENTRIES; i++)
-+ if (dev->dev_flush_cq[i])
-+ elan4_freecq (&dev->dev_ctxt, dev->dev_flush_cq[i]);
-+
-+ if (dev->dev_ctxt.ctxt_dev)
-+ elan4_removectxt (dev, &dev->dev_ctxt);
-+
-+ /* stop the mainint thread */
-+ spin_lock_irqsave (&dev->dev_mainint_lock, flags);
-+ dev->dev_stop_threads = 1;
-+
-+ while (dev->dev_mainint_started && !dev->dev_mainint_stopped)
-+ {
-+ kcondvar_wakeupall (&dev->dev_mainint_wait, &dev->dev_mainint_lock);
-+ kcondvar_wait (&dev->dev_mainint_wait, &dev->dev_mainint_lock, &flags);
-+ }
-+ dev->dev_mainint_started = dev->dev_mainint_stopped = 0;
-+ spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
-+
-+ /* cancel any error interrupt timeouts */
-+ if (timer_fn_queued (&dev->dev_error_timeoutid))
-+ cancel_timer_fn (&dev->dev_error_timeoutid);
-+
-+ if (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA && timer_fn_queued (&dev->dev_linkerr_timeoutid))
-+ cancel_timer_fn (&dev->dev_linkerr_timeoutid);
-+
-+ /* reset the interrupt mask register to zero */
-+ if (dev->dev_regs)
-+ SET_INT_MASK (dev, 0);
-+
-+ for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
-+ {
-+ if (dev->dev_mmuhash[tbl])
-+ KMEM_FREE (dev->dev_mmuhash[tbl], dev->dev_hashsize[tbl] * sizeof (ELAN4_HASH_ENTRY));
-+ if (dev->dev_hashtable[tbl])
-+ elan4_sdram_free (dev, dev->dev_hashtable[tbl], dev->dev_hashsize[tbl] * sizeof (E4_HashTableEntry));
-+ }
-+
-+ if (dev->dev_cqamap)
-+ KMEM_FREE (dev->dev_cqamap, BT_BITOUL (dev->dev_cqcount/ELAN4_CQ_PER_CQA) * sizeof (bitmap_t));
-+ if (dev->dev_ctxmap)
-+ KMEM_FREE (dev->dev_ctxmap, BT_BITOUL(1 << dev->dev_ctxtableshift) * sizeof (bitmap_t));
-+
-+ if (dev->dev_comqlowpri)
-+ elan4_sdram_free (dev, dev->dev_comqlowpri, (1 << COMMAND_RUN_QUEUE_BITS));
-+ if (dev->dev_comqhighpri)
-+ elan4_sdram_free (dev, dev->dev_comqhighpri, (1 << COMMAND_RUN_QUEUE_BITS));
-+ if (dev->dev_cqaddr)
-+ elan4_sdram_free (dev, dev->dev_cqaddr, sizeof (E4_CommandQueueDesc) * dev->dev_cqcount);
-+ if (dev->dev_dmaqhighpri)
-+ elan4_sdram_free (dev, dev->dev_dmaqhighpri, E4_QueueSize(elan4_dmaq_highpri_size));
-+ if
(dev->dev_dmaqlowpri) -+ elan4_sdram_free (dev, dev->dev_dmaqlowpri, E4_QueueSize(elan4_dmaq_lowpri_size)); -+ if (dev->dev_threadqhighpri) -+ elan4_sdram_free (dev, dev->dev_threadqhighpri, E4_QueueSize(elan4_threadq_highpri_size)); -+ if (dev->dev_threadqlowpri) -+ elan4_sdram_free (dev, dev->dev_threadqlowpri, E4_QueueSize(elan4_threadq_lowpri_size)); -+ if (dev->dev_interruptq) -+ elan4_sdram_free (dev, dev->dev_interruptq, E4_QueueSize(elan4_interruptq_size)); -+ -+ if (dev->dev_ctxtable) -+ elan4_sdram_free (dev, dev->dev_ctxtable, (1 << dev->dev_ctxtableshift) * sizeof (E4_ContextControlBlock)); -+ if (dev->dev_faultarea) -+ elan4_sdram_free (dev, dev->dev_faultarea, CUN_Entries * sizeof (E4_FaultSave)); -+ if (dev->dev_inputtraparea) -+ elan4_sdram_free (dev, dev->dev_inputtraparea, sizeof (E4_IprocTrapState)); -+ -+ if (dev->dev_sdrampages[0]) -+ elan4_sdram_free (dev, dev->dev_sdrampages[0], SDRAM_PAGE_SIZE); -+ if (dev->dev_sdrampages[1]) -+ elan4_sdram_free (dev, dev->dev_sdrampages[1], SDRAM_PAGE_SIZE); -+ -+ for (i = 0; i < dev->dev_sdram_numbanks; i++) -+ if (dev->dev_sdram_banks[i].b_ioaddr) -+ elan4_sdram_fini_bank (dev, &dev->dev_sdram_banks[i]); -+ -+ elan4_pcifini (dev); -+ -+ dev->dev_state = ELAN4_STATE_STOPPED; -+ -+ if (dev->dev_ack_errors) -+ kfree(dev->dev_ack_errors); -+ if (dev->dev_dproc_timeout) -+ kfree(dev->dev_dproc_timeout); -+ if (dev->dev_cproc_timeout) -+ kfree(dev->dev_cproc_timeout); -+} -+ -+static __inline__ int -+compute_arity (int lvl, unsigned n, char *arity) -+{ -+ if (arity[lvl] == 0) -+ { -+ if (n <= 8) -+ arity[lvl] = n; -+ else -+ arity[lvl] = 4; -+ } -+ -+ return (arity[lvl]); -+} -+ -+int -+elan4_compute_position (ELAN_POSITION *pos, unsigned nodeid, unsigned numnodes, unsigned arityval) -+{ -+ int i, lvl, n; -+ char arity[ELAN_MAX_LEVELS]; -+ -+ if (nodeid >= numnodes) -+ return -EINVAL; -+ -+ for (i = 0; i < ELAN_MAX_LEVELS; i++, arityval >>= 4) -+ arity[i] = arityval & 7; -+ -+ for (lvl = 0, n = numnodes; n > compute_arity(lvl, n, arity) && lvl < ELAN_MAX_LEVELS; lvl++) -+ { -+ if ((n % arity[lvl]) != 0) -+ return -EINVAL; -+ -+ n /= arity[lvl]; -+ } -+ -+ if (arity[lvl] != n) -+ return -EINVAL; -+ -+ for (i = 0; i <= lvl; i++) -+ pos->pos_arity[i] = arity[lvl - i]; -+ -+ pos->pos_nodes = numnodes; -+ pos->pos_levels = lvl + 1; -+ pos->pos_nodeid = nodeid; -+ pos->pos_mode = ELAN_POS_MODE_SWITCHED; -+ -+ return 0; -+} -+ -+int -+elan4_get_position (ELAN4_DEV *dev, ELAN_POSITION *pos) -+{ -+ kmutex_lock (&dev->dev_lock); -+ *pos = dev->dev_position; -+ kmutex_unlock (&dev->dev_lock); -+ -+ return (pos->pos_mode); -+} -+ -+int -+elan4_set_position (ELAN4_DEV *dev, ELAN_POSITION *pos) -+{ -+ int forceLocal = 0; -+ int nnodes, i; -+ unsigned int *ack_errors; -+ unsigned int *dproc_timeout; -+ unsigned int *cproc_timeout; -+ -+ switch (pos->pos_mode) -+ { -+ case ELAN_POS_UNKNOWN: -+ break; -+ -+ case ELAN_POS_MODE_SWITCHED: -+ if (pos->pos_levels > ELAN_MAX_LEVELS) -+ return (-EINVAL); -+ -+ for (i = 0, nnodes = 1; i < pos->pos_levels; i++) -+ { -+ -+ if (pos->pos_arity[i] <= 0 || (i == 0 ? 
pos->pos_arity[i] > 8 : pos->pos_arity[i] >= 8)) /* allow an 8 way top-switch */
-+ return (-EINVAL);
-+
-+ nnodes *= pos->pos_arity[i];
-+ }
-+
-+ if (pos->pos_nodes > nnodes || pos->pos_nodeid >= pos->pos_nodes)
-+ return (-EINVAL);
-+ break;
-+
-+ case ELAN_POS_MODE_LOOPBACK:
-+ if (pos->pos_levels != 1 || pos->pos_nodes != 1 || pos->pos_nodeid != 0 || pos->pos_arity[0] != 1)
-+ return (-EINVAL);
-+
-+ forceLocal = 1;
-+ break;
-+
-+ case ELAN_POS_MODE_BACKTOBACK:
-+ if (pos->pos_levels != 1 || pos->pos_nodes != 2 || pos->pos_nodeid >= 2 || pos->pos_arity[0] != 2)
-+ return (-EINVAL);
-+
-+ forceLocal = (pos->pos_nodeid == 0);
-+ break;
-+
-+ default:
-+ return (-EINVAL);
-+ }
-+
-+ ack_errors = kmalloc(pos->pos_nodes * sizeof(unsigned int), GFP_KERNEL);
-+ if (!ack_errors)
-+ return (-ENOMEM);
-+ memset(ack_errors, 0, pos->pos_nodes * sizeof(unsigned int));
-+ dproc_timeout = kmalloc(pos->pos_nodes * sizeof(unsigned int), GFP_KERNEL);
-+ if (!dproc_timeout)
-+ {
-+ kfree(ack_errors);
-+ return (-ENOMEM);
-+ }
-+ memset(dproc_timeout, 0, pos->pos_nodes * sizeof(unsigned int));
-+ cproc_timeout = kmalloc(pos->pos_nodes * sizeof(unsigned int), GFP_KERNEL);
-+ if (!cproc_timeout)
-+ {
-+ kfree(ack_errors);
-+ kfree(dproc_timeout);
-+ return (-ENOMEM);
-+ }
-+ memset(cproc_timeout, 0, pos->pos_nodes * sizeof(unsigned int));
-+
-+ kmutex_lock (&dev->dev_lock);
-+ dev->dev_position = *pos;
-+ dev->dev_ack_errors = ack_errors;
-+ dev->dev_dproc_timeout = dproc_timeout;
-+ dev->dev_cproc_timeout = cproc_timeout;
-+
-+ if (forceLocal)
-+ write_reg32 (dev, LinkContSettings, read_reg32 (dev, LinkContSettings) | LCONT_FORCE_COMMSCLK_LOCAL);
-+ else
-+ write_reg32 (dev, LinkContSettings, read_reg32 (dev, LinkContSettings) & ~LCONT_FORCE_COMMSCLK_LOCAL);
-+
-+ pioflush_reg (dev);
-+ kmutex_unlock (&dev->dev_lock);
-+
-+ return (0);
-+}
-+
-+void
-+elan4_get_params (ELAN4_DEV *dev, ELAN_PARAMS *params, unsigned short *mask)
-+{
-+ kmutex_lock (&dev->dev_lock);
-+
-+ *mask = dev->dev_devinfo.dev_params_mask;
-+ memcpy (params, &dev->dev_devinfo.dev_params, sizeof (ELAN_PARAMS));
-+
-+ kmutex_unlock (&dev->dev_lock);
-+}
-+
-+void
-+elan4_set_params (ELAN4_DEV *dev, ELAN_PARAMS *params, unsigned short mask)
-+{
-+ int i;
-+
-+ kmutex_lock (&dev->dev_lock);
-+ for (i = 0; i < ELAN4_PARAM_COUNT; i++)
-+ if (mask & (1 << i))
-+ dev->dev_devinfo.dev_params.values[i] = params->values[i];
-+
-+ dev->dev_devinfo.dev_params_mask |= mask;
-+ kmutex_unlock (&dev->dev_lock);
-+}
-+
-+
-+EXPORT_SYMBOL(elan4_get_position);
-+EXPORT_SYMBOL(elan4_set_position);
-+
-+EXPORT_SYMBOL(elan4_queue_haltop);
-+EXPORT_SYMBOL(elan4_queue_dma_flushop);
-+EXPORT_SYMBOL(elan4_queue_mainintop);
-+
-+EXPORT_SYMBOL(elan4_insertctxt);
-+EXPORT_SYMBOL(elan4_removectxt);
-+
-+EXPORT_SYMBOL(elan4_attach_filter);
-+EXPORT_SYMBOL(elan4_detach_filter);
-+EXPORT_SYMBOL(elan4_set_filter);
-+EXPORT_SYMBOL(elan4_set_routetable);
-+
-+EXPORT_SYMBOL(elan4_alloccq);
-+EXPORT_SYMBOL(elan4_freecq);
-+EXPORT_SYMBOL(elan4_restartcq);
-+
-+EXPORT_SYMBOL(elan4_flush_icache);
-+EXPORT_SYMBOL(elan4_hardware_lock_check);
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "stroustrup"
-+ * End:
-+ */
-diff -urN clean/drivers/net/qsnet/elan4/device_Linux.c linux-2.6.9/drivers/net/qsnet/elan4/device_Linux.c
---- clean/drivers/net/qsnet/elan4/device_Linux.c 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/elan4/device_Linux.c 2005-09-07 10:35:03.000000000 -0400
-@@ -0,0 +1,3034 @@
-+/*
-+ * Copyright (c) 2001-2002 by Quadrics Supercomputers
World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: device_Linux.c,v 1.110.2.9 2005/09/07 14:35:03 mike Exp $" -+/* $Source: /cvs/master/quadrics/elan4mod/device_Linux.c,v $*/ -+ -+#include -+#include -+#include -+ -+#include -+#include -+#ifdef CONFIG_MTRR -+#include -+#endif -+ -+#include -+#include -+#include -+#include -+#include -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) -+#include -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) -+typedef void irqreturn_t; -+#endif -+# define IRQ_NONE -+# define IRQ_HANDLED -+#endif -+ -+#include -+#include -+#include -+#include -+#include -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) -+#error please use a 2.4.0 series kernel or newer -+#endif -+ -+ -+#if defined(LINUX_SPARC) || defined(LINUX_PPC64) -+#define __io_remap_page_range(from,offset,size,prot) remap_page_range(from,offset,size,prot) -+#define __remap_page_range(from,offset,size,prot) remap_page_range(from,offset,size,prot) -+#elif defined(NO_RMAP) -+#define __io_remap_page_range(from,offset,size,prot) io_remap_page_range(from,offset,size,prot) -+#define __remap_page_range(from,offset,size,prot) remap_page_range(from,offset,size,prot) -+#else -+#define __io_remap_page_range(from,offset,size,prot) io_remap_page_range(vma,from,offset,size,prot) -+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) -+#define __remap_page_range(from,offset,size,prot) remap_pfn_range(vma,from,(offset)>>PAGE_SHIFT,size,prot) -+#else -+#define __remap_page_range(from,offset,size,prot) remap_page_range(vma,from,offset,size,prot) -+#endif -+#endif -+ -+#if defined (X86_FEATURE_PAT) -+static unsigned int pat_pteval = -1; -+#endif -+ -+#if defined(__alpha) -+static inline physaddr_t bar2phys (unsigned long addr) -+{ -+ return virt_to_phys((void *) ioremap(addr, PAGE_SIZE)); -+} -+#elif defined(__ia64) -+static inline physaddr_t bar2phys (unsigned long addr) -+{ -+ return ((addr) & ~__IA64_UNCACHED_OFFSET); -+} -+#elif defined(__powerpc64__) -+ -+#ifdef CONFIG_PPC_PSERIES -+#include -+ -+static inline physaddr_t bar2phys (unsigned long addr) -+{ -+ return eeh_token_to_phys (addr); -+} -+#endif -+ -+#else -+static inline physaddr_t bar2phys (unsigned long addr) -+{ -+ return (addr); -+} -+#endif -+ -+#ifndef pgprot_noncached -+static inline pgprot_t pgprot_noncached(pgprot_t _prot) -+{ -+ unsigned long prot = pgprot_val(_prot); -+#if defined(__powerpc__) -+ prot |= _PAGE_NO_CACHE | _PAGE_GUARDED; -+#elif defined(__sparc__) -+ prot &= ~(_PAGE_CACHE); -+ prot |= _PAGE_IE; -+#endif -+ return __pgprot(prot); -+} -+#endif -+ -+#ifndef pgprot_writecombine -+static inline pgprot_t pgprot_writecombine (pgprot_t _prot) -+{ -+ unsigned long prot = pgprot_val(_prot); -+ -+#if defined (X86_FEATURE_PAT) -+ if (pat_pteval != -1) -+ prot = (prot & ~(_PAGE_PCD | _PAGE_PWT | _PAGE_PSE)) | pat_pteval; -+#endif -+ return __pgprot (prot); -+} -+#endif -+ -+#define ELAN4_DRIVER_VERSION 0x103 /* 16 bit value */ -+ -+/* -+ * Function prototypes. 
-+ */ -+static int elan4_attach_device (int instance, struct pci_dev *pdev); -+static void elan4_detach_device (ELAN4_DEV *dev); -+ -+static int elan4_open (struct inode *inode, struct file *file); -+static int elan4_release(struct inode *inode, struct file *file); -+static int elan4_ioctl (struct inode *inode, struct file *file, -+ unsigned int cmd, unsigned long arg); -+static int elan4_mmap (struct file *file, struct vm_area_struct *vm_area); -+ -+static irqreturn_t elan4_irq (int irq, void *arg, struct pt_regs *regs); -+ -+static void elan4_shutdown_devices(int panicing); -+ -+static int disabled; /* bitmask of which devices not to start */ -+unsigned int elan4_pll_cfg = 0; -+int elan4_pll_div = 31; /* RevC PCB */ -+int elan4_mod45disable = 0; -+static int optimise_pci_bus = 1; /* 0 => don't, 1 => if ok, 2 => always */ -+static int default_features = 0; /* default values for dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] */ -+int assfail_mode = 0; -+ -+long long sdram_cfg = SDRAM_STARTUP_VALUE; -+static int sdram_cfg_lo; -+static int sdram_cfg_hi; -+int sdram_bank_limit; -+ -+MODULE_AUTHOR("Quadrics Ltd."); -+MODULE_DESCRIPTION("Elan 4 Device Driver"); -+MODULE_LICENSE("GPL"); -+ -+module_param(elan4_debug, uint, 0); -+module_param(elan4_debug_toconsole, uint, 0); -+module_param(elan4_debug_tobuffer, uint, 0); -+module_param(elan4_debug_mmu, uint, 0); -+module_param(elan4_pll_cfg, uint, 0); -+module_param(elan4_pll_div, uint, 0); -+module_param(elan4_mod45disable, uint, 0); -+module_param(optimise_pci_bus, uint, 0); -+module_param(default_features, uint, 0); -+module_param(assfail_mode, uint, 0); -+ -+module_param(disabled, uint, 0); -+module_param(sdram_cfg_lo, uint, 0); -+module_param(sdram_cfg_hi, uint, 0); -+module_param(sdram_bank_limit, uint, 0); -+ -+module_param(elan4_hash_0_size_val, uint, 0); -+module_param(elan4_hash_1_size_val, uint, 0); -+module_param(elan4_ctxt_table_shift, uint, 0); -+module_param(elan4_ln2_max_cqs, uint, 0); -+module_param(elan4_dmaq_highpri_size, uint, 0); -+module_param(elan4_threadq_highpri_size, uint, 0); -+module_param(elan4_dmaq_lowpri_size, uint, 0); -+module_param(elan4_threadq_lowpri_size, uint, 0); -+module_param(elan4_interruptq_size, uint, 0); -+ -+module_param(elan4_mainint_punt_loops, uint, 0); -+module_param(elan4_mainint_resched_ticks, uint, 0); -+module_param(elan4_linkport_lock, uint, 0); -+module_param(elan4_eccerr_recheck, uint, 0); -+ -+module_param(user_p2p_route_options, uint, 0); -+module_param(user_bcast_route_options, uint, 0); -+module_param(user_dproc_retry_count, uint, 0); -+module_param(user_cproc_retry_count, uint, 0); -+module_param(user_ioproc_enabled, uint, 0); -+module_param(user_pagefault_enabled, uint, 0); -+ -+/* -+ * Standard device entry points. 
-+ */ -+static struct file_operations elan4_fops = { -+ ioctl: elan4_ioctl, -+ mmap: elan4_mmap, -+ open: elan4_open, -+ release: elan4_release, -+}; -+ -+ELAN4_DEV *elan4_devices[ELAN4_MAX_CONTROLLER]; -+ -+#if defined(CONFIG_DEVFS_FS) -+static devfs_handle_t devfs_handle; -+#endif -+ -+ -+#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64) -+static int -+elan4_ioctl32_cmds[] = -+{ /* /dev/elan/control */ -+ ELAN4IO_DEVINFO, -+ ELAN4IO_GET_POSITION, -+ ELAN4IO_SET_POSITION, -+ ELAN4IO_GET_PARAMS, -+ ELAN4IO_SET_PARAMS, -+ -+ /* /dev/elan4/user */ -+ ELAN4IO_POSITION, -+ ELAN4IO_FREE, -+ ELAN4IO_ATTACH, -+ ELAN4IO_DETACH, -+ ELAN4IO_BLOCK_INPUTTER, -+ -+ ELAN4IO_ADD_P2PVP, -+ ELAN4IO_ADD_BCASTVP, -+ ELAN4IO_REMOVEVP, -+ ELAN4IO_SET_ROUTE, -+ ELAN4IO_RESET_ROUTE, -+ ELAN4IO_GET_ROUTE, -+ ELAN4IO_CHECK_ROUTE, -+ -+ ELAN4IO_ALLOCCQ, -+ ELAN4IO_FREECQ, -+ ELAN4IO_SETPERM32, -+ ELAN4IO_CLRPERM32, -+ ELAN4IO_TRAPSIG, -+ ELAN4IO_TRAPHANDLER32, -+ ELAN4IO_REQUIRED_MAPPINGS, -+ -+ ELAN4IO_RESUME_EPROC_TRAP, -+ ELAN4IO_RESUME_CPROC_TRAP, -+ ELAN4IO_RESUME_DPROC_TRAP, -+ ELAN4IO_RESUME_TPROC_TRAP, -+ ELAN4IO_RESUME_IPROC_TRAP, -+ -+ ELAN4IO_FLUSH_ICACHE, -+ -+ ELAN4IO_STOP_CTXT, -+ -+ ELAN4IO_ALLOC_INTCOOKIE, -+ ELAN4IO_FREE_INTCOOKIE, -+ ELAN4IO_ARM_INTCOOKIE, -+ ELAN4IO_WAIT_INTCOOKIE, -+ -+ ELAN4IO_ALLOC_TRAP_QUEUES, -+ ELAN4IO_NETERR_MSG, -+ ELAN4IO_NETERR_TIMER, -+ ELAN4IO_NETERR_FIXUP, -+ -+ ELAN4IO_DUMPCQ32, -+}; -+ -+static int elan4_ioctl32 (unsigned int fd, unsigned int cmd, -+ unsigned long arg, struct file *file); -+#endif -+ -+/* -+ * Standard device entry points. -+ */ -+#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE) -+ -+#include -+ -+static int -+elan4_dump_event (struct notifier_block *self, unsigned long event, void *buffer) -+{ -+ if (event == DUMP_BEGIN) -+ elan4_shutdown_devices (FALSE); -+ -+ return (NOTIFY_DONE); -+} -+static struct notifier_block elan4_dump_notifier = -+{ -+ notifier_call: elan4_dump_event, -+ priority: 0, -+}; -+ -+#endif -+ -+static int -+elan4_reboot_event (struct notifier_block *self, unsigned long event, void *buffer) -+{ -+ if ((event == SYS_RESTART || event == SYS_HALT || event == SYS_POWER_OFF)) -+ elan4_shutdown_devices (0); -+ -+ return (NOTIFY_DONE); -+} -+ -+static struct notifier_block elan4_reboot_notifier = -+{ -+ notifier_call: elan4_reboot_event, -+ priority: 0, -+}; -+ -+#if !defined(NO_PANIC_NOTIFIER) -+static int -+elan4_panic_event (struct notifier_block *self, unsigned long event, void *buffer) -+{ -+ elan4_shutdown_devices (1); -+ -+ return (NOTIFY_DONE); -+} -+ -+static struct notifier_block elan4_panic_notifier = -+{ -+ notifier_call: elan4_panic_event, -+ priority: 0, -+}; -+#endif -+ -+static int __init -+elan4_init (void) -+{ -+ int err; -+ struct pci_dev *pdev; -+ int count; -+#if defined(__ia64) -+ int seenRevA = 0; -+#endif -+ -+ if ((err = register_chrdev (ELAN4_MAJOR, ELAN4_NAME, &elan4_fops)) < 0) -+ return (err); -+ -+#if defined(CONFIG_DEVFS_FS) -+ devfs_handle = devfs_mk_dir (NULL, "elan4", NULL); -+#endif -+ -+ intcookie_init(); -+ elan4_debug_init(); -+ elan4_procfs_init(); -+ -+#ifdef CONFIG_MPSAS -+ sas_init(); -+#endif -+ -+ if (sdram_cfg_lo != 0 && sdram_cfg_hi != 0) -+ sdram_cfg = (((unsigned long long) sdram_cfg_hi) << 32) | ((unsigned long long) sdram_cfg_lo); -+ -+ for (count = 0, pdev = NULL; (pdev = pci_find_device(PCI_VENDOR_ID_QUADRICS, PCI_DEVICE_ID_ELAN4, pdev)) != NULL ; count++) -+ { -+#if defined(__ia64) -+ unsigned char revid; -+ -+ pci_read_config_byte (pdev, 
PCI_REVISION_ID, &revid); -+ -+ if (revid == PCI_REVISION_ID_ELAN4_REVA && seenRevA++ != 0 && pci_find_device (PCI_VENDOR_ID_HP, 0x122e, NULL)) -+ { -+ printk ("elan: only a single elan4a supported on rx2600\n"); -+ continue; -+ } -+#endif -+ -+ if (count < ELAN4_MAX_CONTROLLER) -+ elan4_attach_device (count, pdev); -+ } -+ -+ if (count >= ELAN4_MAX_CONTROLLER) -+ printk ("elan: found %d elan4 devices - only support %d\n", count, ELAN4_MAX_CONTROLLER); -+ -+#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64) -+ lock_kernel(); -+ { -+ extern int register_ioctl32_conversion(unsigned int cmd, int (*handler)(unsigned int, unsigned int, unsigned long, struct file *)); -+ register int i; -+ for (i = 0; i < sizeof (elan4_ioctl32_cmds)/sizeof(elan4_ioctl32_cmds[0]); i++) -+ register_ioctl32_conversion (elan4_ioctl32_cmds[i], elan4_ioctl32); -+ } -+ unlock_kernel(); -+#endif -+ -+#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE) -+ register_dump_notifier (&elan4_dump_notifier); -+#endif -+ register_reboot_notifier (&elan4_reboot_notifier); -+ -+#if !defined(NO_PANIC_NOTIFIER) -+ notifier_chain_register (&panic_notifier_list, &elan4_panic_notifier); -+#endif -+ -+ return (0); -+} -+ -+#ifdef MODULE -+static void __exit -+elan4_exit (void) -+{ -+ int i; -+ -+#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64) -+ lock_kernel(); -+ { -+ extern void unregister_ioctl32_conversion(unsigned int cmd); -+ -+ for (i = 0; i < sizeof (elan4_ioctl32_cmds)/sizeof(elan4_ioctl32_cmds[0]); i++) -+ unregister_ioctl32_conversion (elan4_ioctl32_cmds[i]); -+ } -+ unlock_kernel(); -+#endif -+ -+#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE) -+ unregister_dump_notifier (&elan4_dump_notifier); -+#endif -+ unregister_reboot_notifier (&elan4_reboot_notifier); -+ -+#if !defined(NO_PANIC_NOTIFIER) -+ notifier_chain_unregister (&panic_notifier_list, &elan4_panic_notifier); -+#endif -+ -+ for (i = 0; i < ELAN4_MAX_CONTROLLER; i++) -+ if (elan4_devices[i] != NULL) -+ elan4_detach_device (elan4_devices[i]); -+ -+ elan4_procfs_fini(); -+ elan4_debug_fini(); -+ intcookie_fini(); -+ -+#if defined(CONFIG_DEVFS_FS) -+ devfs_unregister (devfs_handle); -+#endif -+ -+ unregister_chrdev(ELAN4_MAJOR, ELAN4_NAME); -+} -+ -+module_init (elan4_init); -+module_exit (elan4_exit); -+ -+#else -+__initcall (elan4_init); -+#endif -+ -+/* -+ * Minor numbers encoded as : -+ * [5:0] device number -+ * [15:6] function number -+ */ -+#define ELAN4_DEVICE_MASK 0x3F -+#define ELAN4_DEVICE(inode) (MINOR((inode)->i_rdev) & ELAN4_DEVICE_MASK) -+ -+#define ELAN4_MINOR_CONTROL 0 -+#define ELAN4_MINOR_MEM 1 -+#define ELAN4_MINOR_USER 2 -+ -+#define ELAN4_MINOR_SHIFT 6 -+#define ELAN4_MINOR(inode) (MINOR((inode)->i_rdev) >> ELAN4_MINOR_SHIFT) -+ -+/* -+ * Called by init_module() for each card discovered on PCI. 
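-+ * (One ELAN4_DEV is allocated per card; at most ELAN4_MAX_CONTROLLER
-+ * devices are attached and any further cards are reported and skipped.)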
-+ */ -+static int -+elan4_attach_device (int instance, struct pci_dev *pdev) -+{ -+ ELAN4_DEV *dev; -+ int res; -+ -+ KMEM_ALLOC (dev, ELAN4_DEV *, sizeof (ELAN4_DEV), 1); -+ if ((dev == NULL)) -+ return (-ENOMEM); -+ memset (dev, 0, sizeof (ELAN4_DEV)); -+ -+ /* setup os dependent section of ELAN4_DEV */ -+ dev->dev_instance = instance; -+ dev->dev_osdep.pdev = pdev; -+ -+#if !defined(IOPROC_PATCH_APPLIED) -+ printk ("elan%d: pinning down pages as no ioproc patch\n", dev->dev_instance); -+ -+ default_features |= ELAN4_FEATURE_NO_IOPROC | ELAN4_FEATURE_PIN_DOWN; -+ -+ /* Also change this flag so as to make the /proc entry consistent */ -+ user_ioproc_enabled = 0; -+#endif -+ -+ /* initialise the devinfo */ -+ pci_read_config_word (dev->dev_osdep.pdev, PCI_VENDOR_ID, &dev->dev_devinfo.dev_vendor_id); -+ pci_read_config_word (dev->dev_osdep.pdev, PCI_DEVICE_ID, &dev->dev_devinfo.dev_device_id); -+ pci_read_config_byte (dev->dev_osdep.pdev, PCI_REVISION_ID, &dev->dev_devinfo.dev_revision_id); -+ -+ dev->dev_devinfo.dev_rail = instance; -+ dev->dev_devinfo.dev_driver_version = ELAN4_DRIVER_VERSION; -+ dev->dev_devinfo.dev_num_down_links_value = 0; -+ dev->dev_devinfo.dev_params_mask = (1 << ELAN4_PARAM_DRIVER_FEATURES); -+ dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] = default_features; -+ -+ dev->dev_position.pos_mode = ELAN_POS_UNKNOWN; -+ -+ dev->dev_regs_phys = bar2phys (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS)); -+ dev->dev_sdram_phys = bar2phys (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM)); -+ -+ /* initialise the data structures and map the device */ -+ if ((res = elan4_initialise_device (dev)) != 0) -+ { -+ kfree (dev); -+ return res; -+ } -+ -+#if defined(CONFIG_DEVFS_FS) -+ { -+ char name[16]; -+ -+ sprintf (name, "control%d", dev->dev_instance); -+ dev->dev_osdep.devfs_control = devfs_register(devfs_handle, name, DEVFS_FL_NONE, ELAN4_MAJOR, -+ dev->dev_instance | (ELAN4_MINOR_CONTROL << ELAN4_MINOR_SHIFT), S_IFCHR | S_IRUSR | S_IWUSR, -+ &elan4_fops, NULL); -+ sprintf (name, "sdram%d", dev->dev_instance); -+ dev->dev_osdep.devfs_sdram = devfs_register(devfs_handle, name, DEVFS_FL_NONE, ELAN4_MAJOR, -+ dev->dev_instance | (ELAN4_MINOR_MEM << ELAN4_MINOR_SHIFT), S_IFCHR | S_IRUSR|S_IWUSR | S_IRGRP|S_IWGRP | S_IROTH|S_IWOTH, -+ &elan4_fops, NULL); -+ sprintf (name, "user%d", dev->dev_instance); -+ dev->dev_osdep.devfs_user = devfs_register(devfs_handle, name, DEVFS_FL_NONE, ELAN4_MAJOR, -+ dev->dev_instance | (ELAN4_MINOR_USER << ELAN4_MINOR_SHIFT), S_IFCHR | S_IRUSR|S_IWUSR | S_IRGRP|S_IWGRP | S_IROTH|S_IWOTH, -+ &elan4_fops, NULL); -+ } -+#endif -+ -+ /* add the procfs entry */ -+ elan4_procfs_device_init (dev); -+ -+ /* allow the device to be referenced now */ -+ elan4_devices[instance] = dev; -+ -+ if ((disabled & (1 << instance)) == 0) -+ { -+ if (elan4_start_device (dev) != 0) -+ { -+ printk ("elan%d: auto-start of device failed\n", dev->dev_instance); -+ -+ elan4_detach_device (dev); -+ return (-ENXIO); -+ } -+ -+ dev->dev_state = ELAN4_STATE_STARTED; -+ } -+ -+#if defined (__sparc) -+ printk ("elan%d: at pci %s (irq = %s)\n", instance, pdev->slot_name, __irq_itoa(pdev->irq)); -+#else -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) -+ printk ("elan%d: at pci %s (irq = %d)\n", instance, pdev->slot_name, pdev->irq); -+#else -+ printk ("elan%d: at pci %s (irq = %d)\n", instance, pci_name (pdev), pdev->irq); -+#endif -+#endif -+ -+ return (0); -+} -+ -+/* -+ * Called by cleanup_module() for each board found on PCI. 
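-+ * (Also called on the attach failure path, so it must cope with a device
-+ * which never reached ELAN4_STATE_STARTED.)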
-+ */
-+static void
-+elan4_detach_device (ELAN4_DEV *dev)
-+{
-+ /* stop the chip and free off its resources */
-+ if (dev->dev_state == ELAN4_STATE_STARTED)
-+ elan4_stop_device (dev);
-+
-+ elan4_devices[dev->dev_instance] = NULL;
-+
-+#if defined(CONFIG_DEVFS_FS)
-+ devfs_unregister (dev->dev_osdep.devfs_control);
-+ devfs_unregister (dev->dev_osdep.devfs_sdram);
-+ devfs_unregister (dev->dev_osdep.devfs_user);
-+#endif
-+
-+ /* remove the procfs entry */
-+ elan4_procfs_device_fini (dev);
-+
-+ /* unmap the device and finalise the data structures */
-+ elan4_finalise_device (dev);
-+
-+ KMEM_FREE (dev, sizeof(*dev));
-+}
-+
-+/*
-+ * Maintain reference counts on the device
-+ */
-+ELAN4_DEV *
-+elan4_reference_device (int instance, int state)
-+{
-+ ELAN4_DEV *dev = elan4_devices[instance];
-+
-+ if (dev == NULL)
-+ return (NULL);
-+
-+ kmutex_lock (&dev->dev_lock);
-+
-+ if ((dev->dev_state & state) == 0)
-+ {
-+ kmutex_unlock (&dev->dev_lock);
-+ return (NULL);
-+ }
-+
-+ dev->dev_references++;
-+ kmutex_unlock (&dev->dev_lock);
-+
-+#ifdef MODULE
-+ MOD_INC_USE_COUNT;
-+#endif
-+
-+#ifdef CONFIG_MPSAS
-+ sas_set_position(dev);
-+#endif
-+
-+ return (dev);
-+}
-+
-+void
-+elan4_dereference_device (ELAN4_DEV *dev)
-+{
-+ kmutex_lock (&dev->dev_lock);
-+ dev->dev_references--;
-+ kmutex_unlock (&dev->dev_lock);
-+
-+#ifdef MODULE
-+ MOD_DEC_USE_COUNT;
-+#endif
-+}
-+
-+static void
-+elan4_shutdown_devices(int panicing)
-+{
-+ ELAN4_DEV *dev;
-+ unsigned long flags;
-+ register int i;
-+
-+ local_irq_save (flags);
-+ for (i = 0; i < ELAN4_MAX_CONTROLLER; i++)
-+ {
-+ if ((dev = elan4_devices[i]) != NULL)
-+ {
-+ printk(KERN_INFO "elan%d: forcing link into reset\n", dev->dev_instance);
-+
-+ /* set the inputters to discard everything */
-+ if (! panicing) spin_lock (&dev->dev_haltop_lock);
-+
-+ if (dev->dev_discard_lowpri_count++ == 0)
-+ elan4_set_schedstatus (dev, 0);
-+ if (dev->dev_discard_highpri_count++ == 0)
-+ elan4_set_schedstatus (dev, 0);
-+
-+ if (! panicing) spin_unlock (&dev->dev_haltop_lock);
-+
-+ /* ideally we'd like to halt all the outputters too,
-+ * however this will prevent the kernel comms flushing
-+ * from working correctly .....
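-+ * (so we settle for discarding all incoming traffic via the inputter
-+ * discard counts bumped above).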
-+ */
-+ }
-+ }
-+ local_irq_restore (flags);
-+}
-+
-+/*
-+ * /dev/elan4/controlX - control device
-+ *
-+ */
-+static int
-+control_open (struct inode *inode, struct file *file)
-+{
-+ ELAN4_DEV *dev = elan4_reference_device (ELAN4_DEVICE(inode), ELAN4_STATE_STOPPED | ELAN4_STATE_STARTED);
-+ CONTROL_PRIVATE *pr;
-+
-+ if (dev == NULL)
-+ return (-ENXIO);
-+
-+ KMEM_ALLOC (pr, CONTROL_PRIVATE *, sizeof (CONTROL_PRIVATE), 1);
-+ if ((pr == NULL))
-+ {
-+ elan4_dereference_device (dev);
-+
-+ return (-ENOMEM);
-+ }
-+
-+ PRINTF (DBG_USER, DBG_FILE, "control_open: dev=%p pr=%p\n", dev, pr);
-+
-+ pr->pr_dev = dev;
-+ pr->pr_boundary_scan = 0;
-+
-+ file->private_data = (void *) pr;
-+
-+ return (0);
-+}
-+
-+static int
-+control_release (struct inode *inode, struct file *file)
-+{
-+ CONTROL_PRIVATE *pr = (CONTROL_PRIVATE *) file->private_data;
-+ ELAN4_DEV *dev = pr->pr_dev;
-+
-+ PRINTF (DBG_DEVICE, DBG_FILE, "control_release: pr=%p\n", pr);
-+
-+ //if (pr->pr_boundary_scan)
-+ // elan4_clear_boundary_scan (dev, pr);
-+
-+ elan4_dereference_device (dev);
-+
-+ KMEM_FREE (pr, sizeof(*pr));
-+
-+ return (0);
-+}
-+
-+static int
-+control_ioctl (struct inode *inode, struct file *file,
-+ unsigned int cmd, unsigned long arg)
-+{
-+ CONTROL_PRIVATE *pr = (CONTROL_PRIVATE *) file->private_data;
-+
-+ PRINTF (DBG_DEVICE, DBG_FILE, "control_ioctl: cmd=%x arg=%lx\n", cmd, arg);
-+
-+ switch (cmd)
-+ {
-+ case ELAN4IO_DEVINFO:
-+ if (copy_to_user ((void *) arg, &pr->pr_dev->dev_devinfo, sizeof (ELAN_DEVINFO)))
-+ return (-EFAULT);
-+ return (0);
-+
-+ case ELAN4IO_GET_POSITION:
-+ {
-+ ELAN_POSITION pos;
-+
-+ elan4_get_position (pr->pr_dev, &pos);
-+
-+ if (copy_to_user ((void *) arg, &pos, sizeof (ELAN_POSITION)))
-+ return (-EFAULT);
-+
-+ return (0);
-+ }
-+
-+ case ELAN4IO_SET_POSITION:
-+ {
-+ ELAN_POSITION pos;
-+
-+ if (copy_from_user (&pos, (void *) arg, sizeof (ELAN_POSITION)))
-+ return (-EFAULT);
-+
-+ return (elan4_set_position (pr->pr_dev, &pos));
-+ }
-+
-+ case ELAN4IO_OLD_GET_PARAMS:
-+ {
-+ ELAN_PARAMS params;
-+ unsigned short mask;
-+
-+ elan4_get_params (pr->pr_dev, &params, &mask);
-+
-+ if (copy_to_user ((void *) arg, &params, sizeof (ELAN_PARAMS)))
-+ return (-EFAULT);
-+
-+ return (0);
-+ }
-+
-+ case ELAN4IO_OLD_SET_PARAMS:
-+ {
-+ ELAN_PARAMS params;
-+
-+ if (copy_from_user (&params, (void *) arg, sizeof (ELAN_PARAMS)))
-+ return (-EFAULT);
-+
-+ elan4_set_params (pr->pr_dev, &params, 3);
-+
-+ return (0);
-+ }
-+
-+ case ELAN4IO_SET_PARAMS:
-+ {
-+ ELAN4IO_PARAMS_STRUCT args;
-+
-+ if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_PARAMS_STRUCT)))
-+ return (-EFAULT);
-+
-+ elan4_set_params (pr->pr_dev, &args.p_params, args.p_mask);
-+
-+ return (0);
-+ }
-+
-+ case ELAN4IO_GET_PARAMS:
-+ {
-+ ELAN4IO_PARAMS_STRUCT args;
-+
-+ elan4_get_params (pr->pr_dev, &args.p_params, &args.p_mask);
-+
-+ if (copy_to_user ((void *) arg, &args, sizeof (ELAN4IO_PARAMS_STRUCT)))
-+ return (-EFAULT);
-+
-+ return (0);
-+ }
-+ }
-+
-+ return (-EINVAL);
-+}
-+
-+static int
-+control_mmap (struct file *file, struct vm_area_struct *vma)
-+{
-+ CONTROL_PRIVATE *pr = (CONTROL_PRIVATE *) file->private_data;
-+ unsigned bar = OFF_TO_BAR (vma->vm_pgoff << PAGE_SHIFT);
-+ unsigned long off = OFF_TO_OFFSET (vma->vm_pgoff << PAGE_SHIFT);
-+ long len = vma->vm_end - vma->vm_start;
-+
-+ PRINTF (DBG_USER, DBG_FILE, "control_mmap: pr=%p bar=%x off=%x\n", pr, bar, off);
-+
-+ /* check bar number and translate the standard pseudo bars */
-+ switch (bar)
-+ {
-+ case ELAN4_BAR_SDRAM:
-+ case ELAN4_BAR_REGISTERS:
-+ break;
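-+ /* any other (pseudo) bar cannot be mapped through the control device */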
-+
-+ default:
-+ return (-EINVAL);
-+ }
-+
-+ if (off < 0 || (off + len) > pci_resource_len (pr->pr_dev->dev_osdep.pdev, bar))
-+ return (-EINVAL);
-+
-+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-+
-+ if (__io_remap_page_range (vma->vm_start, pci_resource_start (pr->pr_dev->dev_osdep.pdev, bar) + off, len, vma->vm_page_prot))
-+ return (-EAGAIN);
-+
-+ return (0);
-+}
-+
-+/*
-+ * /dev/elan4/sdramX - sdram access device
-+ */
-+static void
-+mem_freepage (MEM_PRIVATE *pr, MEM_PAGE *pg)
-+{
-+ PRINTF (DBG_USER, DBG_MEM, "mem_freepage: pr=%p pgoff=%lx pg=%p ref=%d\n", pr, pg->pg_pgoff, pg, pg->pg_ref);
-+
-+ elan4_sdram_free (pr->pr_dev, pg->pg_addr, SDRAM_PAGE_SIZE);
-+
-+ KMEM_FREE(pg, sizeof(*pg));
-+}
-+
-+static MEM_PAGE *
-+mem_getpage (MEM_PRIVATE *pr, unsigned long pgoff)
-+{
-+ int hashval = MEM_HASH (pgoff);
-+ MEM_PAGE *npg = NULL;
-+ MEM_PAGE *pg;
-+
-+ ASSERT ((pgoff & SDRAM_PGOFF_OFFSET) == 0);
-+
-+ PRINTF (DBG_USER, DBG_MEM, "mem_getpage: pr=%p pgoff=%lx\n", pr, pgoff);
-+
-+ again:
-+ spin_lock (&pr->pr_lock);
-+ for (pg = pr->pr_pages[hashval]; pg; pg = pg->pg_next)
-+ if (pg->pg_pgoff == pgoff)
-+ break;
-+
-+ if (pg != NULL)
-+ {
-+ PRINTF (DBG_USER, DBG_MEM, "mem_getpage: pr=%p pgoff=%lx -> found %p addr=%x\n", pr, pgoff, pg, pg->pg_addr);
-+
-+ pg->pg_ref++;
-+ spin_unlock (&pr->pr_lock);
-+
-+ if (npg != NULL) /* we'd raced and someone else had created */
-+ mem_freepage (pr, npg); /* this page - so free off our new one */
-+ return (pg);
-+ }
-+
-+ if (npg != NULL) /* didn't find the page, so insert the */
-+ { /* new one we've just created */
-+ npg->pg_next = pr->pr_pages[hashval];
-+ pr->pr_pages[hashval] = npg;
-+
-+ spin_unlock (&pr->pr_lock);
-+ return (npg);
-+ }
-+
-+ spin_unlock (&pr->pr_lock); /* drop spinlock before creating a new page */
-+
-+ KMEM_ALLOC (npg, MEM_PAGE *, sizeof (MEM_PAGE), 1);
-+ if ((npg == NULL))
-+ return (NULL);
-+
-+ if ((npg->pg_addr = elan4_sdram_alloc (pr->pr_dev, SDRAM_PAGE_SIZE)) == 0)
-+ {
-+ KMEM_FREE(npg, sizeof(*npg));
-+ return (NULL);
-+ }
-+
-+#ifndef CONFIG_MPSAS
-+ /* zero the page before returning it to the user */
-+ elan4_sdram_zeroq_sdram (pr->pr_dev, npg->pg_addr, SDRAM_PAGE_SIZE);
-+#endif
-+
-+ npg->pg_pgoff = pgoff;
-+ npg->pg_ref = 1;
-+
-+ /* created a new page - so have to rescan before inserting it */
-+ goto again;
-+}
-+
-+static void
-+mem_droppage (MEM_PRIVATE *pr, unsigned long pgoff, int dontfree)
-+{
-+ MEM_PAGE **ppg;
-+ MEM_PAGE *pg;
-+
-+ spin_lock (&pr->pr_lock);
-+ for (ppg = &pr->pr_pages[MEM_HASH(pgoff)]; *ppg; ppg = &(*ppg)->pg_next)
-+ if ((*ppg)->pg_pgoff == pgoff)
-+ break;
-+
-+ pg = *ppg;
-+
-+ ASSERT (*ppg != NULL);
-+
-+ PRINTF (DBG_USER, DBG_MEM, "mem_droppage: pr=%p pgoff=%lx pg=%p ref=%d dontfree=%d\n", pr, pgoff, (*ppg), (*ppg)->pg_ref, dontfree);
-+
-+ if (--pg->pg_ref == 0 && !dontfree)
-+ {
-+ *ppg = pg->pg_next;
-+
-+ mem_freepage (pr, pg);
-+ }
-+
-+ spin_unlock (&pr->pr_lock);
-+}
-+
-+static int
-+mem_open (struct inode *inode, struct file *file)
-+{
-+ ELAN4_DEV *dev = elan4_reference_device (ELAN4_DEVICE(inode), ELAN4_STATE_STARTED);
-+ MEM_PRIVATE *pr;
-+ register int i;
-+
-+ if (dev == NULL)
-+ return (-ENXIO);
-+
-+ KMEM_ALLOC (pr, MEM_PRIVATE *, sizeof (MEM_PRIVATE), 1);
-+ if ((pr == NULL))
-+ {
-+ elan4_dereference_device (dev);
-+ return (-ENOMEM);
-+ }
-+
-+ spin_lock_init (&pr->pr_lock);
-+ pr->pr_dev = dev;
-+ for (i = 0; i < MEM_HASH_SIZE; i++)
-+ pr->pr_pages[i] = NULL;
-+
-+ file->private_data = (void *) pr;
-+
-+ return (0);
-+}
-+
-+static int
-+mem_release (struct inode *node, struct file *file) -+{ -+ MEM_PRIVATE *pr = (MEM_PRIVATE *) file->private_data; -+ MEM_PAGE *pg, *next; -+ int i; -+ -+ /* free off any pages that we'd allocated */ -+ spin_lock (&pr->pr_lock); -+ for (i = 0; i < MEM_HASH_SIZE; i++) -+ { -+ for (pg = pr->pr_pages[i]; pg; pg = next) -+ { -+ next = pg->pg_next; -+ mem_freepage (pr, pg); -+ } -+ } -+ spin_unlock (&pr->pr_lock); -+ -+ elan4_dereference_device (pr->pr_dev); -+ KMEM_FREE(pr, sizeof(*pr)); -+ -+ return (0); -+} -+ -+static int -+mem_ioctl (struct inode *inode, struct file *file, -+ unsigned int cmd, unsigned long arg) -+{ -+ return (-EINVAL); -+} -+ -+static void -+mem_vma_open (struct vm_area_struct *vma) -+{ -+ MEM_PRIVATE *pr = (MEM_PRIVATE *) vma->vm_private_data; -+ unsigned long addr; -+ unsigned long pgoff; -+ -+ PRINTF (DBG_USER, DBG_MEM, "mem_vma_open: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n", -+ vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file); -+ -+ for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++) -+ mem_getpage (pr, pgoff & SDRAM_PGOFF_MASK); -+} -+ -+static void -+mem_vma_close (struct vm_area_struct *vma) -+{ -+ MEM_PRIVATE *pr = (MEM_PRIVATE *) vma->vm_private_data; -+ unsigned long addr; -+ unsigned long pgoff; -+ -+ PRINTF (DBG_USER, DBG_MEM, "mem_vma_close: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n", -+ vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file); -+ -+ /* NOTE: the call to close may not have the same vm_start/vm_end values as -+ * were passed into mmap()/open() - since if an partial unmap had occurred -+ * then the vma could have been shrunk or even split. -+ * -+ * if a the vma is split then an vma_open() will be called for the top -+ * portion - thus causing the reference counts to become incorrect. -+ * -+ * We drop the reference to any pages we're notified about - so they get freed -+ * earlier than when the device is finally released. -+ */ -+ for (pgoff = vma->vm_pgoff, addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++) -+ mem_droppage (pr, pgoff & SDRAM_PGOFF_MASK, 0); -+} -+ -+struct vm_operations_struct mem_vm_ops = { -+ open: mem_vma_open, -+ close: mem_vma_close, -+}; -+ -+static int -+mem_mmap (struct file *file, struct vm_area_struct *vma) -+{ -+ MEM_PRIVATE *pr = (MEM_PRIVATE *) file->private_data; -+ MEM_PAGE *pg; -+ unsigned long addr; -+ unsigned long pgoff; -+ -+ PRINTF (DBG_USER, DBG_MEM, "mem_mmap: vma=%p start=%lx end=%lx pgoff=%lx file=%p\n", -+ vma, vma->vm_start, vma->vm_end, vma->vm_pgoff, file); -+ -+ /* Don't allow these pages to be swapped out of dumped */ -+ vma->vm_flags |= (VM_RESERVED | VM_IO); -+ -+ vma->vm_ops = &mem_vm_ops; -+ vma->vm_file = file; -+ vma->vm_private_data = (void *) pr; -+ -+ for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++) -+ { -+ if ((pg = mem_getpage (pr, pgoff & SDRAM_PGOFF_MASK)) == NULL) -+ goto failed; -+ -+ PRINTF (DBG_USER, DBG_MEM, "mem_mmap: addr %lx -> pg=%p sdram=%x+%x bar=%lx\n", -+ addr, pg, pg->pg_addr, (pgoff & SDRAM_PGOFF_OFFSET) * PAGE_SIZE, -+ pci_resource_start (pr->pr_dev->dev_osdep.pdev, ELAN4_BAR_SDRAM)); -+ -+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); -+ -+ if (! 
(pr->pr_dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_NO_WRITE_COMBINE)) -+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); -+ -+ if (__io_remap_page_range (addr, pci_resource_start (pr->pr_dev->dev_osdep.pdev, ELAN4_BAR_SDRAM) + -+ pg->pg_addr + (pgoff & SDRAM_PGOFF_OFFSET) * PAGE_SIZE, PAGE_SIZE, vma->vm_page_prot)) -+ { -+ mem_droppage (pr, pgoff & SDRAM_PGOFF_MASK, 0); /* drop our reference to this page */ -+ goto failed; -+ } -+ -+#if defined(conditional_schedule) -+ conditional_schedule(); -+#endif -+ } -+ -+ return (0); -+ -+ failed: -+ /* free of any pages we've already allocated/referenced */ -+ while (pgoff-- > vma->vm_pgoff) -+ mem_droppage (pr, pgoff & SDRAM_PGOFF_MASK, 0); -+ -+ return (-ENOMEM); -+} -+ -+int -+mem_pteload (struct vm_area_struct *vma, unsigned long maddr, USER_CTXT *uctx, E4_Addr eaddr, int perm) -+{ -+ MEM_PRIVATE *pr = (MEM_PRIVATE *) vma->vm_private_data; -+ ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev; -+ unsigned long pgoff = vma->vm_pgoff + ((maddr - vma->vm_start) >> PAGE_SHIFT); -+ sdramaddr_t pgaddr; -+ MEM_PAGE *pg; -+ register int i, res; -+ -+ if (pr->pr_dev != dev) -+ return -EINVAL; -+ -+ if ((pg = mem_getpage (pr, pgoff & SDRAM_PGOFF_MASK)) == NULL) -+ return -ENOMEM; -+ -+ pgaddr = pg->pg_addr + ((pgoff & SDRAM_PGOFF_OFFSET) << PAGE_SHIFT); -+ -+ if (! elan4mmu_sdram_aliascheck (&uctx->uctx_ctxt, eaddr, pgaddr)) -+ return -EINVAL; -+ -+ for (i = 0; i < PAGE_SIZE; i += (1 << dev->dev_pageshift[0])) -+ { -+ E4_uint64 newpte = ((pgaddr + i) >> PTE_PADDR_SHIFT) | PTE_SetPerm (perm); -+ -+ if ((res = elan4mmu_pteload (&uctx->uctx_ctxt, 0, eaddr + i, HE_TYPE_SDRAM, newpte)) < 0) -+ return res; -+ } -+ -+ mem_droppage (pr, pgoff & SDRAM_PGOFF_MASK, 0); -+ -+ return 0; -+} -+ -+ -+/* -+ * /dev/elan4/userX - control device -+ * -+ */ -+static inline void -+user_private_free (USER_PRIVATE *pr) -+{ -+ ELAN4_DEV *dev = pr->pr_uctx->uctx_ctxt.ctxt_dev; -+ -+ ASSERT (atomic_read (&pr->pr_ref) == 0); -+ -+ user_free (pr->pr_uctx); -+ KMEM_FREE(pr, sizeof(*pr)); -+ -+ elan4_dereference_device (dev); -+} -+ -+#if defined(IOPROC_PATCH_APPLIED) -+static void -+user_ioproc_release (void *arg, struct mm_struct *mm) -+{ -+ USER_PRIVATE *pr = (USER_PRIVATE *) arg; -+ -+ PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_release: ref=%d\n", atomic_read (&pr->pr_ref)); -+ -+ elan4mmu_invalidate_ctxt (&pr->pr_uctx->uctx_ctxt); -+ -+ pr->pr_mm = NULL; -+ -+ if (atomic_dec_and_test (&pr->pr_ref)) -+ user_private_free (pr); -+} -+ -+/* -+ * On 2.4 kernels we get passed a mm_struct, whereas on 2.6 kernels -+ * we get the vma which is more usefull -+ */ -+#if defined(IOPROC_MM_STRUCT_ARG) -+static void -+user_ioproc_sync_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end) -+{ -+ USER_PRIVATE *pr = (USER_PRIVATE *) arg; -+ -+ PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_sync_range: start=%lx end=%lx\n", start, end); -+ -+ /* XXXX: this is intended to sync the modified bit from our page tables, -+ * into the main cpu's modified bits - however since we do not -+ * syncronize our modified bit on a ioproc_invalidate_page() call, -+ * then it could get lost if we modify the page after the last -+ * modification and writepage has occurred. Hence we invalidate -+ * all translations and allow it to refault. 
-+ */ -+ -+ user_unload_main (pr->pr_uctx, start, end - start); -+} -+ -+static void -+user_ioproc_invalidate_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end) -+{ -+ USER_PRIVATE *pr = (USER_PRIVATE *) arg; -+ -+ PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_invalidate_range: start=%lx end=%lx\n", start, end); -+ -+ user_unload_main (pr->pr_uctx, start, end - start); -+} -+ -+static void -+user_ioproc_update_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end) -+{ -+ USER_PRIVATE *pr = (USER_PRIVATE *) arg; -+ struct vm_area_struct *vma; -+ -+ if (pr->pr_uctx->uctx_ctxt.ctxt_dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_NO_IOPROC_UPDATE) -+ return; -+ -+ PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_update_range: start=%lx end=%lx\n", start, end); -+ -+ vma = find_vma_intersection (mm, start, end); -+ -+ user_update_main (pr->pr_uctx, mm, vma, start, end - start); -+} -+ -+static void -+user_ioproc_change_protection (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end, pgprot_t newprot) -+{ -+ USER_PRIVATE *pr = (USER_PRIVATE *) arg; -+ -+ PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_change_protection: start=%lx end=%lx\n", start, end); -+ -+ user_unload_main (pr->pr_uctx, start, end - start); -+} -+ -+ -+#else -+ -+static void -+user_ioproc_sync_range (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end) -+{ -+ USER_PRIVATE *pr = (USER_PRIVATE *) arg; -+ -+ PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_sync_range: start=%lx end=%lx\n", start, end); -+ -+ /* XXXX: this is intended to sync the modified bit from our page tables, -+ * into the main cpu's modified bits - however since we do not -+ * syncronize our modified bit on a ioproc_invalidate_page() call, -+ * then it could get lost if we modify the page after the last -+ * modification and writepage has occurred. Hence we invalidate -+ * all translations and allow it to refault. 
-+     */
-+
-+    user_unload_main (pr->pr_uctx, start, end - start);
-+}
-+
-+static void
-+user_ioproc_invalidate_range (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end)
-+{
-+    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
-+
-+    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_invalidate_range: start=%lx end=%lx\n", start, end);
-+
-+    user_unload_main (pr->pr_uctx, start, end - start);
-+}
-+
-+static void
-+user_ioproc_update_range (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end)
-+{
-+    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
-+
-+    if (pr->pr_uctx->uctx_ctxt.ctxt_dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_NO_IOPROC_UPDATE)
-+        return;
-+
-+    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_update_range: start=%lx end=%lx\n", start, end);
-+
-+    user_update_main (pr->pr_uctx, vma->vm_mm, vma, start, end - start);
-+}
-+
-+static void
-+user_ioproc_change_protection (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot)
-+{
-+    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
-+
-+    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_change_protection: start=%lx end=%lx\n", start, end);
-+
-+    user_unload_main (pr->pr_uctx, start, end - start);
-+}
-+#endif /* defined(IOPROC_MM_STRUCT_ARG) */
-+
-+static void
-+user_ioproc_sync_page (void *arg, struct vm_area_struct *vma, unsigned long addr)
-+{
-+    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
-+
-+    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_sync_page: addr=%lx\n", addr);
-+
-+    user_unload_main (pr->pr_uctx, addr & PAGE_MASK, PAGE_SIZE);
-+}
-+
-+static void
-+user_ioproc_invalidate_page (void *arg, struct vm_area_struct *vma, unsigned long addr)
-+{
-+    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
-+
-+    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_invalidate_page: addr=%lx\n", addr);
-+
-+    user_unload_main (pr->pr_uctx, addr & PAGE_MASK, PAGE_SIZE);
-+}
-+
-+static void
-+user_ioproc_update_page (void *arg, struct vm_area_struct *vma, unsigned long addr)
-+{
-+    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
-+
-+    if (pr->pr_uctx->uctx_ctxt.ctxt_dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_NO_IOPROC_UPDATE)
-+        return;
-+
-+    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_update_page: addr=%lx\n", addr);
-+
-+    user_update_main (pr->pr_uctx, vma->vm_mm, vma, addr & PAGE_MASK, PAGE_SIZE);
-+}
-+#endif /* defined(IOPROC_PATCH_APPLIED) */
-+
-+static int
-+user_open (struct inode *inode, struct file *file)
-+{
-+    ELAN4_DEV *dev;
-+    USER_PRIVATE *pr;
-+    USER_CTXT *uctx;
-+
-+    PRINTF (DBG_USER, DBG_FILE, "user_open: mm=%p users=%d count=%d\n", current->mm,
-+            atomic_read (&current->mm->mm_users), atomic_read (&current->mm->mm_count));
-+
-+    if ((dev = elan4_reference_device (ELAN4_DEVICE(inode), ELAN4_STATE_STARTED)) == NULL)
-+        return (-ENXIO);
-+
-+    KMEM_ALLOC (pr, USER_PRIVATE *, sizeof (USER_PRIVATE), 1);
-+    if ((pr == NULL))
-+    {
-+        elan4_dereference_device (dev);
-+        return (-ENOMEM);
-+    }
-+
-+    uctx = user_alloc (dev);
-+
-+    if (IS_ERR(uctx))
-+    {
-+        elan4_dereference_device (dev);
-+        KMEM_FREE(pr, sizeof(*pr));
-+
-+        return PTR_ERR(uctx);
-+    }
-+
-+    /* initialise refcnt to 1 - one for "file" */
-+    atomic_set (&pr->pr_ref, 1);
-+
-+    pr->pr_uctx = uctx;
-+    pr->pr_mm   = current->mm;
-+
-+#if defined(IOPROC_PATCH_APPLIED)
-+    if (! (uctx->uctx_ctxt.ctxt_features & ELAN4_FEATURE_NO_IOPROC))
-+    {
-+        /* register an ioproc callback to notify us of translation changes */
-+        pr->pr_ioproc.arg               = (void *) pr;
-+        pr->pr_ioproc.release           = user_ioproc_release;
-+        pr->pr_ioproc.sync_range        = user_ioproc_sync_range;
-+        pr->pr_ioproc.invalidate_range  = user_ioproc_invalidate_range;
-+        pr->pr_ioproc.update_range      = user_ioproc_update_range;
-+        pr->pr_ioproc.change_protection = user_ioproc_change_protection;
-+        pr->pr_ioproc.sync_page         = user_ioproc_sync_page;
-+        pr->pr_ioproc.invalidate_page   = user_ioproc_invalidate_page;
-+        pr->pr_ioproc.update_page       = user_ioproc_update_page;
-+
-+        /* add an extra reference for the ioproc ops */
-+        atomic_inc (&pr->pr_ref);
-+
-+        spin_lock (&current->mm->page_table_lock);
-+        ioproc_register_ops (current->mm, &pr->pr_ioproc);
-+        spin_unlock (&current->mm->page_table_lock);
-+    }
-+#endif
-+
-+    file->private_data = (void *) pr;
-+
-+    return (0);
-+}
-+
-+static int
-+user_release (struct inode *inode, struct file *file)
-+{
-+    USER_PRIVATE *pr = (USER_PRIVATE *) file->private_data;
-+
-+    PRINTF (pr->pr_uctx, DBG_FILE, "user_release: ref=%d\n", atomic_read (&pr->pr_ref));
-+
-+    if (atomic_dec_and_test (&pr->pr_ref))
-+        user_private_free (pr);
-+
-+    return (0);
-+}
-+
-+static int
-+user_ioctl (struct inode *inode, struct file *file,
-+            unsigned int cmd, unsigned long arg)
-+{
-+    USER_PRIVATE *pr   = (USER_PRIVATE *) file->private_data;
-+    USER_CTXT    *uctx = pr->pr_uctx;
-+    int           res  = 0;
-+
-+    PRINTF (uctx, DBG_FILE, "user_ioctl: cmd=%x arg=%lx\n", cmd, arg);
-+
-+    if (current->mm != pr->pr_mm)
-+        return (-EINVAL);
-+
-+    switch (cmd)
-+    {
-+    case ELAN4IO_DEVINFO:
-+    {
-+        ELAN_DEVINFO devinfo = uctx->uctx_ctxt.ctxt_dev->dev_devinfo;
-+
-+        if ((devinfo.dev_params_mask & (1 << ELAN4_PARAM_DRIVER_FEATURES)) != 0)
-+            devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] = uctx->uctx_ctxt.ctxt_features;
-+
-+        if (copy_to_user ((void *) arg, &devinfo, sizeof (ELAN_DEVINFO)))
-+            return (-EFAULT);
-+        return (0);
-+    }
-+
-+    case ELAN4IO_POSITION:
-+    {
-+        ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev;
-+
-+        if (copy_to_user ((void *) arg, &dev->dev_position, sizeof (ELAN_POSITION)))
-+            return (-EFAULT);
-+        return (0);
-+    }
-+
-+    case ELAN4IO_FREE:
-+#if defined(IOPROC_PATCH_APPLIED)
-+        if (! (uctx->uctx_ctxt.ctxt_features & ELAN4_FEATURE_NO_IOPROC))
-+        {
-+            spin_lock (&current->mm->page_table_lock);
-+            if (pr->pr_mm != current->mm)
-+                spin_unlock (&current->mm->page_table_lock);
-+            else
-+            {
-+                ioproc_unregister_ops (current->mm, &pr->pr_ioproc);
-+                spin_unlock (&current->mm->page_table_lock);
-+
-+                user_ioproc_release (pr, current->mm);
-+            }
-+        }
-+#endif
-+        return (0);
-+
-+    case ELAN4IO_ATTACH:
-+    {
-+        ELAN_CAPABILITY *cap;
-+
-+        KMEM_ALLOC (cap, ELAN_CAPABILITY *, sizeof (ELAN_CAPABILITY), 1);
-+        if ((cap == NULL))
-+            return (-ENOMEM);
-+
-+        if (copy_from_user (cap, (void *) arg, sizeof (ELAN_CAPABILITY)))
-+            res = -EFAULT;
-+        else if ((res = user_attach (uctx, cap)) == 0 &&
-+                 copy_to_user ((void *) arg, cap, sizeof (ELAN_CAPABILITY)))
-+        {
-+            user_detach (uctx, cap);
-+            res = -EFAULT;
-+        }
-+
-+        KMEM_FREE(cap, sizeof(*cap));
-+        return (res);
-+    }
-+
-+    case ELAN4IO_DETACH:
-+    {
-+        ELAN_CAPABILITY *cap;
-+
-+        KMEM_ALLOC (cap, ELAN_CAPABILITY *, sizeof (ELAN_CAPABILITY), 1);
-+        if ((cap == NULL))
-+            return (-ENOMEM);
-+
-+        if (copy_from_user (cap, (void *) arg, sizeof (ELAN_CAPABILITY)))
-+            res = -EFAULT;
-+        else
-+            user_detach (uctx, cap);
-+
-+        KMEM_FREE(cap, sizeof(*cap));
-+        return (res);
-+    }
-+
-+    case ELAN4IO_BLOCK_INPUTTER:
-+        user_block_inputter (uctx, arg);
-+        return (0);
-+
-+    case ELAN4IO_ADD_P2PVP:
-+    {
-+        ELAN4IO_ADD_P2PVP_STRUCT *args;
-+
-+        KMEM_ALLOC (args, ELAN4IO_ADD_P2PVP_STRUCT *, sizeof (ELAN4IO_ADD_P2PVP_STRUCT), 1);
-+        if ((args == NULL))
-+            return (-ENOMEM);
-+
-+        if (copy_from_user (args, (void *) arg, sizeof (ELAN4IO_ADD_P2PVP_STRUCT)))
-+            res = -EFAULT;
-+        else
-+            res = user_add_p2pvp (uctx, args->vp_process, &args->vp_capability);
-+
-+        KMEM_FREE(args, sizeof(*args));
-+        return (res);
-+    }
-+
-+    case ELAN4IO_ADD_BCASTVP:
-+    {
-+        ELAN4IO_ADD_BCASTVP_STRUCT args;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ADD_BCASTVP_STRUCT)))
-+            return (-EFAULT);
-+
-+        return (user_add_bcastvp (uctx, args.vp_process, args.vp_lowvp, args.vp_highvp));
-+    }
-+
-+    case ELAN4IO_REMOVEVP:
-+        return (user_removevp (uctx, arg));
-+
-+    case ELAN4IO_SET_ROUTE:
-+    {
-+        ELAN4IO_ROUTE_STRUCT args;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ROUTE_STRUCT)))
-+            return (-EFAULT);
-+
-+        return (user_set_route (uctx, args.rt_process, &args.rt_route));
-+    }
-+
-+    case ELAN4IO_RESET_ROUTE:
-+    {
-+        ELAN4IO_ROUTE_STRUCT args;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ROUTE_STRUCT)))
-+            return (-EFAULT);
-+
-+        return (user_reset_route (uctx, args.rt_process));
-+    }
-+
-+    case ELAN4IO_GET_ROUTE:
-+    {
-+        ELAN4IO_ROUTE_STRUCT args;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ROUTE_STRUCT)))
-+            return (-EFAULT);
-+
-+        if ((res = user_get_route (uctx, args.rt_process, &args.rt_route)) == 0 &&
-+            copy_to_user ((void *) arg, &args, sizeof (ELAN4IO_ROUTE_STRUCT)))
-+            res = -EFAULT;
-+
-+        return (res);
-+    }
-+
-+    case ELAN4IO_CHECK_ROUTE:
-+    {
-+        ELAN4IO_ROUTE_STRUCT args;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ROUTE_STRUCT)))
-+            return (-EFAULT);
-+
-+        if ((res = user_check_route (uctx, args.rt_process, &args.rt_route, &args.rt_error)) == 0 &&
-+            copy_to_user ((void *) arg, &args, sizeof (ELAN4IO_ROUTE_STRUCT)))
-+            res = -EFAULT;
-+
-+        return (res);
-+    }
-+
-+    case ELAN4IO_ALLOCCQ:
-+    {
-+        ELAN4IO_ALLOCCQ_STRUCT args;
-+        USER_CQ *ucq;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ALLOCCQ_STRUCT)))
-+            return (-EFAULT);
-+
-+        ucq = user_alloccq (uctx, args.cq_size & CQ_SizeMask, args.cq_perm & CQ_PermissionMask,
-+                            (args.cq_type & ELAN4IO_CQ_TYPE_REORDER) ? UCQ_REORDER : 0);
-+        if (IS_ERR (ucq))
-+            return PTR_ERR (ucq);
-+
-+        args.cq_indx = elan4_cq2idx (ucq->ucq_cq);
-+
-+        if (copy_to_user ((void *) arg, &args, sizeof (ELAN4IO_ALLOCCQ_STRUCT)))
-+        {
-+            user_dropcq (uctx, ucq);
-+            return (-EFAULT);
-+        }
-+
-+        /* don't drop the reference on the cq until the context is freed,
-+         * or the caller explicitly frees the cq */
-+        return (0);
-+    }
-+
-+    case ELAN4IO_FREECQ:
-+    {
-+        USER_CQ *ucq;
-+        unsigned indx;
-+
-+        if (copy_from_user (&indx, (void *) arg, sizeof (unsigned)))
-+            return (-EFAULT);
-+
-+        if ((ucq = user_findcq (uctx, indx)) == NULL)    /* can't free unallocated cq */
-+            return (-EINVAL);
-+
-+        user_dropcq (uctx, ucq);                         /* drop the reference we've just taken */
-+
-+        if ((ucq->ucq_flags & UCQ_SYSTEM))               /* can't free device driver cq */
-+            return (-EINVAL);
-+
-+        user_dropcq (uctx, ucq);                         /* and the one held from the alloccq call */
-+
-+        return (0);
-+    }
-+
-+    case ELAN4IO_DUMPCQ:
-+    {
-+        ELAN4IO_DUMPCQ_STRUCT args;
-+        ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev;
-+        USER_CQ *ucq;
-+        void *buf;
-+        int i;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof(ELAN4IO_DUMPCQ_STRUCT)))
-+            return (-EFAULT);
-+
-+        if ((ucq = user_findcq (uctx, args.cq_indx)) == NULL)
-+            return (-EINVAL);
-+
-+        if (args.bufsize)
-+        {
-+            E4_uint32 usedBufSize = min(args.cq_size, args.bufsize);
-+
-+            KMEM_ALLOC (buf, void *, usedBufSize, 1);
-+
-+            if (buf == NULL)
-+                return (-ENOMEM);
-+
-+            for (i = 0; i < usedBufSize; i++)
-+                ((char *)buf)[i] = elan4_sdram_readb (dev, ucq->ucq_cq->cq_space + i);
-+
-+            if (copy_to_user((void *)args.buffer, buf, usedBufSize))
-+            {
-+                KMEM_FREE(buf, usedBufSize);
-+                return (-EFAULT);
-+            }
-+            KMEM_FREE(buf, usedBufSize);
-+            args.bufsize = usedBufSize;
-+        }
-+
-+        args.cq_size  = CQ_Size(ucq->ucq_cq->cq_size);
-+        args.cq_space = ucq->ucq_cq->cq_space;
-+
-+        if (copy_to_user((void *)arg, &args, sizeof(ELAN4IO_DUMPCQ_STRUCT)))
-+        {
-+            return (-EFAULT);
-+        }
-+
-+        user_dropcq (uctx, ucq);                         /* drop the reference we've just taken */
-+
-+        return (0);
-+    }
-+
-+    case ELAN4IO_SETPERM:
-+    {
-+        ELAN4IO_PERM_STRUCT args;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_PERM_STRUCT)))
-+            return (-EFAULT);
-+
-+        return (user_setperm (uctx, args.ps_maddr, args.ps_eaddr, args.ps_len, args.ps_perm));
-+    }
-+
-+    case ELAN4IO_CLRPERM:
-+    {
-+        ELAN4IO_PERM_STRUCT args;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_PERM_STRUCT)))
-+            return (-EFAULT);
-+
-+        user_clrperm (uctx, args.ps_eaddr, args.ps_len);
-+        return (0);
-+    }
-+
-+    case ELAN4IO_TRAPSIG:
-+    {
-+        ELAN4IO_TRAPSIG_STRUCT args;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_TRAPSIG_STRUCT)))
-+            return (-EFAULT);
-+
-+        pr->pr_uctx->uctx_trap_pid   = current->pid;
-+        pr->pr_uctx->uctx_trap_signo = args.ts_signo;
-+
-+        return (0);
-+    }
-+
-+    case ELAN4IO_TRAPHANDLER:
-+    {
-+        ELAN4IO_TRAPHANDLER_STRUCT args;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_TRAPHANDLER_STRUCT)))
-+            return (-EFAULT);
-+
-+        return (user_trap_handler (pr->pr_uctx, (ELAN4_USER_TRAP *)args.th_trapp, args.th_nticks));
-+    }
-+
-+    case ELAN4IO_REQUIRED_MAPPINGS:
-+    {
-+        ELAN4IO_REQUIRED_MAPPINGS_STRUCT args;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_REQUIRED_MAPPINGS_STRUCT)))
-+            return (-EFAULT);
-+
-+        pr->pr_uctx->uctx_upage_addr    = args.rm_upage_addr;
-+        pr->pr_uctx->uctx_trestart_addr = args.rm_trestart_addr;
-+
-+        return (0);
-+    }
-+
-+    case ELAN4IO_ALLOC_TRAP_QUEUES:
-+    {
-+        ELAN4IO_ALLOC_TRAP_QUEUES_STRUCT args;
-+
-+        if (copy_from_user (&args, (void 
*) arg, sizeof (ELAN4IO_ALLOC_TRAP_QUEUES_STRUCT))) -+ return (-EFAULT); -+ -+ return (user_alloc_trap_queues (uctx, args.tq_ndproc_traps, args.tq_neproc_traps, -+ args.tq_ntproc_traps, args.tq_nthreads, args.tq_ndmas)); -+ } -+ -+ case ELAN4IO_RESUME_EPROC_TRAP: -+ { -+ ELAN4IO_RESUME_EPROC_TRAP_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_RESUME_EPROC_TRAP_STRUCT))) -+ return (-EFAULT); -+ -+ return (user_resume_eproc_trap (pr->pr_uctx, args.rs_addr)); -+ } -+ -+ case ELAN4IO_RESUME_CPROC_TRAP: -+ { -+ ELAN4IO_RESUME_CPROC_TRAP_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_RESUME_CPROC_TRAP_STRUCT))) -+ return (-EFAULT); -+ -+ return (user_resume_cproc_trap (pr->pr_uctx, args.rs_indx)); -+ } -+ -+ case ELAN4IO_RESUME_DPROC_TRAP: -+ { -+ ELAN4IO_RESUME_DPROC_TRAP_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_RESUME_DPROC_TRAP_STRUCT))) -+ return (-EFAULT); -+ -+ return (user_resume_dproc_trap (pr->pr_uctx, &args.rs_desc)); -+ } -+ -+ case ELAN4IO_RESUME_TPROC_TRAP: -+ { -+ ELAN4IO_RESUME_TPROC_TRAP_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_RESUME_TPROC_TRAP_STRUCT))) -+ return (-EFAULT); -+ -+ return (user_resume_tproc_trap (pr->pr_uctx, &args.rs_regs)); -+ } -+ -+ case ELAN4IO_RESUME_IPROC_TRAP: -+ { -+ ELAN4IO_RESUME_IPROC_TRAP_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_RESUME_IPROC_TRAP_STRUCT))) -+ return (-EFAULT); -+ -+ return (user_resume_iproc_trap (pr->pr_uctx, args.rs_channel, args.rs_trans, -+ &args.rs_header, &args.rs_data)); -+ } -+ -+ case ELAN4IO_FLUSH_ICACHE: -+ elan4_flush_icache (&uctx->uctx_ctxt); -+ return (0); -+ -+ case ELAN4IO_STOP_CTXT: -+ if (arg) -+ user_swapout (uctx, UCTX_USER_STOPPED); -+ else -+ user_swapin (uctx, UCTX_USER_STOPPED); -+ return (0); -+ -+ case ELAN4IO_ALLOC_INTCOOKIE_TABLE: -+ { -+ ELAN_CAPABILITY *cap; -+ INTCOOKIE_TABLE *tbl; -+ -+ KMEM_ALLOC (cap, ELAN_CAPABILITY *, sizeof (ELAN_CAPABILITY), 1); -+ if ((cap == NULL)) -+ return (-ENOMEM); -+ -+ if (copy_from_user (cap, (void *) arg, sizeof (ELAN_CAPABILITY))) -+ res = -EFAULT; -+ else -+ { -+ tbl = intcookie_alloc_table(cap); -+ -+ if (tbl == NULL) -+ res = -ENOMEM; -+ else -+ { -+ /* Install the intcookie table we've just created */ -+ spin_lock (&uctx->uctx_spinlock); -+ if (uctx->uctx_intcookie_table != NULL) -+ res = -EBUSY; -+ else -+ uctx->uctx_intcookie_table = tbl; -+ spin_unlock (&uctx->uctx_spinlock); -+ -+ /* drop the table we created if there already was one */ -+ if (res != 0) -+ intcookie_free_table (tbl); -+ } -+ } -+ -+ KMEM_FREE(cap, sizeof(*cap)); -+ -+ return (res); -+ } -+ -+ case ELAN4IO_FREE_INTCOOKIE_TABLE: -+ { -+ INTCOOKIE_TABLE *tbl; -+ -+ spin_lock (&uctx->uctx_spinlock); -+ tbl = uctx->uctx_intcookie_table; -+ uctx->uctx_intcookie_table = NULL; -+ spin_unlock (&uctx->uctx_spinlock); -+ -+ if (tbl != NULL) -+ intcookie_free_table (tbl); -+ -+ return (tbl == NULL ? 
-EINVAL : 0);
-+    }
-+
-+    case ELAN4IO_ALLOC_INTCOOKIE:
-+    {
-+        /* For backwards compatibility with the old libs (pre 1.8.0)
-+         * we allocate an intcookie table on the first cookie
-+         * alloc if one hasn't been created already
-+         */
-+        if (uctx->uctx_intcookie_table == NULL)
-+        {
-+            ELAN_CAPABILITY *cap;
-+            INTCOOKIE_TABLE *tbl;
-+
-+            KMEM_ALLOC (cap, ELAN_CAPABILITY *, sizeof (ELAN_CAPABILITY), 1);
-+            if ((cap == NULL))
-+                return (-ENOMEM);
-+
-+            /* Create a dummy capability */
-+            elan_nullcap(cap);
-+
-+            /* Must be unique for each process on a node */
-+            cap->cap_mycontext = (int) ELAN4_TASK_HANDLE();
-+
-+            /* Create a new intcookie table */
-+            tbl = intcookie_alloc_table(cap);
-+
-+            /* Hang intcookie table off uctx */
-+            spin_lock (&uctx->uctx_spinlock);
-+            if (uctx->uctx_intcookie_table == NULL)
-+            {
-+                uctx->uctx_intcookie_table = tbl;
-+                spin_unlock (&uctx->uctx_spinlock);
-+            }
-+            else
-+            {
-+                spin_unlock (&uctx->uctx_spinlock);
-+                intcookie_free_table(tbl);
-+            }
-+
-+            KMEM_FREE(cap, sizeof(*cap));
-+        }
-+
-+        return (intcookie_alloc (uctx->uctx_intcookie_table, arg));
-+    }
-+
-+    case ELAN4IO_FREE_INTCOOKIE:
-+        if (uctx->uctx_intcookie_table == NULL)
-+            return -EINVAL;
-+        else
-+            return (intcookie_free (uctx->uctx_intcookie_table, arg));
-+
-+    case ELAN4IO_ARM_INTCOOKIE:
-+        if (uctx->uctx_intcookie_table == NULL)
-+            return -EINVAL;
-+        else
-+            return (intcookie_arm (uctx->uctx_intcookie_table, arg));
-+
-+    case ELAN4IO_WAIT_INTCOOKIE:
-+        if (uctx->uctx_intcookie_table == NULL)
-+            return -EINVAL;
-+        else
-+            return (intcookie_wait (uctx->uctx_intcookie_table, arg));
-+
-+    case ELAN4IO_FIRE_INTCOOKIE:
-+    {
-+        ELAN4IO_FIRECAP_STRUCT *args;
-+
-+        KMEM_ALLOC (args, ELAN4IO_FIRECAP_STRUCT *, sizeof (ELAN4IO_FIRECAP_STRUCT), 1);
-+        if ((args == NULL))
-+            return (-ENOMEM);
-+
-+        if (copy_from_user (args, (void *) arg, sizeof (ELAN4IO_FIRECAP_STRUCT)))
-+            res = -EFAULT;
-+        else
-+            res = intcookie_fire_cap (&args->fc_capability, args->fc_cookie);
-+
-+        KMEM_FREE(args, sizeof(*args));
-+
-+        return (res);
-+    }
-+
-+    case ELAN4IO_NETERR_MSG:
-+    {
-+        ELAN4IO_NETERR_MSG_STRUCT args;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_NETERR_MSG_STRUCT)))
-+            return (-EFAULT);
-+
-+        return (user_send_neterr_msg (uctx, args.nm_vp, args.nm_nctx, args.nm_retries, &args.nm_msg));
-+    }
-+
-+    case ELAN4IO_NETERR_TIMER:
-+    {
-+        unsigned long ticks = ((unsigned long) arg * HZ) / 1000;
-+
-+        PRINTF (uctx, DBG_NETERR, "elan4_neterr_timer: arg %ld inc %ld\n", arg, ticks);
-+
-+        mod_timer (&uctx->uctx_neterr_timer, (jiffies + (ticks > 0 ? ticks : 1)));
-+        return 0;
-+    }
-+
-+    case ELAN4IO_NETERR_FIXUP:
-+    {
-+        ELAN4IO_NETERR_FIXUP_STRUCT args;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_NETERR_FIXUP_STRUCT)))
-+            return (-EFAULT);
-+
-+        if (args.nf_sten)
-+            return (user_neterr_sten (uctx, args.nf_vp, args.nf_cookie, args.nf_waitforeop));
-+        else
-+            return (user_neterr_dma (uctx, args.nf_vp, args.nf_cookie, args.nf_waitforeop));
-+    }
-+
-+    case ELAN4IO_LOAD_TRANSLATION:
-+    {
-+        ELAN4IO_TRANSLATION_STRUCT args;
-+        unsigned long base, top;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_TRANSLATION_STRUCT)))
-+            return (-EFAULT);
-+
-+        top  = (args.tr_addr + args.tr_len - 1) | (PAGE_SIZE-1);
-+        base = args.tr_addr & PAGE_MASK;
-+
-+        return user_load_range (uctx, base, top - base + 1, args.tr_access);
-+    }
-+
-+    case ELAN4IO_UNLOAD_TRANSLATION:
-+    {
-+        ELAN4IO_TRANSLATION_STRUCT args;
-+        unsigned long base, top;
-+
-+        if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_TRANSLATION_STRUCT)))
-+            return (-EFAULT);
-+
-+        top  = (args.tr_addr + args.tr_len - 1) | (PAGE_SIZE-1);
-+        base = args.tr_addr & PAGE_MASK;
-+
-+        elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* tbl */, base, top - base + 1);
-+
-+        return 0;
-+    }
-+
-+    default:
-+        PRINTF (uctx, DBG_FILE, "user_ioctl: invalid ioctl %x\n", cmd);
-+        return (-EINVAL);
-+    }
-+}
-+
-+static void
-+user_vma_open (struct vm_area_struct *vma)
-+{
-+    USER_PRIVATE *pr   = (USER_PRIVATE *) vma->vm_private_data;
-+    USER_CTXT    *uctx = pr->pr_uctx;
-+    unsigned long addr;
-+    unsigned long pgoff;
-+
-+    PRINTF (uctx, DBG_FILE, "user_vma_open: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
-+            vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
-+
-+    for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
-+        elan4_getcqa (&uctx->uctx_ctxt, pgoff);
-+}
-+
-+static void
-+user_vma_close (struct vm_area_struct *vma)
-+{
-+    USER_PRIVATE *pr   = (USER_PRIVATE *) vma->vm_private_data;
-+    USER_CTXT    *uctx = pr->pr_uctx;
-+    unsigned long addr;
-+    unsigned long pgoff;
-+
-+    PRINTF (uctx, DBG_FILE, "user_vma_close: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
-+            vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
-+
-+    /* NOTE: the same comments apply as mem_vma_close */
-+    for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
-+        if (elan4_getcqa (&uctx->uctx_ctxt, pgoff) != NULL)
-+        {
-+            elan4_putcqa (&uctx->uctx_ctxt, pgoff);    /* drop the reference we've just taken */
-+            elan4_putcqa (&uctx->uctx_ctxt, pgoff);    /* and the one held by the mmap */
-+        }
-+}
-+
-+struct vm_operations_struct user_vm_ops = {
-+    open:  user_vma_open,
-+    close: user_vma_close,
-+};
-+
-+static int
-+user_mmap (struct file *file, struct vm_area_struct *vma)
-+{
-+    USER_PRIVATE *pr   = (USER_PRIVATE *) file->private_data;
-+    USER_CTXT    *uctx = pr->pr_uctx;
-+    ELAN4_DEV    *dev  = uctx->uctx_ctxt.ctxt_dev;
-+    ELAN4_CQA    *cqa;
-+    unsigned long addr;
-+    unsigned long pgoff;
-+    int           res;
-+    ioaddr_t      ioaddr;
-+
-+    /* Don't allow these pages to be swapped out or dumped */
-+    vma->vm_flags |= (VM_RESERVED | VM_IO);
-+
-+    vma->vm_ops          = &user_vm_ops;
-+    vma->vm_file         = file;
-+    vma->vm_private_data = (void *) pr;
-+
-+    for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
-+    {
-+        switch (pgoff)
-+        {
-+        default:
-+            PRINTF (uctx, DBG_FILE, "user_mmap: command queue %ld mapping at %lx\n", pgoff, addr);
-+
-+            if ((cqa = elan4_getcqa (&uctx->uctx_ctxt, pgoff)) == 
NULL) -+ { -+ res = -EINVAL; -+ goto failed; -+ } -+ -+ PRINTF (uctx, DBG_FILE, "user_mmap: cqa=%p idx=%d num=%d ref=%d\n", cqa, cqa->cqa_idx, cqa->cqa_cqnum, cqa->cqa_ref); -+ -+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); -+ -+ if (! (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_NO_WRITE_COMBINE) && (cqa->cqa_type & CQ_Reorder) != 0) -+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); -+ -+ PRINTF (uctx, DBG_FILE, "user_mmap: remap_page_range (%lx, %lx, %lx, %lx)\n", -+ addr, pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + -+ (cqa->cqa_cqnum + dev->dev_cqoffset) * CQ_CommandMappingSize, PAGE_SIZE, -+ vma->vm_page_prot); -+ -+ if (__io_remap_page_range (addr, -+ pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + -+ (cqa->cqa_cqnum + dev->dev_cqoffset) * CQ_CommandMappingSize, -+ PAGE_SIZE, vma->vm_page_prot)) -+ { -+ PRINTF (uctx, DBG_FILE, "user_mmap: remap_page_range failed\n"); -+ -+ elan4_putcqa (&uctx->uctx_ctxt, pgoff); -+ res = -ENOMEM; -+ goto failed; -+ } -+ break; -+ -+ case ELAN4_OFF_USER_REGS: -+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); -+ -+ switch (dev->dev_devinfo.dev_revision_id) -+ { -+ case PCI_REVISION_ID_ELAN4_REVA: -+ ioaddr = pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + ELAN4_REVA_REG_OFFSET + offsetof(E4_Registers, uRegs); -+ break; -+ -+ case PCI_REVISION_ID_ELAN4_REVB: -+ ioaddr = pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + ELAN4_REVB_REG_OFFSET + offsetof(E4_Registers, uRegs); -+ break; -+ -+ default: -+ res = -EINVAL; -+ goto failed; -+ } -+ -+ PRINTF (uctx, DBG_FILE, "user_mmap: user_regs at %lx ioaddr %lx prot %lx\n", -+ addr, ioaddr, vma->vm_page_prot.pgprot); -+ -+ if (__io_remap_page_range (addr, (ioaddr & PAGEMASK), PAGE_SIZE, vma->vm_page_prot)) -+ { -+ res = -EAGAIN; -+ goto failed; -+ } -+ -+ break; -+ -+ case ELAN4_OFF_USER_PAGE: -+ PRINTF (uctx, DBG_FILE, "user_mmap: shared user page - kaddr=%lx uaddr=%lx phys=%lx\n", -+ uctx->uctx_upage, addr, kmem_to_phys (uctx->uctx_upage)); -+ -+ /* we do not want to have this area swapped out, lock it */ -+ vma->vm_flags |= VM_LOCKED; -+ -+ /* Mark the page as reserved or else the remap_page_range() doesn't remap it */ -+ SetPageReserved(pte_page(*find_pte_kernel((unsigned long) uctx->uctx_upage))); -+ -+ if (__remap_page_range (addr, kmem_to_phys (uctx->uctx_upage), PAGE_SIZE, vma->vm_page_prot)) -+ { -+ PRINTF (uctx, DBG_FILE, "user_mmap: remap_page_range (user_page) failed\n"); -+ res = -ENOMEM; -+ goto failed; -+ } -+ break; -+ -+ case ELAN4_OFF_TPROC_TRAMPOLINE: -+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); -+ -+ PRINTF (uctx, DBG_FILE, "user_mmap: tproc trampoline - kaddr=%lx uaddr=%lx phys=%lx\n", uctx->uctx_trampoline, addr, -+ pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM) + uctx->uctx_trampoline + (addr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT))); -+ -+ if (__io_remap_page_range (addr, pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM) + -+ uctx->uctx_trampoline + (addr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT)), -+ PAGE_SIZE, vma->vm_page_prot)) -+ { -+ PRINTF (uctx, DBG_FILE, "user_mmap: remap_page_range (tproc_trampoline) failed\n"); -+ res = -ENOMEM; -+ goto failed; -+ } -+ break; -+ -+ case ELAN4_OFF_DEVICE_STATS: -+ printk ("user_mmap: device_stats\n"); -+ break; -+ } -+ -+ } -+ -+ return (0); -+ -+ failed: -+ for (addr -= PAGE_SIZE, pgoff--; addr >= vma->vm_start; addr -= PAGE_SIZE, pgoff--) -+ elan4_putcqa (&uctx->uctx_ctxt, pgoff); 
/* drop the reference we've just taken */ -+ return (res); -+} -+ -+int -+user_pteload (struct vm_area_struct *vma, unsigned long maddr, USER_CTXT *uctx, E4_Addr eaddr, int perm) -+{ -+ USER_PRIVATE *pr = (USER_PRIVATE *) vma->vm_private_data; -+ ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev; -+ unsigned long pgoff = vma->vm_pgoff + ((maddr - vma->vm_start) >> PAGE_SHIFT); -+ register int i, res; -+ -+ if (pr->pr_uctx != uctx) -+ return -EINVAL; -+ -+ switch (pgoff) -+ { -+ default: -+ { -+ ELAN4_CQA *cqa; -+ unsigned long cqaddr; -+ -+ if ((cqa = elan4_getcqa (&uctx->uctx_ctxt, pgoff)) == NULL) -+ return -EINVAL; -+ -+ cqaddr = (cqa->cqa_cqnum + dev->dev_cqoffset) * CQ_CommandMappingSize; -+ -+ for (i = 0; i < PAGE_SIZE; i += (1 << dev->dev_pageshift[0])) -+ { -+ E4_uint64 newpte = ((cqaddr + i) >> PTE_PADDR_SHIFT) | PTE_SetPerm (perm) | PTE_CommandQueue; -+ -+ if ((res = elan4mmu_pteload (&uctx->uctx_ctxt, 0, eaddr + i, HE_TYPE_COMMAND, newpte)) < 0) -+ return res; -+ } -+ elan4_putcqa (&uctx->uctx_ctxt, pgoff); -+ -+ return 0; -+ } -+ -+ case ELAN4_OFF_USER_REGS: -+ { -+ u32 blow, bhigh; -+ physaddr_t ioaddr; -+ -+ /* compute a local pci address from our register BAR */ -+ pci_read_config_dword (dev->dev_osdep.pdev, PCI_BASE_ADDRESS_2, &blow); -+ pci_read_config_dword (dev->dev_osdep.pdev, PCI_BASE_ADDRESS_3, &bhigh); -+ -+ ioaddr = (((physaddr_t) bhigh) << 32) | (blow & PCI_BASE_ADDRESS_MEM_MASK); -+ -+ switch (dev->dev_devinfo.dev_revision_id) -+ { -+ case PCI_REVISION_ID_ELAN4_REVA: -+ ioaddr |= ELAN4_REVA_REG_OFFSET + offsetof(E4_Registers, uRegs); -+ break; -+ -+ case PCI_REVISION_ID_ELAN4_REVB: -+ ioaddr |= ELAN4_REVB_REG_OFFSET + offsetof(E4_Registers, uRegs); -+ break; -+ -+ default: -+ return -EINVAL; -+ } -+ -+ for (i = 0; i < PAGE_SIZE; i += (1 << dev->dev_pageshift[0])) -+ { -+ E4_uint64 newpte = (((ioaddr & PAGE_MASK) | i) >> PTE_PADDR_SHIFT) | PTE_SetPerm (perm) | PTE_PciNotLocal; -+ -+ if ((res = elan4mmu_pteload (&uctx->uctx_ctxt, 0, eaddr + i, HE_TYPE_REGS, newpte)) < 0) -+ return res; -+ } -+ -+ return 0; -+ } -+ -+ case ELAN4_OFF_USER_PAGE: -+ for (i = 0; i < PAGE_SIZE; i += (1 << dev->dev_pageshift[0])) -+ if ((res = elan4mmu_pteload_page (&uctx->uctx_ctxt, 0, eaddr, pte_page(*find_pte_kernel((unsigned long) uctx->uctx_upage)), perm)) < 0) -+ return res; -+ return 0; -+ -+ case ELAN4_OFF_TPROC_TRAMPOLINE: -+ { -+ sdramaddr_t trampoline = uctx->uctx_trampoline + (maddr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT)); -+ -+ if (! 
elan4mmu_sdram_aliascheck (&uctx->uctx_ctxt, eaddr, trampoline))
-+            return -EINVAL;
-+
-+        for (i = 0; i < PAGE_SIZE; i += (1 << dev->dev_pageshift[0]))
-+        {
-+            E4_uint64 newpte = ((trampoline + i) >> PTE_PADDR_SHIFT) | PTE_SetPerm (perm);
-+
-+            if ((res = elan4mmu_pteload (&uctx->uctx_ctxt, 0, eaddr + i, HE_TYPE_SDRAM, newpte)) < 0)
-+                return res;
-+        }
-+        return 0;
-+    }
-+    }
-+}
-+
-+/* driver entry points */
-+static int
-+elan4_open (struct inode *inode, struct file *file)
-+{
-+    PRINTF (DBG_USER, DBG_FILE, "elan4_open: device %d minor %d file=%p\n", ELAN4_DEVICE(inode), ELAN4_MINOR(inode), file);
-+
-+    switch (ELAN4_MINOR (inode))
-+    {
-+    case ELAN4_MINOR_CONTROL:
-+        return (control_open (inode, file));
-+    case ELAN4_MINOR_MEM:
-+        return (mem_open (inode, file));
-+    case ELAN4_MINOR_USER:
-+        return (user_open (inode, file));
-+    default:
-+        return (-ENXIO);
-+    }
-+}
-+
-+static int
-+elan4_release (struct inode *inode, struct file *file)
-+{
-+    PRINTF (DBG_USER, DBG_FILE, "elan4_release: device %d minor %d file=%p\n", ELAN4_DEVICE(inode), ELAN4_MINOR(inode), file);
-+
-+    switch (ELAN4_MINOR (inode))
-+    {
-+    case ELAN4_MINOR_CONTROL:
-+        return (control_release (inode, file));
-+    case ELAN4_MINOR_MEM:
-+        return (mem_release (inode, file));
-+    case ELAN4_MINOR_USER:
-+        return (user_release (inode, file));
-+    default:
-+        return (-ENXIO);
-+    }
-+}
-+
-+static int
-+elan4_ioctl (struct inode *inode, struct file *file,
-+             unsigned int cmd, unsigned long arg)
-+{
-+    PRINTF (DBG_USER, DBG_FILE, "elan4_ioctl: device %d minor %d cmd %x\n", ELAN4_DEVICE(inode), ELAN4_MINOR(inode), cmd);
-+
-+    switch (ELAN4_MINOR (inode))
-+    {
-+    case ELAN4_MINOR_CONTROL:
-+        return (control_ioctl (inode, file, cmd, arg));
-+    case ELAN4_MINOR_MEM:
-+        return (mem_ioctl (inode, file, cmd, arg));
-+    case ELAN4_MINOR_USER:
-+        return (user_ioctl (inode, file, cmd, arg));
-+    default:
-+        return (-ENXIO);
-+    }
-+}
-+
-+#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
-+static int
-+elan4_ioctl32 (unsigned int fd, unsigned int cmd, unsigned long arg, struct file *file)
-+{
-+    struct inode *inode = file->f_dentry->d_inode;
-+    extern int sys_ioctl (unsigned int fd, unsigned int cmd, unsigned long arg);
-+
-+    PRINTF (DBG_USER, DBG_FILE, "elan4_ioctl32: device %d minor %d cmd %x\n", ELAN4_DEVICE(inode), ELAN4_MINOR(inode), cmd);
-+
-+    if (ELAN4_MINOR (inode) == ELAN4_MINOR_USER)
-+    {
-+        USER_PRIVATE *pr   = (USER_PRIVATE *) file->private_data;
-+        USER_CTXT    *uctx = pr->pr_uctx;
-+
-+        if (current->mm != pr->pr_mm)
-+            return -EINVAL;
-+
-+        switch (cmd)
-+        {
-+        case ELAN4IO_SETPERM32:
-+        {
-+            ELAN4IO_PERM_STRUCT32 args;
-+
-+            if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_PERM_STRUCT32)))
-+                return (-EFAULT);
-+
-+            PRINTF (DBG_USER, DBG_FILE, "user_ioctl32: setperm maddr=%x eaddr=%llx len=%llx perm=%d\n",
-+                    args.ps_maddr, args.ps_eaddr, args.ps_len, args.ps_perm);
-+
-+            return (user_setperm (uctx, args.ps_maddr, args.ps_eaddr, args.ps_len, args.ps_perm));
-+        }
-+
-+        case ELAN4IO_CLRPERM32:
-+        {
-+            ELAN4IO_PERM_STRUCT32 args;
-+
-+            if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_PERM_STRUCT32)))
-+                return (-EFAULT);
-+
-+            PRINTF (DBG_USER, DBG_FILE, "user_ioctl32: clrperm eaddr=%llx len=%llx\n",
-+                    args.ps_eaddr, args.ps_len);
-+
-+            user_clrperm (uctx, args.ps_eaddr, args.ps_len);
-+            return (0);
-+        }
-+
-+        case ELAN4IO_TRAPHANDLER32:
-+        {
-+            ELAN4IO_TRAPHANDLER_STRUCT32 args;
-+
-+            if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_TRAPHANDLER_STRUCT32)))
-+                return (-EFAULT);
-+
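-+            /* th_trapp is carried as a 32-bit value in the compat struct;
-+             * widening it through (unsigned long) below zero-extends it
-+             * before it is cast back to a usable pointer */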
-+            PRINTF (DBG_USER, DBG_FILE, "user_ioctl32: traphandler trapp=%x nticks=%d\n",
-+                    args.th_trapp, args.th_nticks);
-+
-+            return (user_trap_handler (pr->pr_uctx, (ELAN4_USER_TRAP *)(unsigned long)args.th_trapp, args.th_nticks));
-+        }
-+        }
-+    }
-+
-+    PRINTF (DBG_USER, DBG_FILE, "elan4_ioctl32: fd=%d cmd=%x arg=%lx file=%p\n", fd, cmd, arg, file);
-+    return (sys_ioctl (fd, cmd, arg));
-+}
-+#endif
-+
-+
-+
-+static int
-+elan4_mmap (struct file *file, struct vm_area_struct *vma)
-+{
-+    PRINTF (DBG_USER, DBG_FILE, "elan4_mmap: instance %d minor %d start=%lx end=%lx pgoff=%lx\n",
-+            ELAN4_DEVICE (file->f_dentry->d_inode), ELAN4_MINOR (file->f_dentry->d_inode),
-+            vma->vm_start, vma->vm_end, vma->vm_pgoff);
-+
-+    switch (ELAN4_MINOR (file->f_dentry->d_inode))
-+    {
-+    case ELAN4_MINOR_CONTROL:
-+        return (control_mmap (file, vma));
-+    case ELAN4_MINOR_MEM:
-+        return (mem_mmap (file, vma));
-+    case ELAN4_MINOR_USER:
-+        return (user_mmap (file, vma));
-+    default:
-+        return (-ENXIO);
-+    }
-+}
-+
-+void
-+elan4_update_intel_p64h2 (ELAN4_DEV *dev, struct pci_dev *bridge)
-+{
-+    u16 cnf;
-+
-+    pci_read_config_word (bridge, 0x40 /* CNF */, &cnf);
-+
-+    /* We expect the CNF register to be configured as follows
-+     *
-+     *    [8]   == 1    PMODE  PCI Mode
-+     *    [7:6] == 2/3  PFREQ  PCI Frequency (100/133)
-+     *    [5]   == 0    RSDIS  Restreaming Disable
-+     *    [4:3] == 0x   PP     Prefetch Policy
-+     *    [2]   == 0    DTD    Delayed Transaction Depth
-+     *    [1:0] == 10   MDT    Maximum Delayed Transactions
-+     */
-+
-+    if ((cnf & (1 << 8)) == 0)
-+        printk ("elan%d: strangeness - elan reports PCI-X but P64H2 reports PCI mode !\n", dev->dev_instance);
-+    else if ((cnf & 0xb7) != 0x82 && (cnf & 0xb7) != 0x84 && optimise_pci_bus < 2)
-+        printk ("elan%d: P64H2 CNF is not configured as expected : RSDIS=%d PP=%d DTD=%d MDT=%d\n",
-+                dev->dev_instance, (cnf >> 5) & 1, (cnf >> 3) & 3, (cnf >> 2) & 1, cnf & 3);
-+    else
-+    {
-+        switch ((cnf >> 6) & 3)
-+        {
-+        case 2:    /* PCI-X 100 */
-+            pci_write_config_word (bridge, 0xfc /* PC100 */, 0x7777);
-+
-+            printk ("elan%d: optimise P64H2 : setting MDT=0, DTD=1, PFC=777 for PCI-X 100\n", dev->dev_instance);
-+            break;
-+
-+        case 3:    /* PCI-X 133 */
-+            pci_write_config_word (bridge, 0xfe /* PC133 */, 0x7777);
-+
-+            printk ("elan%d: optimise P64H2 : setting MDT=0, DTD=1, PFC=777 for PCI-X 133\n", dev->dev_instance);
-+            break;
-+        }
-+
-+        pci_write_config_word (bridge, 0x40 /* CNF */, (cnf & 0xfff8) | 0x4);    /* DTD=1 MDT=0 */
-+    }
-+}
-+
-+int
-+elan4_optimise_intel_p64h2 (ELAN4_DEV *dev, struct pci_dev *pdev)
-+{
-+    struct pci_bus   *bus      = pdev->bus;
-+    struct pci_dev   *bridge   = bus->self;
-+    unsigned int      devcount = 0;
-+    u8                revision;
-+    u32               ectrl;
-+    struct list_head *el;
-+
-+    pci_read_config_dword (pdev, PCI_ELAN_CONTROL, &ectrl);
-+
-+    /* We can only run in PCI-X mode with a B1 stepping P64H2 because of P64H2 Errata 3 */
-+    pci_read_config_byte (bridge, PCI_REVISION_ID, &revision);
-+    if (revision < 0x04)
-+    {
-+        if ((ectrl & ECTRL_INITIALISATION_MODE) != Pci2_2)
-+        {
-+            static const char *p64h2_stepping[4] = {"UNKNOWN", "UNKNOWN", "UNKNOWN", "B0"};
-+
-+            printk ("elan%d: unable to use device because of P64H2 Errata 3 on\n"
-+                    "       %s stepping part and running in a PCI-X slot\n",
-+                    dev->dev_instance, p64h2_stepping[revision]);
-+            return -EINVAL;
-+        }
-+    }
-+
-+    /* We can only alter the bus configuration registers if the Elan is the only device
-+     * on the bus ...
-+     */
-+    list_for_each (el, &bus->devices) {
-+        struct pci_dev *pcip = list_entry (el, struct pci_dev, bus_list);
-+
-+        if (pcip == pdev || (pcip->vendor == PCI_VENDOR_ID_INTEL && pcip->device == 0x1462 /* P64H2 HOTPLUG */))
-+            continue;
-+
-+        devcount++;
-+    }
-+
-+    if (devcount > 0 || !list_empty (&bus->children))
-+    {
-+        printk ("elan%d: unable to optimise P64H2 settings as %s%s\n", dev->dev_instance,
-+                (devcount > 0) ? "more than one device on bus" : "",
-+                ! list_empty (&bus->children) ? "has child buses" : "");
-+        return 0;
-+    }
-+
-+#ifdef __ia64
-+    if ((ectrl & ECTRL_INITIALISATION_MODE) == PciX100to133MHz)
-+    {
-+        struct pci_dev *pcip;
-+        unsigned int    sioh_good      = 0;
-+        unsigned int    sioh_downgrade = 0;
-+        unsigned int    snc_good       = 0;
-+        unsigned int    snc_downgrade  = 0;
-+
-+        /* Search for the associated SIOH and SNC on ia64,
-+         * if we have a C2 SIOH and a C0/C1 SNC, then we can
-+         * reconfigure the P64H2 as follows:
-+         *     CNF:MDT   = 0
-+         *     CNF:DTD   = 1
-+         *     CNF:PC133 = 7777
-+         *
-+         * if not, then issue a warning that down rev parts
-+         * affect bandwidth.
-+         */
-+        for (pcip = NULL; (pcip = pci_find_device (PCI_VENDOR_ID_INTEL, 0x500, pcip)); )
-+        {
-+            pci_read_config_byte (pcip, PCI_REVISION_ID, &revision);
-+
-+            if (revision >= 0x21)
-+                snc_good++;
-+            else
-+            {
-+                printk ("elan%d: SNC revision %x (%s)\n", dev->dev_instance, revision,
-+                        revision == 0x00 ? "A0" : revision == 0x01 ? "A1" :
-+                        revision == 0x02 ? "A2" : revision == 0x03 ? "A3" :
-+                        revision == 0x10 ? "B0" : revision == 0x20 ? "C0" :
-+                        revision == 0x21 ? "C1" : "UNKNOWN");
-+
-+                snc_downgrade++;
-+            }
-+        }
-+
-+        for (pcip = NULL; (pcip = pci_find_device (PCI_VENDOR_ID_INTEL, 0x510, pcip)) != NULL; )
-+        {
-+            pci_read_config_byte (pcip, PCI_REVISION_ID, &revision);
-+
-+            if (revision >= 0x22)
-+                sioh_good++;
-+            else
-+            {
-+                printk ("elan%d: SIOH revision %x (%s)\n", dev->dev_instance, revision,
-+                        revision == 0x10 ? "B0" : revision == 0x20 ? "C0" :
-+                        revision == 0x21 ? "C1" : revision == 0x22 ? 
"C2" : "UNKNOWN"); -+ -+ sioh_downgrade++; -+ } -+ } -+ -+ if (optimise_pci_bus < 2 && (sioh_downgrade || snc_downgrade)) -+ printk ("elan%d: unable to optimise as SNC/SIOH below required C1/C2 steppings\n", dev->dev_instance); -+ else if (optimise_pci_bus < 2 && (sioh_good == 0 || snc_good == 0)) -+ printk ("elan%d: unable to optimise as cannot determine SNC/SIOH revision\n", dev->dev_instance); -+ else -+ elan4_update_intel_p64h2 (dev, bridge); -+ } -+#endif -+ -+#ifdef __i386 -+ if ((ectrl & ECTRL_INITIALISATION_MODE) == PciX100to133MHz) -+ elan4_update_intel_p64h2 (dev, bridge); -+#endif -+ return 0; -+} -+ -+int -+elan4_optimise_intel_pxh (ELAN4_DEV *dev, struct pci_dev *pdev) -+{ -+ dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] |= ELAN4_FEATURE_64BIT_READ; -+ -+ return 0; -+} -+ -+void -+elan4_optimise_serverworks_ciobx2 (ELAN4_DEV *dev) -+{ -+ struct pci_dev *pdev = dev->dev_osdep.pdev; -+ struct pci_dev *pcip; -+ unsigned char bus; -+ unsigned int dor; -+ -+ /* Find the CIOBX2 for our bus number */ -+ for (pcip = NULL; (pcip = pci_find_device (PCI_VENDOR_ID_SERVERWORKS, 0x0101, pcip)) != NULL;) -+ { -+ pci_read_config_byte (pcip, 0x44 /* BUSNUM */, &bus); -+ -+ if (pdev->bus->number == bus) -+ { -+ printk ("elan%d: optimise CIOBX2 : setting DOR to disable read pipe lining\n", dev->dev_instance); -+ -+ pci_read_config_dword (pcip, 0x78 /* DOR */, &dor); -+ pci_write_config_dword (pcip, 0x78 /* DOR */, dor | (1 << 16)); -+ -+ printk ("elan%d: disabling write-combining on ServerWorks chipset\n", dev->dev_instance); -+ dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] |= ELAN4_FEATURE_NO_WRITE_COMBINE; -+ } -+ } -+} -+ -+#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_PPC_PSERIES) || defined(__alpha) -+int -+elan4_optimise_pci_map (ELAN4_DEV *dev, unsigned int features) -+{ -+ dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] |= features; -+ -+ if (pci_set_dma_mask (dev->dev_osdep.pdev, (u64) 0XFFFFFFFFFFFFFFFFull) || -+ pci_set_consistent_dma_mask (dev->dev_osdep.pdev, (u64) 0XFFFFFFFFFFFFFFFFull)) -+ { -+ printk (KERN_ERR "elan%d: unable to set DAC mode\n", dev->dev_instance); -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+#endif -+ -+int -+elan4_optimise_bus (ELAN4_DEV *dev) -+{ -+ struct pci_dev *pdev = dev->dev_osdep.pdev; -+ -+ if (pdev->bus && pdev->bus->self) -+ { -+ struct pci_dev *bridge = pdev->bus->self; -+ -+ if (bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x1460 /* Intel P64H2 */) -+ return elan4_optimise_intel_p64h2 (dev, pdev); -+ -+ /* See http://pciids.sourceforge.net/iii/?i=8086 */ -+ -+ if ((bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x0329) /* Intel 6700PXH Fn 0 */ || -+ (bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x032a) /* Intel 6700PXH Fn 2 */ || -+ (bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x032c) /* Intel 6702PXH */ || -+ (bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x0320) /* Intel PXH-D */ || -+ (bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x0330) /* Intel 80332 (A segment) */ || -+ (bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x0332) /* Intel 80332 (B segment) */ -+ ) -+ return elan4_optimise_intel_pxh (dev, pdev); -+ } -+ -+ if (pci_find_device (PCI_VENDOR_ID_HP, 0x122e, NULL) != NULL) /* on HP ZX1 set the relaxed ordering */ -+ dev->dev_pteval = PTE_RelaxedOrder; /* bit to get better DMA bandwidth. 
*/
-+
-+    if (pci_find_device (PCI_VENDOR_ID_SERVERWORKS, 0x0101, NULL) != NULL)    /* ServerWorks CIOBX2 */
-+        elan4_optimise_serverworks_ciobx2 (dev);
-+
-+#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_PPC_PSERIES)
-+    return elan4_optimise_pci_map (dev, ELAN4_FEATURE_NO_WRITE_COMBINE | ELAN4_FEATURE_PCI_MAP | ELAN4_FEATURE_NO_PREFETCH);
-+#endif
-+
-+#ifdef __alpha
-+    return elan4_optimise_pci_map (dev, ELAN4_FEATURE_PCI_MAP);
-+#endif
-+
-+#ifdef __sparc
-+    if (! (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_PCI_MAP))
-+        dev->dev_pteval |= (0xfffe000000000000 >> PTE_PADDR_SHIFT);
-+#endif
-+
-+    return 0;
-+}
-+
-+int
-+elan4_pciinit (ELAN4_DEV *dev)
-+{
-+    int           res;
-+    u32           value;
-+    u16           command;
-+    u8            cacheline;
-+    unsigned long flags;
-+
-+    if (optimise_pci_bus && (res = elan4_optimise_bus (dev)) < 0)
-+        return (res);
-+
-+    if ((res = pci_enable_device (dev->dev_osdep.pdev)) < 0)
-+        return (res);
-+
-+    pci_read_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, &value);
-+    if ((value & ECTRL_INITIALISATION_MODE) == Pci2_2)
-+        printk ("elan%d: is an elan4%c (PCI-2.2)\n", dev->dev_instance, 'a' + dev->dev_devinfo.dev_revision_id);
-+    else
-+    {
-+        switch (value & ECTRL_INITIALISATION_MODE)
-+        {
-+        case PciX50To66MHz:
-+            printk ("elan%d: is an elan4%c (PCI-X 50-66)\n", dev->dev_instance, 'a' + dev->dev_devinfo.dev_revision_id);
-+            break;
-+
-+        case PciX66to100MHz:
-+            printk ("elan%d: is an elan4%c (PCI-X 66-100)\n", dev->dev_instance, 'a' + dev->dev_devinfo.dev_revision_id);
-+            break;
-+
-+        case PciX100to133MHz:
-+            printk ("elan%d: is an elan4%c (PCI-X 100-133)\n", dev->dev_instance, 'a' + dev->dev_devinfo.dev_revision_id);
-+            break;
-+
-+        default:
-+            printk ("elan%d: Invalid PCI-X mode\n", dev->dev_instance);
-+            return (-EINVAL);
-+        }
-+    }
-+
-+    /* initialise the elan pll control register */
-+    pci_read_config_dword (dev->dev_osdep.pdev, PCI_ELAN_PLL_CONTROL, &value);
-+
-+    if (elan4_pll_cfg)
-+    {
-+        printk ("elan%d: setting pll control to %08x\n", dev->dev_instance, elan4_pll_cfg);
-+
-+        pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_PLL_CONTROL, elan4_pll_cfg);
-+    }
-+    else
-+    {
-+        if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
-+            pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_PLL_CONTROL,
-+                                    (value & ~ECTRL_SYS_CLOCK_RATIO_MASK) | ECTRL_SYS_CLOCK_RATIO_4_3);
-+        else
-+            pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_PLL_CONTROL,
-+                                    (value & ~ECTRL_SYS_CLOCK_RATIO_MASK) | ECTRL_SYS_CLOCK_RATIO_6_5 | SysPll_FeedForwardISel0 | SysPll_FeedForwardISel1);
-+    }
-+
-+    /* initialise the elan control register */
-+    pci_read_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, &value);
-+
-+    value = ((15 << ECTRL_IPROC_HIGH_PRI_TIME_SHIFT) |
-+             (15 << ECTRL_OTHER_HIGH_PRI_TIME_SHIFT) |
-+             (value & ECTRL_28_NOT_30_BIT_LOCAL_BAR) |
-+             (dev->dev_topaddrmode ? ECTRL_ExtraMasterAddrBits : 0) |
-+             ECTRL_ENABLE_LATENCY_RESET |
-+             ECTRL_ENABLE_WRITEBURSTS |
-+             ECTRL_ENABLE_2_2READBURSTS);
-+
-+#ifdef LINUX_SPARC
-+    value &= ~(ECTRL_ENABLE_LATENCY_RESET | ECTRL_ENABLE_WRITEBURSTS);
-+#endif
-+
-+    pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, value | ECTRL_SOFTWARE_INTERNAL_RESET);
-+
-+    switch (dev->dev_devinfo.dev_revision_id)
-+    {
-+    case PCI_REVISION_ID_ELAN4_REVA:
-+        /* Delay 10ms here if we've changed the sysclock ratio */
-+        /* to allow the PLL to stabilise before proceeding */
-+        udelay (10000);
-+        break;
-+
-+    case PCI_REVISION_ID_ELAN4_REVB:
-+    {
-+        unsigned char val = read_i2c (dev, I2cLedsValue);
-+
-+        /* On RevB we have to explicitly reset the PLLs */
-+        pci_read_config_word (dev->dev_osdep.pdev, PCI_COMMAND, &command);
-+
-+        write_i2c (dev, I2cLedsValue, val | 0x80);
-+        udelay (1000);
-+
-+        /* Issue the PLL counter reset and immediately inhibit all pci interaction
-+         * while the PLL is recovering.  The write to the PCI_COMMAND register has
-+         * to occur within 50uS of the write to the i2c registers */
-+        local_irq_save (flags);
-+        write_i2c (dev, I2cLedsValue, val & ~0x80);
-+        pci_write_config_word (dev->dev_osdep.pdev, PCI_COMMAND, (1 << 10) /* PCI_COMMAND_DISABLE_INT */);
-+        local_irq_restore (flags);
-+
-+        /* Wait for the write to occur and for the PLL to regain lock */
-+        udelay (20000); udelay (20000);
-+
-+        /* Re-enable pci interaction and clear any spurious errors detected */
-+        pci_write_config_word (dev->dev_osdep.pdev, PCI_STATUS, PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR);
-+        pci_write_config_word (dev->dev_osdep.pdev, PCI_COMMAND, command);
-+        break;
-+    }
-+    }
-+
-+    pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, value);
-+
-+    /* Enable master accesses */
-+    pci_set_master (dev->dev_osdep.pdev);
-+
-+    /* Verify that the memWrInvalidate bit is set */
-+    pci_read_config_word (dev->dev_osdep.pdev, PCI_COMMAND, &command);
-+    pci_read_config_byte (dev->dev_osdep.pdev, PCI_CACHE_LINE_SIZE, &cacheline);
-+
-+    if ((command & PCI_COMMAND_INVALIDATE) == 0)
-+    {
-+        printk ("elan%d: enable MemWrInvalidate (cacheline %d)\n",
-+                dev->dev_instance, cacheline * 4);
-+
-+        pci_write_config_word (dev->dev_osdep.pdev, PCI_COMMAND, command | PCI_COMMAND_INVALIDATE);
-+    }
-+
-+    if (pci_request_regions(dev->dev_osdep.pdev, "elan4"))
-+        return -ENODEV;
-+
-+    /* add the interrupt handler */
-+    if (request_irq (dev->dev_osdep.pdev->irq, elan4_irq, SA_SHIRQ, "elan4", dev) != 0)
-+    {
-+        pci_release_regions (dev->dev_osdep.pdev);
-+        return -ENXIO;
-+    }
-+
-+    return (0);
-+}
-+
-+void
-+elan4_updatepll (ELAN4_DEV *dev, unsigned int val)
-+{
-+    u32 value;
-+
-+    if (elan4_pll_cfg == 0)
-+    {
-+        pci_read_config_dword (dev->dev_osdep.pdev, PCI_ELAN_PLL_CONTROL, &value);
-+
-+        pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_PLL_CONTROL,
-+                                (value & ~ECTRL_SYS_CLOCK_RATIO_MASK) | val);
-+
-+        /* Delay 10ms here if we've changed the sysclock ratio */
-+        /* to allow the PLL to stabilise before proceeding */
-+        udelay (10000);
-+    }
-+}
-+
-+void
-+elan4_pcifini (ELAN4_DEV *dev)
-+{
-+    u32 value;
-+
-+    /* release the interrupt handler */
-+    free_irq (dev->dev_osdep.pdev->irq, dev);
-+
-+    /* release the address space */
-+    pci_release_regions (dev->dev_osdep.pdev);
-+
-+    pci_read_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, &value);
-+    pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, value | ECTRL_SOFTWARE_INTERNAL_RESET);
-+    pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, value);
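-+    /* pulsing ECTRL_SOFTWARE_INTERNAL_RESET and then restoring the saved
-+     * control value above leaves the device in a freshly-reset, quiescent
-+     * state before it is disabled below */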
-+ -+ pci_disable_device (dev->dev_osdep.pdev); -+} -+ -+void -+elan4_pcierror (ELAN4_DEV *dev) -+{ -+ struct pci_dev *pci = dev->dev_osdep.pdev; -+ u8 type; -+ u16 status, cmd; -+ u32 physlo, physhi, control; -+ -+ printk("elan%d: pci error has occurred\n", dev->dev_instance); -+ -+ pci_read_config_word (pci, PCI_STATUS, &status); -+ pci_read_config_word (pci, PCI_COMMAND, &cmd); -+ pci_read_config_dword (pci, PCI_ELAN_CONTROL, &control); -+ -+ if (control & ECTRL_REC_SPLIT_COMP_MESSAGE) -+ { -+ u32 message, attr; -+ -+ pci_write_config_dword (pci, PCI_ELAN_CONTROL, control & ~ECTRL_SELECT_SPLIT_MESS_ATTR); -+ pci_read_config_dword (pci, PCI_ELAN_SPLIT_MESSAGE_VALUE, &message); -+ pci_write_config_dword (pci, PCI_ELAN_CONTROL, control | ECTRL_SELECT_SPLIT_MESS_ATTR); -+ pci_read_config_dword (pci, PCI_ELAN_SPLIT_MESSAGE_VALUE, &attr); -+ -+ printk ("elan%d: pcierror - received split completion message - attr=%08x, message=%08x\n", -+ dev->dev_instance, attr, message); -+ -+ pci_write_config_dword (pci, PCI_ELAN_CONTROL, control | ECTRL_REC_SPLIT_COMP_MESSAGE); /* clear the error */ -+ } -+ else -+ { -+ pci_read_config_dword (pci, PCI_ELAN_PARITY_ADDR_LO, &physlo); -+ pci_read_config_dword (pci, PCI_ELAN_PARITY_ADDR_HI, &physhi); -+ pci_read_config_byte (pci, PCI_ELAN_PARITY_TYPE, &type); -+ -+ printk ("elan%d: pcierror - status %x cmd %4x physaddr %08x%08x type %x\n", -+ dev->dev_instance, status, cmd, physhi, physlo, type); -+ -+ if (status & PCI_STATUS_PARITY) -+ printk ("elan%d: parity error signalled (PERR)\n", dev->dev_instance); -+ if (status & PCI_STATUS_DETECTED_PARITY) -+ printk ("elan%d: detected parity error\n", dev->dev_instance); -+ if (status & PCI_STATUS_REC_MASTER_ABORT) -+ printk ("elan%d: received master abort\n", dev->dev_instance); -+ if (status & PCI_STATUS_REC_TARGET_ABORT) -+ printk ("elan%d: received target abort\n", dev->dev_instance); -+ if (status & PCI_STATUS_SIG_SYSTEM_ERROR) -+ printk ("elan%d: signalled SERR\n", dev->dev_instance); -+ if (status & PCI_STATUS_SIG_TARGET_ABORT) -+ printk ("elan%d: signalled target abort\n", dev->dev_instance); -+ -+ pci_write_config_word (pci, PCI_STATUS, status); /* clear the errors */ -+ } -+ -+ DISABLE_INT_MASK (dev, INT_PciMemErr); -+ -+#ifdef notdef -+ panic ("elan%d: pcierror\n", dev->dev_instance); /* better panic ! 
*/
-+#endif
-+}
-+
-+static irqreturn_t
-+elan4_irq (int irq, void *arg, struct pt_regs *regs)
-+{
-+    if (elan4_1msi0 ((ELAN4_DEV *) arg))
-+	return IRQ_HANDLED;
-+    else
-+	return IRQ_NONE;
-+}
-+
-+ioaddr_t
-+elan4_map_device (ELAN4_DEV *dev, unsigned bar, unsigned off, unsigned size, ELAN4_MAP_HANDLE *handle)
-+{
-+    return (ioaddr_t) ioremap_nocache (pci_resource_start (dev->dev_osdep.pdev, bar) + off, size);
-+}
-+
-+void
-+elan4_unmap_device (ELAN4_DEV *dev, ioaddr_t ptr, unsigned size, ELAN4_MAP_HANDLE *handle)
-+{
-+    iounmap ((void *) ptr);
-+}
-+
-+unsigned long
-+elan4_resource_len (ELAN4_DEV *dev, unsigned bar)
-+{
-+    return (pci_resource_len (dev->dev_osdep.pdev, bar));
-+}
-+
-+void
-+elan4_configure_writecombining (ELAN4_DEV *dev)
-+{
-+#ifdef CONFIG_MTRR
-+    dev->dev_osdep.sdram_mtrr = dev->dev_osdep.regs_mtrr = -1;
-+#endif
-+
-+    if ((dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_NO_WRITE_COMBINE))
-+	return;
-+
-+#if (defined(__i386) || defined(__x86_64))
-+
-+#if defined (X86_FEATURE_PAT)
-+
-+#ifndef boot_cpu_has
-+# define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability)
-+#endif
-+
-+    /* Try to utilise PAT entries which already exist */
-+    if (boot_cpu_has (X86_FEATURE_PAT))
-+    {
-+	unsigned int val0, val1, i;
-+	int slot = -1;
-+
-+	/* Read the IA32CR_PAT MSR register and see if a slot is
-+	 * set for write-combining. Note we assume that all CPUs
-+	 * are configured the same, as they're supposed to be. */
-+	rdmsr (0x277, val0, val1);
-+
-+	/* Check for PAT write combining entry (value 0x01) */
-+	for (i = 0; i < 4; i++, val0 >>= 8)
-+	    if ((val0 & 0xff) == 0x01)
-+		slot = i;
-+	for (i = 4; i < 8; i++, val1 >>= 8)
-+	    if ((val1 & 0xff) == 0x01)
-+		slot = i;
-+
-+	if (slot >= 0)
-+	{
-+	    printk ("elan%d: using PAT for write combining (slot %d)\n", dev->dev_instance, slot);
-+
-+	    pat_pteval = ((slot & 4) ? _PAGE_PSE : 0) | ((slot & 2) ? _PAGE_PCD : 0) | ((slot & 1) ?
_PAGE_PWT : 0); -+ return; -+ } -+ } -+#endif -+ -+#ifdef CONFIG_MTRR -+ /* try and initialise the MTRR registers to enable write-combining */ -+ dev->dev_osdep.sdram_mtrr = mtrr_add (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM), -+ pci_resource_len (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM), -+ MTRR_TYPE_WRCOMB, 1); -+ if (dev->dev_osdep.sdram_mtrr < 0) -+ printk ("elan%d: cannot configure MTRR for sdram\n", dev->dev_instance); -+ -+ if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVB) -+ { -+ unsigned int cqreorder = dev->dev_cqcount >> 1; -+ unsigned int cqcount = dev->dev_cqcount - cqreorder; -+ -+ dev->dev_osdep.regs_mtrr = mtrr_add (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + -+ (dev->dev_cqoffset + cqreorder) * CQ_CommandMappingSize, -+ CQ_CommandMappingSize * cqcount, -+ MTRR_TYPE_WRCOMB, 1); -+ -+ if (dev->dev_osdep.regs_mtrr < 0) -+ printk ("elan%d: cannot configure MTRR for command ports\n", dev->dev_instance); -+ else -+ { -+ dev->dev_cqreorder = cqreorder; -+ return; -+ } -+ } -+#endif -+ -+ /* Set flag so that userspace knows write-combining is disabled */ -+ dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] |= ELAN4_FEATURE_NO_WRITE_COMBINE; -+#endif -+ -+} -+ -+void -+elan4_unconfigure_writecombining (ELAN4_DEV *dev) -+{ -+#if defined (X86_FEATURE_PAT) -+ if (pat_pteval != -1) return; -+#endif -+ -+#ifdef CONFIG_MTRR -+ if (dev->dev_osdep.sdram_mtrr >=0 ) -+ mtrr_del (dev->dev_osdep.sdram_mtrr, pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM), -+ pci_resource_len (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM)); -+ -+ if (dev->dev_cqreorder && dev->dev_osdep.regs_mtrr >= 0) -+ mtrr_del (dev->dev_osdep.regs_mtrr, -+ pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + -+ (dev->dev_cqoffset + dev->dev_cqreorder) * CQ_CommandMappingSize, -+ CQ_CommandMappingSize * (dev->dev_cqcount >> 1)); -+#endif -+} -+ -+EXPORT_SYMBOL(elan4_reference_device); -+EXPORT_SYMBOL(elan4_dereference_device); -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan4/i2c.c linux-2.6.9/drivers/net/qsnet/elan4/i2c.c ---- clean/drivers/net/qsnet/elan4/i2c.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan4/i2c.c 2004-01-07 08:37:45.000000000 -0500 -@@ -0,0 +1,248 @@ -+/* -+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: i2c.c,v 1.4 2004/01/07 13:37:45 jon Exp $" -+/* $Source: /cvs/master/quadrics/elan4mod/i2c.c,v $*/ -+#include -+ -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+ -+#define I2C_POLL_LIMIT 8 -+ -+static int -+i2c_poll_busy (ELAN4_DEV *dev) -+{ -+ int t = 100; -+ int loop = 0; -+ volatile unsigned char val; -+ -+ /* wait for any led I2C operation to finish */ -+ while (((val = read_i2c (dev, I2cPortControl)) & I2cCntl_I2cPortBusy) && loop++ < I2C_POLL_LIMIT) -+ { -+ DELAY (t); -+ -+ if (t < 500000) -+ t <<= 1; -+ } -+ if (loop >= I2C_POLL_LIMIT) -+ { -+ printk ("elan%d: I2c has timed out waiting for I2cPortBusy to clear!\n", dev->dev_instance); -+ printk ("elan%d: I2cPortControl=%x I2cLedBase=%x I2cStatus=%x\n", -+ dev->dev_instance, val, read_i2c (dev, I2cLedBase), read_i2c (dev, I2cStatus)); -+ } -+ -+ return val; -+} -+ -+static int -+i2c_poll_stopped (ELAN4_DEV *dev) -+{ -+ int t = 100; -+ int loop = 0; -+ unsigned char val=0, newval; -+ -+ /* wait for any led I2C operation to finish. Must see it stopped at least twice */ -+ while (!(((newval = read_i2c (dev, I2cPortControl)) & I2cCntl_I2cStopped) && -+ (val & I2cCntl_I2cStopped)) && -+ (loop++ < I2C_POLL_LIMIT)) -+ { -+ DELAY (t); -+ -+ if (t < 500000) -+ t <<= 1; -+ val = newval; -+ } -+ -+ return val; -+} -+ -+int -+i2c_disable_auto_led_update (ELAN4_DEV *dev) -+{ -+ spin_lock (&dev->dev_i2c_lock); -+ -+ if (dev->dev_i2c_led_disabled++ == 0) -+ { -+ write_i2c (dev, I2cLedBase, read_i2c (dev, I2cLedBase) & ~I2cCntl_I2cUpdatingLedReg); -+ -+ if (! (i2c_poll_stopped (dev) & I2cCntl_I2cStopped)) -+ { -+ write_i2c (dev, I2cLedBase, read_i2c (dev, I2cLedBase) | I2cCntl_I2cUpdatingLedReg); -+ -+ spin_unlock (&dev->dev_i2c_lock); -+ -+ return -EAGAIN; -+ } -+ -+ write_i2c (dev, I2cStatus, read_i2c (dev, I2cStatus) & ~I2cCntl_SampleNewLedValues); -+ } -+ -+ spin_unlock (&dev->dev_i2c_lock); -+ -+ return 0; -+} -+ -+void -+i2c_enable_auto_led_update (ELAN4_DEV *dev) -+{ -+ spin_lock (&dev->dev_i2c_lock); -+ if (--dev->dev_i2c_led_disabled == 0) -+ { -+ write_i2c (dev, I2cLedBase, read_i2c (dev, I2cLedBase) | I2cCntl_I2cUpdatingLedReg); -+ write_i2c (dev, I2cStatus, read_i2c (dev, I2cStatus) | I2cCntl_SampleNewLedValues); -+ } -+ -+ spin_unlock (&dev->dev_i2c_lock); -+} -+ -+int -+i2c_write (ELAN4_DEV *dev, unsigned int address, unsigned int count, unsigned char *data) -+{ -+ int i; -+ -+ if (! (i2c_poll_busy (dev) & I2cCntl_I2cStopped)) -+ return -EAGAIN; -+ -+ write_i2c (dev, I2cWrData, I2C_WRITE_ADDR(address)); -+ write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite); -+ -+ if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed) -+ return -ENXIO; -+ -+ for (i = 0; i < count; i++) -+ { -+ write_i2c (dev, I2cWrData, data[i]); -+ write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite | (i == (count-1) ? I2cCntl_I2cPortGenStopBit : 0)); -+ } -+ -+ return 0; -+} -+ -+int -+i2c_read (ELAN4_DEV *dev, unsigned int address, unsigned int count, unsigned char *data) -+{ -+ int i; -+ -+ if (! (i2c_poll_busy (dev) & I2cCntl_I2cStopped)) -+ return -EAGAIN; /* not idle */ -+ -+ write_i2c (dev, I2cWrData, I2C_READ_ADDR(address)); -+ write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite); -+ -+ if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed) -+ return -ENXIO; -+ -+ for (i = 0; i < count; i++) -+ { -+ write_i2c (dev, I2cWrData, 0xff); -+ write_i2c (dev, I2cPortControl, I2cCntl_I2cPortRead | ((i == count-1) ? 
I2cCntl_I2cPortGenStopBit : 0)); -+ -+ i2c_poll_busy (dev); -+ -+ data[i] = read_i2c (dev, I2cRdData); -+ } -+ -+ return 0; -+} -+ -+int -+i2c_writereg (ELAN4_DEV *dev, unsigned int address, unsigned int reg, unsigned int count, unsigned char *data) -+{ -+ int i; -+ -+ if (! (i2c_poll_busy (dev) & I2cCntl_I2cStopped)) -+ return -EAGAIN; /* not idle */ -+ -+ write_i2c (dev, I2cWrData, I2C_WRITE_ADDR(address)); -+ write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite); -+ -+ if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed) -+ return -ENXIO; -+ -+ write_i2c (dev, I2cWrData, reg); -+ write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite); -+ -+ if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed) -+ return -ENXIO; -+ -+ for (i = 0; i < count; i++) -+ { -+ write_i2c (dev, I2cWrData, data[i]); -+ write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite | ((i == count-1) ? I2cCntl_I2cPortGenStopBit : 0)); -+ -+ if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed) -+ printk (" i2c_writereg: off %d failed\n", i); -+ } -+ -+ return 0; -+} -+ -+int -+i2c_readreg (ELAN4_DEV *dev, unsigned int address, unsigned int reg, unsigned int count, unsigned char *data) -+{ -+ if (! (i2c_poll_busy (dev) & I2cCntl_I2cStopped)) -+ return -EAGAIN; /* not idle */ -+ -+ write_i2c (dev, I2cWrData, I2C_WRITE_ADDR(address)); -+ write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite); -+ -+ if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed) -+ return -ENXIO; -+ -+ write_i2c (dev, I2cWrData, reg); -+ write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite | I2cCntl_I2cPortGenStopBit); -+ -+ if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed) -+ return -ENXIO; -+ -+ return i2c_read (dev, address, count, data); -+} -+ -+int -+i2c_read_rom (ELAN4_DEV *dev, unsigned int addr, unsigned int len, unsigned char *data) -+{ -+ unsigned int top = addr + len; -+ int res; -+ -+ if ((res = i2c_disable_auto_led_update (dev)) == 0) -+ { -+ /* read the rom in chunks that don't span the block boundary */ -+ while (addr < top) -+ { -+ unsigned int thisnob = top - addr; -+ unsigned int blocknob = I2C_24LC16B_BLOCKSIZE - I2C_24LC16B_BLOCKOFFSET(addr); -+ -+ if (thisnob > blocknob) -+ thisnob = blocknob; -+ -+ if ((res = i2c_readreg (dev, I2C_EEPROM_ADDR + I2C_24LC16B_BLOCKADDR(addr), -+ I2C_24LC16B_BLOCKOFFSET(addr), thisnob, data)) < 0) -+ break; -+ -+ addr += thisnob; -+ data += thisnob; -+ } -+ -+ i2c_enable_auto_led_update (dev); -+ } -+ return res; -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan4/intcookie.c linux-2.6.9/drivers/net/qsnet/elan4/intcookie.c ---- clean/drivers/net/qsnet/elan4/intcookie.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan4/intcookie.c 2005-02-03 11:24:44.000000000 -0500 -@@ -0,0 +1,371 @@ -+/* -+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ *
-+ *    For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ident "@(#)$Id: intcookie.c,v 1.15 2005/02/03 16:24:44 addy Exp $"
-+/* $Source: /cvs/master/quadrics/elan4mod/intcookie.c,v $*/
-+
-+#include
-+
-+#include
-+#include
-+#include
-+#include
-+
-+static INTCOOKIE_TABLE *intcookie_tables;
-+static spinlock_t intcookie_table_lock;
-+
-+/*
-+ * intcookie_drop_entry:
-+ *   drop the reference to a cookie held
-+ *   by the cookie table
-+ */
-+static void
-+intcookie_drop_entry (INTCOOKIE_ENTRY *ent)
-+{
-+    unsigned long flags;
-+
-+    spin_lock_irqsave (&ent->ent_lock, flags);
-+    if (--ent->ent_ref != 0)
-+    {
-+	ent->ent_fired = ent->ent_cookie;
-+	kcondvar_wakeupall (&ent->ent_wait, &ent->ent_lock);
-+
-+	spin_unlock_irqrestore (&ent->ent_lock, flags);
-+    }
-+    else
-+    {
-+	spin_unlock_irqrestore (&ent->ent_lock, flags);
-+
-+	spin_lock_destroy (&ent->ent_lock);
-+	kcondvar_destroy (&ent->ent_wait);
-+
-+	KMEM_FREE (ent, sizeof (INTCOOKIE_ENTRY));
-+    }
-+}
-+
-+void
-+intcookie_init()
-+{
-+    spin_lock_init (&intcookie_table_lock);
-+}
-+
-+void
-+intcookie_fini()
-+{
-+    spin_lock_destroy (&intcookie_table_lock);
-+}
-+
-+INTCOOKIE_TABLE *
-+intcookie_alloc_table (ELAN_CAPABILITY *cap)
-+{
-+    INTCOOKIE_TABLE *tbl, *ntbl;
-+    ELAN_CAPABILITY *ncap;
-+
-+    KMEM_ZALLOC (ntbl, INTCOOKIE_TABLE *, sizeof (INTCOOKIE_TABLE), 1);
-+
-+    if (ntbl == NULL)
-+	return (NULL);
-+
-+    KMEM_ALLOC (ncap, ELAN_CAPABILITY *, ELAN_CAP_SIZE(cap), 1);
-+
-+    if (ncap == NULL)
-+    {
-+	KMEM_FREE (ntbl, sizeof (INTCOOKIE_TABLE));
-+	return (NULL);
-+    }
-+
-+    spin_lock (&intcookie_table_lock);
-+
-+    for (tbl = intcookie_tables; tbl; tbl = tbl->tbl_next)
-+	if (ELAN_CAP_MATCH (tbl->tbl_cap, cap) && tbl->tbl_cap->cap_mycontext == cap->cap_mycontext)
-+	    break;
-+
-+    if (tbl != NULL)
-+	tbl->tbl_ref++;
-+    else
-+    {
-+	spin_lock_init (&ntbl->tbl_lock);
-+
-+	ntbl->tbl_cap     = ncap;
-+	ntbl->tbl_ref     = 1;
-+	ntbl->tbl_entries = NULL;
-+
-+	/* Save supplied cap */
-+	memcpy (ncap, cap, ELAN_CAP_SIZE(cap));
-+
-+	if ((ntbl->tbl_next = intcookie_tables) != NULL)
-+	    intcookie_tables->tbl_prev = ntbl;
-+	intcookie_tables = ntbl;
-+	ntbl->tbl_prev   = NULL;
-+    }
-+    spin_unlock (&intcookie_table_lock);
-+
-+    if (tbl == NULL)
-+	return (ntbl);
-+    else
-+    {
-+	KMEM_FREE (ntbl, sizeof (INTCOOKIE_TABLE));
-+	KMEM_FREE (ncap, ELAN_CAP_SIZE(cap));
-+	return (tbl);
-+    }
-+}
-+
-+void
-+intcookie_free_table (INTCOOKIE_TABLE *tbl)
-+{
-+    INTCOOKIE_ENTRY *ent;
-+
-+    spin_lock (&intcookie_table_lock);
-+    if (tbl->tbl_ref > 1)
-+    {
-+	tbl->tbl_ref--;
-+	spin_unlock (&intcookie_table_lock);
-+	return;
-+    }
-+
-+    if (tbl->tbl_prev)
-+	tbl->tbl_prev->tbl_next = tbl->tbl_next;
-+    else
-+	intcookie_tables = tbl->tbl_next;
-+    if (tbl->tbl_next)
-+	tbl->tbl_next->tbl_prev = tbl->tbl_prev;
-+
-+    spin_unlock (&intcookie_table_lock);
-+
-+    /* NOTE - table no longer visible to other threads
-+     *        no need to acquire tbl_lock */
-+    while ((ent = tbl->tbl_entries) != NULL)
-+    {
-+	if ((tbl->tbl_entries = ent->ent_next) != NULL)
-+	    ent->ent_next->ent_prev = NULL;
-+
-+	intcookie_drop_entry (ent);
-+    }
-+    spin_lock_destroy (&tbl->tbl_lock);
-+
-+    KMEM_FREE (tbl->tbl_cap, ELAN_CAP_SIZE(tbl->tbl_cap));
-+    KMEM_FREE (tbl, sizeof (INTCOOKIE_TABLE));
-+}
-+
-+int
-+intcookie_alloc (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie)
-+{
-+    INTCOOKIE_ENTRY *ent, *nent;
-+    unsigned long flags;
-+
-+    KMEM_ZALLOC (nent, INTCOOKIE_ENTRY *, sizeof (INTCOOKIE_ENTRY), 1);
-+
-+    if (nent == NULL)
-+	return (-ENOMEM);
-+
-+    spin_lock_irqsave (&tbl->tbl_lock,
flags); -+ for (ent = tbl->tbl_entries; ent; ent = ent->ent_next) -+ if (ent->ent_cookie == cookie) -+ break; -+ -+ if (ent == NULL) -+ { -+ kcondvar_init (&nent->ent_wait); -+ spin_lock_init (&nent->ent_lock); -+ -+ nent->ent_ref = 1; -+ nent->ent_cookie = cookie; -+ -+ if ((nent->ent_next = tbl->tbl_entries) != NULL) -+ tbl->tbl_entries->ent_prev = nent; -+ tbl->tbl_entries = nent; -+ nent->ent_prev = NULL; -+ } -+ spin_unlock_irqrestore (&tbl->tbl_lock, flags); -+ -+ if (ent == NULL) -+ return (0); -+ else -+ { -+ KMEM_FREE (nent, sizeof (INTCOOKIE_ENTRY)); -+ return (-EINVAL); -+ } -+} -+ -+int -+intcookie_free (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie) -+{ -+ INTCOOKIE_ENTRY *ent; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&tbl->tbl_lock, flags); -+ for (ent = tbl->tbl_entries; ent; ent = ent->ent_next) -+ if (ent->ent_cookie == cookie) -+ break; -+ -+ if (ent == NULL) -+ { -+ spin_unlock_irqrestore (&tbl->tbl_lock, flags); -+ return (-EINVAL); -+ } -+ -+ if (ent->ent_prev == NULL) -+ tbl->tbl_entries = ent->ent_next; -+ else -+ ent->ent_prev->ent_next = ent->ent_next; -+ -+ if (ent->ent_next != NULL) -+ ent->ent_next->ent_prev = ent->ent_prev; -+ -+ spin_unlock_irqrestore (&tbl->tbl_lock, flags); -+ -+ intcookie_drop_entry (ent); -+ -+ return (0); -+} -+ -+/* -+ * intcookie_fire_cookie: -+ * fire the cookie - this is called from the event interrupt. -+ */ -+int -+intcookie_fire (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie) -+{ -+ INTCOOKIE_ENTRY *ent; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&tbl->tbl_lock, flags); -+ for (ent = tbl->tbl_entries; ent; ent = ent->ent_next) -+ if (ent->ent_cookie == cookie) -+ break; -+ -+ if (ent == NULL) -+ { -+ spin_unlock_irqrestore (&tbl->tbl_lock, flags); -+ return (-EINVAL); -+ } -+ -+ spin_lock (&ent->ent_lock); -+ ent->ent_fired = cookie; -+ kcondvar_wakeupall (&ent->ent_wait, &ent->ent_lock); -+ spin_unlock (&ent->ent_lock); -+ -+ spin_unlock_irqrestore (&tbl->tbl_lock, flags); -+ -+ return (0); -+} -+ -+int -+intcookie_fire_cap (ELAN_CAPABILITY *cap, ELAN4_INTCOOKIE cookie) -+{ -+ int res; -+ INTCOOKIE_TABLE *tbl; -+ -+ spin_lock (&intcookie_table_lock); -+ -+ for (tbl = intcookie_tables; tbl; tbl = tbl->tbl_next) -+ if (ELAN_CAP_MATCH (tbl->tbl_cap, cap) && tbl->tbl_cap->cap_mycontext == cap->cap_mycontext) -+ break; -+ -+ if (tbl != NULL) -+ tbl->tbl_ref++; -+ -+ spin_unlock (&intcookie_table_lock); -+ -+ /* No matching table found */ -+ if (tbl == NULL) -+ return (-EINVAL); -+ -+ /* Fire the correct cookie */ -+ res = intcookie_fire (tbl, cookie); -+ -+ /* Decrement reference count (and free if necessary) */ -+ intcookie_free_table (tbl); -+ -+ return (res); -+} -+ -+/* -+ * intcookie_wait_cookie: -+ * deschedule on a cookie if it has not already fired. -+ * note - if the cookie is removed from the table, then -+ * we free it off when we're woken up. 
-+ */ -+int -+intcookie_wait (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie) -+{ -+ INTCOOKIE_ENTRY *ent; -+ unsigned long flags; -+ int res; -+ -+ spin_lock_irqsave (&tbl->tbl_lock, flags); -+ for (ent = tbl->tbl_entries; ent; ent = ent->ent_next) -+ if (ent->ent_cookie == cookie) -+ break; -+ -+ if (ent == NULL) -+ { -+ spin_unlock_irqrestore (&tbl->tbl_lock, flags); -+ return (-EINVAL); -+ } -+ -+ spin_lock (&ent->ent_lock); -+ spin_unlock (&tbl->tbl_lock); -+ -+ if (ent->ent_fired != 0) -+ { -+ spin_unlock_irqrestore (&ent->ent_lock, flags); -+ return (0); -+ } -+ -+ ent->ent_ref++; -+ kcondvar_waitsig (&ent->ent_wait, &ent->ent_lock, &flags); -+ -+ res = ent->ent_fired ? 0 : -EINTR; -+ -+ if (--ent->ent_ref > 0) -+ spin_unlock_irqrestore (&ent->ent_lock, flags); -+ else -+ { -+ spin_unlock_irqrestore (&ent->ent_lock, flags); -+ -+ spin_lock_destroy (&ent->ent_lock); -+ kcondvar_destroy (&ent->ent_wait); -+ -+ KMEM_FREE (ent, sizeof (INTCOOKIE_ENTRY)); -+ } -+ -+ return (res); -+} -+ -+int -+intcookie_arm (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie) -+{ -+ INTCOOKIE_ENTRY *ent; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&tbl->tbl_lock, flags); -+ for (ent = tbl->tbl_entries; ent; ent = ent->ent_next) -+ if (ent->ent_cookie == cookie) -+ break; -+ -+ if (ent == NULL) -+ { -+ spin_unlock_irqrestore (&tbl->tbl_lock, flags); -+ return (-EINVAL); -+ } -+ -+ spin_lock (&ent->ent_lock); -+ ent->ent_fired = 0; -+ spin_unlock (&ent->ent_lock); -+ -+ spin_unlock_irqrestore (&tbl->tbl_lock, flags); -+ -+ return (0); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan4/Makefile linux-2.6.9/drivers/net/qsnet/elan4/Makefile ---- clean/drivers/net/qsnet/elan4/Makefile 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan4/Makefile 2005-10-10 17:47:30.000000000 -0400 -@@ -0,0 +1,15 @@ -+# -+# Makefile for Quadrics QsNet -+# -+# Copyright (c) 2002-2004 Quadrics Ltd -+# -+# File: drivers/net/qsnet/elan4/Makefile -+# -+ -+ -+# -+ -+obj-$(CONFIG_ELAN4) += elan4.o -+elan4-objs := device.o i2c.o mmu.o sdram.o debug.o routetable.o trap.o user.o user_ddcq.o regions.o intcookie.o neterr.o device_Linux.o user_Linux.o procfs_Linux.o mmu_Linux.o -+ -+EXTRA_CFLAGS += -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT -diff -urN clean/drivers/net/qsnet/elan4/Makefile.conf linux-2.6.9/drivers/net/qsnet/elan4/Makefile.conf ---- clean/drivers/net/qsnet/elan4/Makefile.conf 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan4/Makefile.conf 2005-09-07 10:39:42.000000000 -0400 -@@ -0,0 +1,10 @@ -+# Flags for generating QsNet Linux Kernel Makefiles -+MODNAME = elan4.o -+MODULENAME = elan4 -+KOBJFILES = device.o i2c.o mmu.o sdram.o debug.o routetable.o trap.o user.o user_ddcq.o regions.o intcookie.o neterr.o device_Linux.o user_Linux.o procfs_Linux.o mmu_Linux.o -+EXPORT_KOBJS = device.o device_Linux.o mmu.o mmu_Linux.o procfs_Linux.o routetable.o sdram.o trap.o -+CONFIG_NAME = CONFIG_ELAN4 -+SGALFC = -+# EXTRALINES START -+ -+# EXTRALINES END -diff -urN clean/drivers/net/qsnet/elan4/mmu.c linux-2.6.9/drivers/net/qsnet/elan4/mmu.c ---- clean/drivers/net/qsnet/elan4/mmu.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan4/mmu.c 2005-07-14 09:34:12.000000000 -0400 -@@ -0,0 +1,1552 @@ -+/* -+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: mmu.c,v 1.47.2.3 2005/07/14 13:34:12 david Exp $" -+/* $Source: /cvs/master/quadrics/elan4mod/mmu.c,v $*/ -+ -+#include -+#include -+ -+#include -+#include -+#include -+ -+int elan4_debug_mmu; -+int elan4_mmuhash_chain_reduction = 1; -+int elan4_mmuhash_chain_end_reduce = 0; -+int elan4_mmuhash_chain_middle_reduce = 0; -+int elan4_mmuhash_chain_middle_fail = 0; -+int elan4_mmuhash_shuffle_attempts = 0; -+int elan4_mmuhash_shuffle_done = 0; -+ -+/* Permission table - see ELAN4 MMU documentation */ -+u_char elan4_permtable[] = -+{ -+ 0x00, /* 0x000000 - Disable */ -+ 0x00, /* 0x000000 - Unused */ -+ 0x01, /* 0x000001 - Local Data Read */ -+ 0x03, /* 0x000011 - Local Data Write */ -+ 0x11, /* 0x010001 - Local Read */ -+ 0x10, /* 0x010000 - Local Execute */ -+ 0x05, /* 0x000101 - Read Only */ -+ 0x13, /* 0x010011 - Local Write */ -+ 0x20, /* 0x100000 - Local Event Access */ -+ 0x23, /* 0x100011 - Local Event Write Ac */ -+ 0xa3, /* 1x100011 - Remote Ev Loc Write */ -+ 0xaf, /* 1x101111 - Remote All */ -+ 0x07, /* 0x000111 - Remote Read Only */ -+ 0x0d, /* 0x001101 - Remote Write Only */ -+ 0x0f, /* 0x001111 - Remote Read/Write */ -+ 0xbf, /* 1x111111 - No Fault */ -+}; -+ -+u_char elan4_permreadonly[] = -+{ -+ PERM_Disabled, /* PERM_Disabled */ -+ PERM_Disabled, /* PERM_Unused */ -+ PERM_LocDataRead, /* PERM_LocDataRead */ -+ PERM_LocDataRead, /* PERM_LocDataWrite */ -+ PERM_LocRead, /* PERM_LocRead */ -+ PERM_LocExecute, /* PERM_LocExecute */ -+ PERM_ReadOnly, /* PERM_ReadOnly */ -+ PERM_LocRead, /* PERM_LocWrite */ -+ PERM_LocEventOnly, /* PERM_LocEventOnly */ -+ PERM_LocDataRead, /* PERM_LocEventWrite */ -+ PERM_LocDataRead, /* PERM_RemoteEvent */ -+ PERM_ReadOnly, /* PERM_RemoteAll */ -+ PERM_RemoteReadOnly, /* PERM_RemoteReadOnly */ -+ PERM_ReadOnly, /* PERM_RemoteWriteLocRead */ -+ PERM_ReadOnly, /* PERM_DataReadWrite */ -+ PERM_ReadOnly, /* PERM_NoFault */ -+}; -+ -+static void -+elan4mmu_synctag (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *he, int tagidx) -+{ -+ E4_uint64 value = (he->he_tag[tagidx] & HE_TAG_VALID) ? he->he_tag[tagidx] & (TAG_ADDRESS_MASK | TAG_CONTEXT_MASK) : INVALID_CONTEXT; -+ -+ if (he->he_next) -+ value |= ((tagidx == 0) ? 
-+	    ((he->he_next->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK) :
-+	    ((he->he_next->he_entry << TAG_CHAINPTR_LOW_SHIFT) & TAG_CHAINPTR_18TO6_MASK));
-+    else if (tagidx == 0)
-+	value |= TAG_CHAINPTR_30TO19_MASK;
-+
-+    MPRINTF (DBG_DEVICE, 4, "elan4mmu_synctag: he=%p tagidx=%d he->he_tag=%llx -> value=%llx\n", he, tagidx, he->he_tag[tagidx], value);
-+
-+    elan4_sdram_writeq (dev, he->he_entry + E4MMU_TAG_OFFSET(tagidx), value);
-+}
-+
-+static void
-+elan4mmu_chain_hents (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *phe, ELAN4_HASH_ENTRY *he)
-+{
-+    ASSERT ((elan4_sdram_readq (dev, phe->he_entry + E4MMU_TAG_OFFSET(0)) & TAG_CHAINPTR_30TO19_MASK) == TAG_CHAINPTR_30TO19_MASK);
-+
-+    elan4_sdram_writeq (dev, phe->he_entry + E4MMU_TAG_OFFSET(1),
-+			((phe->he_tag[1] & (TAG_ADDRESS_MASK | TAG_CONTEXT_MASK)) | ((he->he_entry << TAG_CHAINPTR_LOW_SHIFT) & TAG_CHAINPTR_18TO6_MASK)));
-+    elan4_sdram_writeq (dev, phe->he_entry + E4MMU_TAG_OFFSET(0),
-+			((phe->he_tag[0] & (TAG_ADDRESS_MASK | TAG_CONTEXT_MASK)) | ((he->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK)));
-+}
-+
-+static void
-+elan4mmu_writepte (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *he, int tagidx, int pteidx, E4_uint64 value)
-+{
-+    /*
-+     * NOTE - we can only change a valid PTE if we're upgrading its permissions,
-+     *        any other changes should have invalidated it first. */
-+
-+    MPRINTF (DBG_DEVICE, 4, "elan4mmu_writepte: he=%p tagidx=%d pteidx=%x value=%llx\n", he, tagidx, pteidx, (unsigned long long) value);
-+
-+    if (pteidx == 3)
-+    {
-+	elan4_sdram_writew (dev, he->he_entry + E4MMU_PTE3_WORD1_OFFSET(tagidx), (value >> 16) & 0xFFFF);
-+	elan4_sdram_writew (dev, he->he_entry + E4MMU_PTE3_WORD2_OFFSET(tagidx), (value >> 32) & 0xFFFF);
-+	elan4_sdram_writew (dev, he->he_entry + E4MMU_PTE3_WORD0_OFFSET(tagidx), (value >> 0) & 0xFFFF);
-+    }
-+    else
-+    {
-+	elan4_sdram_writew (dev, he->he_entry + E4MMU_PTE_HIGH_OFFSET(tagidx, pteidx), (value >> 32) & 0xFFFF);
-+	elan4_sdram_writel (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, pteidx), value & 0xFFFFFFFF);
-+    }
-+}
-+
-+static void
-+elan4mmu_invalidatepte (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *he, int tagidx, int pteidx)
-+{
-+    if (pteidx == 3)
-+	elan4_sdram_writeb (dev, he->he_entry + E4MMU_PTE3_WORD0_OFFSET(tagidx), PTE_SetPerm (PERM_Disabled));
-+    else
-+	elan4_sdram_writeb (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, pteidx), PTE_SetPerm (PERM_Disabled));
-+}
-+
-+static E4_uint64
-+elan4mmu_readpte (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *he, int tagidx, int pteidx)
-+{
-+    if (pteidx == 3)
-+	return (((E4_uint64) elan4_sdram_readw (dev, he->he_entry + E4MMU_PTE3_WORD0_OFFSET(tagidx)) << 0) |
-+		((E4_uint64) elan4_sdram_readw (dev, he->he_entry + E4MMU_PTE3_WORD1_OFFSET(tagidx)) << 16) |
-+		((E4_uint64) elan4_sdram_readw (dev, he->he_entry + E4MMU_PTE3_WORD2_OFFSET(tagidx)) << 32));
-+    else
-+	return ((E4_uint64) elan4_sdram_readl (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, pteidx)) |
-+		((E4_uint64) elan4_sdram_readw (dev, he->he_entry + E4MMU_PTE_HIGH_OFFSET(tagidx, pteidx)) << 32));
-+}
-+
-+
-+void
-+elan4mmu_flush_tlb (ELAN4_DEV *dev)
-+{
-+    PULSE_SYSCONTROL (dev, CONT_TLB_FLUSH);
-+
-+    while (read_reg64 (dev, SysControlReg) & CONT_TLB_FLUSH)
-+	DELAY (1);
-+}
-+
-+/*
-+ * elan4mmu_flush_tlb_hash - this flushes the hash copy entries and the elan
-+ * tlb. However after the write to the hash copy entry, if the elan was
-+ * in the process of walking, then it could write the hash copy with a valid
-+ * entry which we had just invalidated. However once we've seen the tlb flushed,
-+ * then if the walk engine had done a write, we need to invalidate the
-+ * hash copy entries again and reflush the tlb.
-+ *
-+ * If we're invalidating a lot of hash blocks, then the chances are that the
-+ * walk engine will perform a write - so we flush the tlb first, then invalidate
-+ * the hash copy entries, then flush the tlb again.
-+ */
-+static void
-+elan4mmu_flush_tlb_hash (ELAN4_DEV *dev, int tbl, unsigned baseidx, unsigned topidx)
-+{
-+    int       notmany = (abs(topidx - baseidx) < 5) ? 1 : 0;
-+    int       hashidx;
-+    E4_uint32 reg;
-+
-+    if (notmany)
-+	PULSE_SYSCONTROL (dev, CONT_CLEAR_WALK_WROTE_TABLES);
-+    else
-+	elan4mmu_flush_tlb(dev);
-+
-+    do {
-+	for (hashidx = baseidx; hashidx <= topidx; hashidx++)
-+	    if (dev->dev_mmuhash[tbl][hashidx].he_tag[0] & HE_TAG_COPY)
-+	    {
-+		ASSERT ((dev->dev_mmuhash[tbl][hashidx].he_tag[0] & HE_TAG_VALID) == 0);
-+		ASSERT ((dev->dev_mmuhash[tbl][hashidx].he_tag[1] & HE_TAG_VALID) == 0);
-+
-+		elan4mmu_synctag (dev, &dev->dev_mmuhash[tbl][hashidx], 0);
-+		elan4mmu_synctag (dev, &dev->dev_mmuhash[tbl][hashidx], 1);
-+	    }
-+
-+	PULSE_SYSCONTROL (dev, CONT_TLB_FLUSH);
-+
-+	while ((reg = read_reg64 (dev, SysControlReg)) & CONT_TLB_FLUSH)
-+	    DELAY (1);
-+
-+    } while (notmany-- && (reg & CONT_CLEAR_WALK_WROTE_TABLES) != 0);
-+}
-+
-+void
-+elan4mmu_display_hent (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *he, int hashidx)
-+{
-+    int tagidx;
-+
-+    elan4_debugf (DBG_DEVICE, DBG_MMU, "elan4mmu_display_hent: hashidx=%d he=%p entry at %lx\n", hashidx, he, he->he_entry);
-+    elan4_debugf (DBG_DEVICE, DBG_MMU, " next=%p prev=%p chain=%p,%p\n", he->he_next, he->he_prev, he->he_chain[0], he->he_chain[1]);
-+    for (tagidx = 0; tagidx < 2; tagidx++)
-+    {
-+	E4_uint64 tag  = elan4_sdram_readq (dev, he->he_entry + E4MMU_TAG_OFFSET(tagidx));
-+	E4_uint64 pte0 = elan4_sdram_readq (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, 0));
-+	E4_uint64 pte1 = elan4_sdram_readq (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, 1));
-+	E4_uint64 pte2 = elan4_sdram_readq (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, 2));
-+	E4_uint64 pte3 = ((pte0 >> 48) | (pte1 >> 32) | (pte2 >> 16));
-+
-+	elan4_debugf (DBG_DEVICE, DBG_MMU, " Tag %d (%llx,%08x) context=%04x vaddr=%llx\n", tagidx, he->he_tag[tagidx], he->he_pte[tagidx], (int) (tag & TAG_CONTEXT_MASK), (tag & TAG_ADDRESS_MASK));
-+	elan4_debugf (DBG_DEVICE, DBG_MMU, " Pte 0 - PPN=%llx PERM=%x TYPE=%x%s%s\n", (pte0 & PTE_PPN_MASK) >> PTE_PPN_SHIFT,
-+		      (int) (pte0 & PTE_PERM_MASK) >> PTE_PERM_SHIFT, (int)(pte0 & PTE_TYPE_MASK), (pte0 & PTE_MOD_MASK) ? " mod" : "", (pte0 & PTE_REF_MASK) ? " ref" : "");
-+	elan4_debugf (DBG_DEVICE, DBG_MMU, " Pte 1 - PPN=%llx PERM=%x TYPE=%x%s%s\n", (pte1 & PTE_PPN_MASK) >> PTE_PPN_SHIFT,
-+		      (int) (pte1 & PTE_PERM_MASK) >> PTE_PERM_SHIFT, (int)(pte1 & PTE_TYPE_MASK), (pte1 & PTE_MOD_MASK) ? " mod" : "", (pte1 & PTE_REF_MASK) ? " ref" : "");
-+	elan4_debugf (DBG_DEVICE, DBG_MMU, " Pte 2 - PPN=%llx PERM=%x TYPE=%x%s%s\n", (pte2 & PTE_PPN_MASK) >> PTE_PPN_SHIFT,
-+		      (int) (pte2 & PTE_PERM_MASK) >> PTE_PERM_SHIFT, (int)(pte2 & PTE_TYPE_MASK), (pte2 & PTE_MOD_MASK) ? " mod" : "", (pte2 & PTE_REF_MASK) ? " ref" : "");
-+	elan4_debugf (DBG_DEVICE, DBG_MMU, " Pte 3 - PPN=%llx PERM=%x TYPE=%x%s%s\n", (pte3 & PTE_PPN_MASK) >> PTE_PPN_SHIFT,
-+		      (int) (pte3 & PTE_PERM_MASK) >> PTE_PERM_SHIFT, (int)(pte3 & PTE_TYPE_MASK), (pte3 & PTE_MOD_MASK) ? " mod" : "", (pte3 & PTE_REF_MASK) ? " ref" : "");
-+    }
-+}
-+
-+static __inline__ ELAN4_HASH_ENTRY *
-+he_ctxt_next (ELAN4_HASH_ENTRY *he, int ctxnum)
-+{
-+    return ((he->he_tag[0] & TAG_CONTEXT_MASK) == ctxnum) ? he->he_chain[0] : he->he_chain[1];
-+}
-+
-+static __inline__ ELAN4_HASH_ENTRY *
-+he_ctxt_unlink (ELAN4_CTXT *ctxt, int tbl, int hashidx, ELAN4_HASH_ENTRY *prevhe, ELAN4_HASH_ENTRY *he, ELAN4_HASH_ENTRY *next)
-+{
-+    /* Check whether either tag is in use by this context */
-+    if ((he->he_tag[0] & TAG_CONTEXT_MASK) == ctxt->ctxt_num || (he->he_tag[1] & TAG_CONTEXT_MASK) == ctxt->ctxt_num)
-+	return he;
-+
-+    if (prevhe == NULL)
-+	ctxt->ctxt_mmuhash[tbl][hashidx] = next;
-+    else
-+    {
-+	/* previous he, ensure that both chain pointers are changed if this ctxt is using both tags */
-+	ASSERT ((prevhe->he_tag[0] & TAG_CONTEXT_MASK) == ctxt->ctxt_num || (prevhe->he_tag[1] & TAG_CONTEXT_MASK) == ctxt->ctxt_num);
-+
-+	if ((prevhe->he_tag[0] & TAG_CONTEXT_MASK) == ctxt->ctxt_num)
-+	    prevhe->he_chain[0] = next;
-+	if ((prevhe->he_tag[1] & TAG_CONTEXT_MASK) == ctxt->ctxt_num)
-+	    prevhe->he_chain[1] = next;
-+    }
-+
-+    return prevhe;
-+}
-+
-+void
-+elan4mmu_display (ELAN4_CTXT *ctxt, int tbl, const char *tag)
-+{
-+    ELAN4_DEV        *dev = ctxt->ctxt_dev;
-+    ELAN4_HASH_ENTRY *he;
-+    int               hashidx;
-+
-+    for (hashidx = 0; hashidx < dev->dev_hashsize[tbl]; hashidx++)
-+	for (he = ctxt->ctxt_mmuhash[tbl][hashidx]; he != NULL; he = he_ctxt_next (he, ctxt->ctxt_num))
-+	{
-+	    elan4_debugf (DBG_DEVICE, DBG_MMU, "%s: hashidx=%d he=%p tags <%llx,%llx>\n", tag, hashidx, he,
-+			  (he->he_tag[0] & TAG_CONTEXT_MASK) == ctxt->ctxt_num ? E4MMU_TAG2VADDR (he->he_tag[0], hashidx, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1) : 0,
-+			  (he->he_tag[1] & TAG_CONTEXT_MASK) == ctxt->ctxt_num ? E4MMU_TAG2VADDR (he->he_tag[1], hashidx, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1) : 0);
-+	    elan4mmu_display_hent (dev, he, hashidx);
-+	}
-+}
-+static ELAN4_HASH_ENTRY *
-+elan4mmu_find_next_free (ELAN4_HASH_ENTRY *he)
-+{
-+    /* the current one could be free */
-+    /* returns NULL if no free one found */
-+    while ( he )
-+    {
-+	if ( ((he->he_tag[0] & TAG_CONTEXT_MASK) == INVALID_CONTEXT) || ((he->he_tag[1] & TAG_CONTEXT_MASK) == INVALID_CONTEXT))
-+	    return he;
-+	he = he->he_next;
-+    }
-+    return (NULL);
-+}
-+static ELAN4_HASH_ENTRY *
-+elan4mmu_alloc_hent (ELAN4_DEV *dev, int tbl, int hashidx, E4_uint64 newtag, int *tagidx)
-+{
-+    ELAN4_HASH_ENTRY *he, *phe;
-+    unsigned long flags;
-+    int i;
-+
-+    spin_lock_irqsave (&dev->dev_mmulock, flags);
-+
-+    /* see if there are any partial free blocks */
-+    if ((he = elan4mmu_find_next_free (&dev->dev_mmuhash[tbl][hashidx])) != NULL)
-+    {
-+	*tagidx = ((he->he_tag[0] & TAG_CONTEXT_MASK) == INVALID_CONTEXT) ? 0 : 1;
-+
-+	MPRINTF (DBG_DEVICE, 3, "elan4mmu_alloc_hent: allocate he=%p idx=%d%s\n", he, *tagidx, (he == &dev->dev_mmuhash[tbl][hashidx]) ? " hash-block" : "");
-+
-+	he->he_tag[*tagidx] = newtag | HE_TAG_VALID;
-+
-+	elan4mmu_synctag (dev, he, *tagidx);
-+
-+	spin_unlock_irqrestore (&dev->dev_mmulock, flags);
-+	return (he);
-+    }
-+
-+    if ((he = dev->dev_mmufreelist) != NULL)
-+	dev->dev_mmufreelist = he->he_next;
-+    else
-+    {
-+	ELAN4_HASH_CHUNK *hc;
-+	sdramaddr_t       entry;
-+
-+	KMEM_ALLOC (hc, ELAN4_HASH_CHUNK *, sizeof (ELAN4_HASH_CHUNK), 0);
-+
-+	if (hc == NULL)
-+	{
-+	    spin_unlock_irqrestore (&dev->dev_mmulock, flags);
-+	    return ((ELAN4_HASH_ENTRY *) NULL);
-+	}
-+
-+	if ((entry = elan4_sdram_alloc (dev, sizeof (E4_HashTableEntry) * ELAN4_HENT_CHUNKS)) == (sdramaddr_t) 0)
-+	{
-+	    spin_unlock_irqrestore (&dev->dev_mmulock, flags);
-+
-+	    KMEM_FREE (hc, sizeof (ELAN4_HASH_CHUNK));
-+	    return ((ELAN4_HASH_ENTRY *) NULL);
-+	}
-+
-+	list_add_tail (&hc->hc_link, &dev->dev_hc_list);
-+
-+	elan4_sdram_zeroq_sdram (dev, entry, sizeof (E4_HashTableEntry) * ELAN4_HENT_CHUNKS);
-+
-+	/* now initialise all chunks and chain all but the first onto the freelist */
-+	for (i = 0; i < ELAN4_HENT_CHUNKS; i++, entry += sizeof (E4_HashTableEntry))
-+	{
-+	    hc->hc_hents[i].he_entry = entry;
-+
-+	    if (i == 0)
-+		he = &hc->hc_hents[0];
-+	    else
-+	    {
-+		hc->hc_hents[i].he_next = dev->dev_mmufreelist;
-+		dev->dev_mmufreelist = &hc->hc_hents[i];
-+	    }
-+	}
-+    }
-+
-+    /* Initialise hash entry, using slot 0 */
-+    *tagidx = 0;
-+
-+    he->he_next     = NULL;
-+    he->he_prev     = NULL;
-+    he->he_chain[0] = NULL;
-+    he->he_chain[1] = NULL;
-+    he->he_tag[0]   = newtag | HE_TAG_VALID;
-+    he->he_tag[1]   = E4MMU_TAG(0, INVALID_CONTEXT);
-+    he->he_pte[0]   = 0;
-+    he->he_pte[1]   = 0;
-+
-+    elan4mmu_synctag (dev, he, 0);
-+
-+    /* add to mmuhash lists */
-+    for (phe = &dev->dev_mmuhash[tbl][hashidx]; phe->he_next; phe = phe->he_next)
-+	;
-+    phe->he_next = he;
-+    he->he_prev  = phe;
-+    he->he_next  = NULL;
-+
-+    /* finally chain the hash block into the hash tables */
-+    elan4mmu_chain_hents (dev, phe, he);
-+
-+    spin_unlock_irqrestore (&dev->dev_mmulock, flags);
-+    return (he);
-+}
-+void
-+elan4mmu_set_shuffle(ELAN4_CTXT *ctxt, int tbl, int hashidx)
-+{
-+    int i;
-+
-+    for(i=0; (i<ELAN4_CTXT_MAX_SHUFFLE) && (ctxt->shuffle[tbl][i]>=0) && (ctxt->shuffle[tbl][i]!=hashidx); i++)
-+	;
-+    if (i<ELAN4_CTXT_MAX_SHUFFLE) {
-+	ctxt->shuffle_needed[tbl] = 1;
-+	ctxt->shuffle[tbl][i] = hashidx;
-+    }
-+}
-+static int
-+elan4mmm_try_to_free_hent(ELAN4_DEV *dev, int tbl, int hashidx, ELAN4_HASH_ENTRY *he)
-+{
-+    ELAN4_HASH_ENTRY *prev;
-+    int               t;
-+    ELAN4_CTXT       *ctxt;
-+
-+
-+    while (he) {
-+	if ( ((he->he_tag[0] & TAG_CONTEXT_MASK) == INVALID_CONTEXT)
-+	     && ((he->he_tag[1] & TAG_CONTEXT_MASK) == INVALID_CONTEXT)) {
-+	    /* Both tags are now free */
-+
-+	    if (he != &dev->dev_mmuhash[tbl][hashidx]) {
-+		/* it's not the hash entry block */
-+
-+		if ( he->he_next == NULL ) {
-+		    /* it's the end one so just remove it */
-+		    prev = he->he_prev;
-+
-+		    /* make the previous entry the end one and sync it */
-+		    prev->he_next = NULL;
-+		    elan4mmu_synctag (dev, prev, 0);
-+
-+		    /* make sure the elan has finished traversing the list */
-+		    elan4mmu_flush_tlb(dev);
-+
-+		    /* now we have a free he in our hands put it onto the free list */
-+		    he->he_next = dev->dev_mmufreelist;
-+		    dev->dev_mmufreelist = he;
-+
-+		    elan4_mmuhash_chain_end_reduce++;
-+
-+		    he = prev;
-+		} else {
-+		    /* can only remove if my he_entry high bits = next he_entry high bits. */
-+
-+		    if (((he->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK) ==
-+			((he->he_next->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK))
-+		    {
-+			prev = he->he_prev;
-+
-+			/* make the previous entry jump over us and sync it */
-+			prev->he_next = he->he_next;
-+			elan4mmu_synctag (dev, prev, 1);
-+			he->he_next->he_prev = prev;
-+
-+			/* make sure the elan has finished traversing the list */
-+			elan4mmu_flush_tlb(dev);
-+
-+			/* now we have a free he in our hands put it onto the free list */
-+			he->he_next = dev->dev_mmufreelist;
-+			dev->dev_mmufreelist = he;
-+
-+			elan4_mmuhash_chain_middle_reduce++;
-+
-+			he = prev;
-+
-+		    } else {
-+			elan4_mmuhash_chain_middle_fail++;
-+			/* At this point we wanted to remove an entry but we can't, because this would mean
-+			   changing the high bits of the previous pointer.
-+
-+			   It is assumed that this is a fairly rare occurrence.
-+
-+			   The plan is to tell the ctxt's in the end entry (which can always be removed)
-+			   to shuffle down. They need to do this as it's guarded by a ctxt lock we don't have.
-+
-+			   Note the ctxt entry might not exist by the time they get round to shuffling,
-+			   and/or the empty node we want to shuffle to might have gone, so there is no
-+			   value in storing info about what you want to shuffle.
-+
-+			   Just tell the ctxt to shuffle this hashidx. Rather than allocate a block
-+			   of memory the size of the number of hashidx's to handle this, we use
-+			   a short array, assuming it's rarely going to fill. If it does, all the ctxt's
-+			   hashidx's are shuffled (as it's really unlikely to happen).
-+			*/
-+
-+			/* mark all up to the end as needing shuffle */
-+			while (he->he_next) {
-+			    for(t=0;t<2;t++) {
-+				if ((he->he_tag[t] & TAG_CONTEXT_MASK)!=INVALID_CONTEXT) {
-+				    ctxt = elan4_localctxt (dev, (he->he_tag[t] & TAG_CONTEXT_MASK));
-+				    if (ctxt) {
-+					ASSERT(ctxt->ctxt_ops);
-+					if (ctxt->ctxt_ops->op_need_shuffle)
-+					    ctxt->ctxt_ops->op_need_shuffle (ctxt, tbl, hashidx);
-+				    }
-+				}
-+			    }
-+			    he = he->he_next;
-+			}
-+
-+			he = NULL;
-+		    }
-+		}
-+	    } else he = NULL;
-+	} else he = NULL;
-+    }
-+    return (0);
-+}
-+static void
-+elan4mmu_free_hent_nolock (ELAN4_DEV *dev, int tbl, int hashidx, ELAN4_HASH_ENTRY *he, int tagidx)
-+{
-+    /* assumes someone holds the mmulock before this is called */
-+    int pteidx;
-+
-+    /* Invalidate the tag, and zero all ptes */
-+    for (pteidx = 0; pteidx < 4; pteidx++)
-+	if (HE_GET_PTE(he, tagidx, pteidx) != HE_TYPE_INVALID)
-+	    elan4mmu_writepte (dev, he, tagidx, pteidx, 0);
-+
-+    he->he_tag[tagidx] = E4MMU_TAG(0, INVALID_CONTEXT);
-+    he->he_pte[tagidx] = 0;
-+
-+    elan4mmu_synctag (dev, he, tagidx);
-+
-+    if ((he->he_tag[tagidx^1] & TAG_CONTEXT_MASK) == INVALID_CONTEXT)	/* Both tags are now free */
-+    {
-+	if (he == &dev->dev_mmuhash[tbl][hashidx])			/* it's the hash block entry */
-+	{								/* so as it's already on the freelist */
-+	    he->he_chain[tagidx] = he->he_chain[tagidx^1];		/* just copy its chain pointers */
-+
-+	    MPRINTF (DBG_DEVICE, 3, "elan4mmu_free_hent: tbl=%d hashidx=%x tagidx=%d he=%p => all free but hashblk\n", tbl, hashidx, tagidx, he);
-+	}
-+	else
-+	{
-+	    MPRINTF (DBG_DEVICE, 3, "elan4mmu_free_hent: tbl=%d hashidx=%x tagidx=%d he=%p => all free\n", tbl, hashidx, tagidx, he);
-+
-+	    /* remove it from the hash table, and place back on the anonymous freelist */
-+	    he->he_chain[tagidx] = he->he_chain[tagidx^1];
-+
-+	    if (elan4_mmuhash_chain_reduction) {
-+		elan4mmm_try_to_free_hent (dev, tbl, hashidx, he);
-+	    }
-+	}
-+    }
-+    else
-+    {
-+	/* Other tag still in use */
-+	MPRINTF (DBG_DEVICE, 3, "elan4mmu_free_hent: tbl=%d hashidx=%x tagidx=%d he=%p => other tag in use\n", tbl, hashidx, tagidx, he);
-+    }
-+}
-+static void
-+elan4mmu_free_hent (ELAN4_DEV *dev, int tbl, int hashidx, ELAN4_HASH_ENTRY *he, int tagidx)
-+{
-+    unsigned long flags;
-+
-+    spin_lock_irqsave (&dev->dev_mmulock, flags);
-+    elan4mmu_free_hent_nolock (dev, tbl, hashidx, he, tagidx);
-+    spin_unlock_irqrestore (&dev->dev_mmulock, flags);
-+}
-+void
-+print_dev(ELAN4_DEV *dev, int tbl, int index)
-+{
-+    ELAN4_HASH_ENTRY *he = &dev->dev_mmuhash[tbl][index];
-+    int count=0;
-+
-+    while (he) {
-+	qsnet_debugf(1,"(dev) he%s %p entry 0x%010lx he_(%p,%p) chain(%p,%p) tag(0x%016llx,0x%016llx) pte(0x%010x,0x%010x)%s\n",
-+		     (he==&dev->dev_mmuhash[tbl][index])?"*":" ", he,
-+		     he->he_entry, he->he_next, he->he_prev, he->he_chain[0], he->he_chain[1],
-+		     (long long)he->he_tag[0], (long long)he->he_tag[1], he->he_pte[0], he->he_pte[1],
-+		     (he->he_next)? (( ((he->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK) ==
-+				       ((he->he_next->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK))?" ":"*"):" ");
-+	he = he->he_next;
-+
-+	if (count++ > 1000) {
-+	    qsnet_debugf(1,"List Failed\n");
-+	    he = NULL;
-+	    elan4_mmuhash_chain_reduction = 0;
-+	}
-+    }
-+}
-+void
-+print_ctx(ELAN4_CTXT *ctxt, int tbl, int index)
-+{
-+    ELAN4_HASH_ENTRY *he  = ctxt->ctxt_mmuhash[tbl][index];
-+    ELAN4_DEV        *dev = ctxt->ctxt_dev;
-+    int count=0;
-+
-+    while (he) {
-+	qsnet_debugf(1,"(%04d) he%s %p entry 0x%010lx he_(%p,%p) chain(%p,%p) tag(0x%016llx,0x%016llx) pte(0x%010x,0x%010x)%s\n",
-+		     ctxt->ctxt_num, (he==&dev->dev_mmuhash[tbl][index])?"*":" ", he,
-+		     he->he_entry, he->he_next, he->he_prev, he->he_chain[0], he->he_chain[1],
-+		     (long long)he->he_tag[0], (long long)he->he_tag[1], he->he_pte[0], he->he_pte[1],
-+		     (he->he_next)?(( ((he->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK) ==
-+				      ((he->he_next->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK))?" ":"*"):" ");
-+
-+	if (((he->he_tag[0] & TAG_CONTEXT_MASK) != ctxt->ctxt_num) && ((he->he_tag[1] & TAG_CONTEXT_MASK) != ctxt->ctxt_num)) {
-+	    qsnet_debugf(1,"(%04d) neither tag is us so stopping 0x%llx 0x%llx \n", ctxt->ctxt_num, (long long)(he->he_tag[0] & TAG_CONTEXT_MASK), (long long)(he->he_tag[1] & TAG_CONTEXT_MASK));
-+	    he = NULL;
-+	} else {
-+	    he = he_ctxt_next (he, ctxt->ctxt_num);
-+	}
-+	if (count++ > 1000) {
-+	    qsnet_debugf(1,"List Failed\n");
-+	    he = NULL;
-+	    elan4_mmuhash_chain_reduction = 0;
-+	}
-+    }
-+}
-+int
-+dev_count(ELAN4_DEV *dev, int tbl, int index, int ctxt_num)
-+{
-+    ELAN4_HASH_ENTRY *he = &dev->dev_mmuhash[tbl][index];
-+    int count = 0;
-+    while (he) {
-+
-+	if ((he->he_tag[0] & TAG_CONTEXT_MASK) == ctxt_num) count++;
-+	if ((he->he_tag[1] & TAG_CONTEXT_MASK) == ctxt_num) count++;
-+
-+	he = he->he_next;
-+    }
-+    return (count);
-+}
-+int
-+ctx_count(ELAN4_CTXT *ctxt, int tbl, int index)
-+{
-+    ELAN4_HASH_ENTRY *he = ctxt->ctxt_mmuhash[tbl][index];
-+    int count = 0;
-+    while (he) {
-+
-+	if ((he->he_tag[0] & TAG_CONTEXT_MASK) == ctxt->ctxt_num) count++;
-+	if ((he->he_tag[1] & TAG_CONTEXT_MASK) == ctxt->ctxt_num) count++;
-+
-+	if (((he->he_tag[0] & TAG_CONTEXT_MASK) != ctxt->ctxt_num) && ((he->he_tag[1] & TAG_CONTEXT_MASK) != ctxt->ctxt_num)) {
-+	    he = NULL;
-+	} else {
-+	    he = he_ctxt_next (he, ctxt->ctxt_num);
-+	}
-+    }
-+    return (count);
-+}
-+void
-+elan4mmu_shuffle_up (ELAN4_CTXT *ctxt, int tbl, int hashidx)
-+{
-+    ELAN4_DEV        *dev       = ctxt->ctxt_dev;
-+    ELAN4_HASH_ENTRY *ctxt_prev = NULL;
-+    ELAN4_HASH_ENTRY *ctxt_he   = NULL;
-+    ELAN4_HASH_ENTRY *ctxt_next = NULL;
-+    ELAN4_HASH_ENTRY *hole;
-+    ELAN4_HASH_ENTRY *tmp;
-+    ELAN4_HASH_ENTRY *ctxt_remember;
-+    int               hole_tagidx;
-+    int               ctxt_tagidx;
-+    int               pteidx;
-+    E4_uint64         value;
-+
-+    elan4_mmuhash_shuffle_attempts++;
-+
-+    /* find the first hole */
-+    hole = elan4mmu_find_next_free ( &dev->dev_mmuhash[tbl][hashidx] ) ;
-+    if (hole == NULL) return;
-+
-+    /* find the last ctx */
-+    /* 1 move tmp to the end */
-+    for(ctxt_he = hole; (ctxt_he->he_next != NULL); ctxt_he = ctxt_he->he_next)
-+	;
-+    /* 2 move tmp back looking for either hole or ctxt */
-+    while ((ctxt_he != hole)
-+	   && ((ctxt_he->he_tag[0] & TAG_CONTEXT_MASK) != ctxt->ctxt_num )
-+	   && ((ctxt_he->he_tag[1] & TAG_CONTEXT_MASK) != ctxt->ctxt_num ))
-+	ctxt_he = ctxt_he->he_prev;
-+
-+    /* if we found the hole then there is no useful swap to do */
-+    if (ctxt_he == hole) return;
-+
-+    while (ctxt_he != hole) {
-+	/***********/
-+	/* do swap */
-+	/***********/
-+	elan4_mmuhash_shuffle_done++;
-+
-+	/* now we can move this ctxt's entry in ctxt_he to hole */
-+	if ( (hole->he_tag[0] & TAG_CONTEXT_MASK) == INVALID_CONTEXT ) hole_tagidx = 0;
-+	else hole_tagidx = 1;
-+
-+	if ( (ctxt_he->he_tag[0] & TAG_CONTEXT_MASK) == ctxt->ctxt_num ) ctxt_tagidx = 0;
-+	else ctxt_tagidx = 1;
-+
-+	/* find us in list do this now before list is played with */
-+	ctxt_prev = NULL;
-+	tmp       = ctxt->ctxt_mmuhash[tbl][hashidx];
-+	ctxt_next = he_ctxt_next (tmp, ctxt->ctxt_num);
-+	while (tmp != ctxt_he) {
-+	    ctxt_prev = tmp;
-+	    tmp       = ctxt_next;
-+	    ctxt_next = he_ctxt_next (tmp, ctxt->ctxt_num);
-+	}
-+
-+	/* copy over software chain and pte */
-+	hole->he_pte[hole_tagidx] = ctxt_he->he_pte[ctxt_tagidx];
-+
-+	/* copy over the valid elan pte's */
-+	/* not preserving the modified and reference bits */
-+	for (pteidx = 0; pteidx <= 3; pteidx++)
-+	    if (HE_GET_PTE(hole, hole_tagidx, pteidx))
-+	    {
-+		/* copy the pg_page and pg_dma_addr */
-+		hole->he_pg[hole_tagidx][pteidx] = ctxt_he->he_pg[ctxt_tagidx][pteidx];
-+
-+		value = elan4mmu_readpte (dev, ctxt_he, ctxt_tagidx, pteidx);
-+		elan4mmu_writepte (dev, hole, hole_tagidx, pteidx, value);
-+	    }
-+
-+	/* copy over tag and sync it */
-+	hole->he_tag[hole_tagidx] = ctxt_he->he_tag[ctxt_tagidx];
-+	elan4mmu_synctag (dev, hole, hole_tagidx);
-+
-+	/* before we remove it check if it's going to get freed */
-+	if ((ctxt_he->he_tag[ctxt_tagidx ^ 1] & TAG_CONTEXT_MASK) == INVALID_CONTEXT) {
-+	    /* this is ok as the existence of a hole guards against falling off the front of the list */
-+	    ctxt_remember = ctxt_he->he_prev;
-+	} else ctxt_remember = ctxt_he;
-+
-+	/* invalidate pte and tag */
-+	ctxt_he->he_tag[ctxt_tagidx] = E4MMU_TAG(0, INVALID_CONTEXT);
-+	elan4mmu_synctag (dev, ctxt_he, ctxt_tagidx);
-+
-+	/* should ensure that any walk in progress has completed */
-+	elan4mmu_flush_tlb(dev);
-+
-+	for (pteidx = 0; pteidx <= 3; pteidx++)
-+	    if (HE_GET_PTE(ctxt_he, ctxt_tagidx, pteidx))
-+		elan4mmu_invalidatepte (dev, ctxt_he, ctxt_tagidx, pteidx);
-+
-+	/* remove from the source end */
-+	elan4mmu_free_hent_nolock (dev, tbl, hashidx, ctxt_he, ctxt_tagidx);
-+
-+
-+	/* sort out the ctxt links */
-+	/* first the hole */
-+	if ((hole->he_tag[hole_tagidx^1]& TAG_CONTEXT_MASK) == ctxt->ctxt_num) {
-+	    /* already in the list */
-+	    hole->he_chain[hole_tagidx] = hole->he_chain[hole_tagidx^1];
-+	} else {
-+	    /* hole not in list */
-+	    hole->he_chain[hole_tagidx] = ctxt->ctxt_mmuhash[tbl][hashidx];
-+	    ctxt->ctxt_mmuhash[tbl][hashidx] = hole;
-+
-+	    /* this is one i missed for a bit */
-+	    /* if we put the hole onto the list it might become the previous */
-+	    if (ctxt_prev == NULL) ctxt_prev = hole;
-+	}
-+
-+	/* second remove the old one */
-+	if ((ctxt_he->he_tag[ctxt_tagidx^1]& TAG_CONTEXT_MASK) == ctxt->ctxt_num) {
-+	    /* Nothing to do as still in list as other tag is ours */
-+	} else {
-+	    if (ctxt_prev == NULL) {
-+		ctxt->ctxt_mmuhash[tbl][hashidx] = ctxt_next;
-+	    } else {
-+		if ((ctxt_prev->he_tag[0] & TAG_CONTEXT_MASK) == ctxt->ctxt_num)
-+		    ctxt_prev->he_chain[0] = ctxt_next;
-+		if ((ctxt_prev->he_tag[1] & TAG_CONTEXT_MASK) == ctxt->ctxt_num)
-+		    ctxt_prev->he_chain[1] = ctxt_next;
-+	    }
-+	}
-+
-+	/***********/
-+	/* move on */
-+	/***********/
-+	ctxt_he = ctxt_remember;
-+
-+	/* the hole is still a valid place to start looking */
-+	/* can't use elan4mmu_find_next_free as we need to stop if we pass ctxt_he */
-+	if (hole == ctxt_he) return;
-+	while ( hole
-+		&& ((hole->he_tag[0] & TAG_CONTEXT_MASK) != INVALID_CONTEXT)
-+		&& ((hole->he_tag[1] & TAG_CONTEXT_MASK) != INVALID_CONTEXT))
-+	{
-+	    hole = hole->he_next;
-+	    if (hole == ctxt_he) return;
-+	}
-+	if (hole == NULL) return;
-+
-+	/* start looking for the next ctxt */
-+	while ((ctxt_he != hole)
-+	       && ((ctxt_he->he_tag[0] & TAG_CONTEXT_MASK) != ctxt->ctxt_num )
-+	       && ((ctxt_he->he_tag[1] & TAG_CONTEXT_MASK) != ctxt->ctxt_num ))
-+	    ctxt_he = ctxt_he->he_prev;
-+    }
-+
-+    /* if we found the hole then there is no useful swap to do */
-+    return;
-+}
-+void
-+elan4mmu_do_shuffle (ELAN4_CTXT *ctxt, int tbl)
-+{
-+    int           i;
-+    ELAN4_DEV    *dev;
-+    unsigned long flags;
-+
-+    if (!ctxt) return;
-+    dev = ctxt->ctxt_dev;
-+
-+    spin_lock (&ctxt->ctxt_mmulock);
-+
-+    for(i=0; i < ELAN4_CTXT_MAX_SHUFFLE ;i++)
-+    {
-+	if (ctxt->shuffle[tbl][i] != -1)
-+	{
-+	    spin_lock_irqsave (&dev->dev_mmulock, flags);
-+
-+	    elan4mmu_shuffle_up(ctxt, tbl, ctxt->shuffle[tbl][i]);
-+	    ctxt->shuffle[tbl][i] = -1;
-+
-+	    spin_unlock_irqrestore (&dev->dev_mmulock, flags);
-+	}
-+    }
-+
-+    ctxt->shuffle_needed[tbl] = 0;
-+
-+
spin_unlock (&ctxt->ctxt_mmulock); -+} -+ -+ELAN4_HASH_ENTRY * -+elan4mmu_ptealloc (ELAN4_CTXT *ctxt, int tbl, E4_Addr vaddr, unsigned int *tagidxp) -+{ -+ ELAN4_DEV *dev = ctxt->ctxt_dev; -+ unsigned ctxnum = ctxt->ctxt_num; -+ unsigned hashidx = E4MMU_HASH_INDEX (ctxnum, vaddr, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1); -+ E4_uint64 newtag = E4MMU_TAG(vaddr, ctxnum); -+ ELAN4_HASH_ENTRY *he = &dev->dev_mmuhash[tbl][hashidx]; -+ unsigned tagidx; -+ -+ MPRINTF (ctxt, 2, "elan4mmu_ptealloc: tbl=%d ctxnum=%d vaddr=%llx -> hashidx %d\n", tbl, ctxnum, vaddr, hashidx); -+ -+ /* 1st) check whether we're reloading an existing entry */ -+ for (he = ctxt->ctxt_mmuhash[tbl][hashidx]; he != NULL; he = he_ctxt_next (he, ctxnum)) -+ { -+ ASSERT ((he->he_tag[0] & TAG_CONTEXT_MASK) == ctxnum || (he->he_tag[1] & TAG_CONTEXT_MASK) == ctxnum); -+ -+ for (tagidx = 0; tagidx < 2; tagidx++) -+ { -+ if ((he->he_tag[tagidx] & (TAG_ADDRESS_MASK | TAG_CONTEXT_MASK | HE_TAG_VALID)) == (newtag | HE_TAG_VALID)) -+ { -+ MPRINTF (ctxt, 2, "elan4mmu_ptealloc: return old he %p tagidx %d\n", he, tagidx); -+ -+ *tagidxp = tagidx; -+ return he; -+ } -+ } -+ } -+ -+ if ((he = elan4mmu_alloc_hent (dev, tbl, hashidx, newtag, &tagidx)) == NULL) -+ return NULL; -+ -+ /* chain onto context hash */ -+ if ((he->he_tag[tagidx ^ 1] & TAG_CONTEXT_MASK) == ctxnum) /* already chained using other link */ -+ { /* so ensure both slots are chained the same */ -+ he->he_chain[tagidx] = he->he_chain[tagidx^1]; -+ } -+ else -+ { -+ he->he_chain[tagidx] = ctxt->ctxt_mmuhash[tbl][hashidx]; -+ ctxt->ctxt_mmuhash[tbl][hashidx] = he; -+ } -+ -+ MPRINTF (ctxt, 2, "elan4mmu_ptealloc: return new he %p tagidx %d\n", he, tagidx); -+ -+ *tagidxp = tagidx; -+ -+ return he; -+} -+ -+int -+elan4mmu_pteload (ELAN4_CTXT *ctxt, int tbl, E4_Addr vaddr, int type, E4_uint64 newpte) -+{ -+ ELAN4_DEV *dev = ctxt->ctxt_dev; -+ unsigned pteidx = E4MMU_SHIFT_ADDR(vaddr, dev->dev_pageshift[tbl]) & 3; -+ unsigned tagidx; -+ ELAN4_HASH_ENTRY *he; -+ -+ MPRINTF (ctxt, 0, "elan4mmu_pteload: ctx=%d tbl=%d pteidx=%d vaddr=%llx type=%d pte=%llx\n", -+ ctxt->ctxt_num, tbl, pteidx, (unsigned long long)vaddr, type, newpte); -+ -+ spin_lock (&ctxt->ctxt_mmulock); -+ -+ if ((he = elan4mmu_ptealloc (ctxt, tbl, vaddr, &tagidx)) == NULL) -+ { -+ spin_unlock (&ctxt->ctxt_mmulock); -+ return -ENOMEM; -+ } -+ -+ MPRINTF (ctxt, 1, "elan4mmu_pteload: %s he=%p tagidx=%d pteidx=%d\n", HE_GET_PTE(he,0,pteidx) ? 
"reloading" : "loading", he, tagidx, pteidx); -+ -+ if (HE_GET_PTE(he,tagidx,pteidx) != HE_TYPE_INVALID && /* invalid -> valid */ -+ (elan4mmu_readpte (dev, he, tagidx, pteidx) & PTE_PPN_MASK) != (newpte & PTE_PPN_MASK)) /* or same phys address */ -+ { -+ spin_unlock (&ctxt->ctxt_mmulock); -+ return -EINVAL; -+ } -+ -+ elan4mmu_writepte (dev, he, tagidx, pteidx, newpte); -+ -+ HE_SET_PTE(he, tagidx, pteidx, type); -+ -+ spin_unlock (&ctxt->ctxt_mmulock); -+ return 0; -+} -+ -+int -+elan4mmu_pteload_page (ELAN4_CTXT *ctxt, int tbl, E4_Addr vaddr, struct page *page, int perm) -+{ -+ ELAN4_DEV *dev = ctxt->ctxt_dev; -+ unsigned int pteidx = E4MMU_SHIFT_ADDR(vaddr, dev->dev_pageshift[tbl]) & 3; -+ unsigned int tagidx; -+ unsigned int type; -+ E4_uint64 newpte; -+ int topaddr; -+ ELAN4_HASH_ENTRY *he; -+ ELAN4_PTE_PAGE *pg; -+ -+ MPRINTF (ctxt, 1, "elan4mmu_pteload_page: ctx=%d tbl=%d pteidx=%d vaddr=%llx page=%p\n", -+ ctxt->ctxt_num, tbl, pteidx, (unsigned long long)vaddr, page); -+ -+ spin_lock (&ctxt->ctxt_mmulock); -+ if ((he = elan4mmu_ptealloc (ctxt, tbl, vaddr, &tagidx)) == NULL) -+ { -+ MPRINTF (ctxt, 1, "elan4mmu_pteload_page: ctx=%d failed ENOMEM\n", ctxt->ctxt_num); -+ spin_unlock (&ctxt->ctxt_mmulock); -+ return -ENOMEM; -+ } -+ -+ pg = &he->he_pg[tagidx][pteidx]; -+ -+ if (HE_GET_PTE(he,tagidx,pteidx) != HE_TYPE_INVALID && pg->pg_page != page) /* invalid -> valid, or same page*/ -+ { -+ MPRINTF (ctxt, 1, "elan4mmu_pteload_page: ctx=%d failed: pg_page=%p page=%p PTE=%x EINVAL\n", -+ ctxt->ctxt_num, pg->pg_page, page, HE_GET_PTE(he,tagidx,pteidx)); -+ -+ spin_unlock (&ctxt->ctxt_mmulock); -+ return -EINVAL; -+ } -+ -+ if (HE_GET_PTE (he,tagidx,pteidx) == HE_TYPE_INVALID) -+ { -+ if ((ctxt->ctxt_features & ELAN4_FEATURE_PIN_DOWN) != 0) -+ page_cache_get (page); -+ -+ pg->pg_page = page; -+ -+ if (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_PCI_MAP) -+ { -+ struct scatterlist sg; -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) -+ sg.address = NULL; -+#endif -+ sg.page = page; -+ sg.offset = 0; -+ sg.length = PAGE_SIZE; -+ -+ if (pci_map_sg (dev->dev_osdep.pdev, &sg, 1, PCI_DMA_BIDIRECTIONAL) == 0 || sg.length == 0) -+ { -+ spin_unlock (&ctxt->ctxt_mmulock); -+ return -EFAULT; -+ } -+ -+ pg->pg_dma_addr = sg.dma_address | (vaddr & (PAGE_SIZE-1)); -+ -+ MPRINTF (ctxt, 1, "elan4mmu_pteload_page: pci_map_sg -> %lx\n", pg->pg_dma_addr); -+ } -+ else -+ { -+ pg->pg_dma_addr = (page_to_pfn (page) << PAGE_SHIFT) | (vaddr & (PAGE_SIZE-1)); -+ -+ MPRINTF (ctxt, 1, "elan4mmu_pteload_page: directmap -> %lx\n", pg->pg_dma_addr); -+ } -+ } -+ -+#if defined(__BIG_ENDIAN__) -+ type = PTE_SetPerm (perm) | PTE_PciNotLocal | PTE_BigEndian; -+#else -+ type = PTE_SetPerm (perm) | PTE_PciNotLocal; -+#endif -+ -+ topaddr = elan4mmu_alloc_topaddr (dev, pg->pg_dma_addr, type); -+ -+ if (dev->dev_topaddrmode) -+ newpte = dev->dev_pteval | (pg->pg_dma_addr >> PTE_PADDR_SHIFT) | (type & ~0xc) | (topaddr << 2); -+ else -+ newpte = dev->dev_pteval | ((pg->pg_dma_addr >> PTE_PADDR_SHIFT) & ~PTE_TOPADDR_MASK) | (((E4_uint64) topaddr) << 45) | type; -+ -+ ASSERT (HE_GET_PTE(he,tagidx,pteidx) == HE_TYPE_INVALID || /* invalid -> valid */ -+ (elan4mmu_readpte (dev, he, tagidx, pteidx) & PTE_PPN_MASK) == (newpte & PTE_PPN_MASK)); /* or same phys address */ -+ -+ elan4mmu_writepte (dev, he, tagidx, pteidx, newpte); -+ -+ HE_SET_PTE (he, tagidx, pteidx, HE_TYPE_PAGE); -+ -+ spin_unlock (&ctxt->ctxt_mmulock); -+ -+ return 0; -+} -+ -+void -+elan4mmu_pteunload (ELAN4_CTXT *ctxt, 
ELAN4_HASH_ENTRY *he, unsigned int tagidx, unsigned int pteidx) -+{ -+ ELAN4_DEV *dev = ctxt->ctxt_dev; -+ ELAN4_PTE_PAGE *pg = &he->he_pg[tagidx][pteidx]; -+ -+ switch (HE_GET_PTE(he,tagidx,pteidx)) -+ { -+ case HE_TYPE_PAGE: -+ MPRINTF (DBG_DEVICE, 1, "elan4mmu_pteunload: he=%p tagidx=%d pteidx=%d page=%p -> %lx\n", -+ he, tagidx, pteidx, pg->pg_page, pg->pg_dma_addr); -+ -+ if (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_PCI_MAP) -+ { -+ struct scatterlist sg; -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) -+ sg.address = NULL; -+#endif -+ sg.page = pg->pg_page; -+ sg.offset = 0; -+ sg.length = PAGE_SIZE; -+ sg.dma_address = pg->pg_dma_addr; -+ -+ pci_unmap_sg (dev->dev_osdep.pdev, &sg, 1, PCI_DMA_BIDIRECTIONAL); -+ } -+ -+ if ((ctxt->ctxt_features & ELAN4_FEATURE_PIN_DOWN) != 0) -+ page_cache_release (pg->pg_page); -+ -+ pg->pg_page = NULL; -+ pg->pg_dma_addr = 0; -+ break; -+ } -+ -+ HE_SET_PTE(he, tagidx, pteidx, HE_TYPE_INVALID); -+ -+ elan4mmu_writepte (dev, he, tagidx, pteidx, 0); -+} -+ -+ -+void -+elan4mmu_unload_range (ELAN4_CTXT *ctxt, int tbl, E4_Addr start, unsigned long len) -+{ -+ ELAN4_DEV *dev = ctxt->ctxt_dev; -+ unsigned ctxnum = ctxt->ctxt_num; -+ unsigned long tagspan = (1 << (dev->dev_pageshift[tbl] + 2)); -+ E4_Addr end = start + len - 1; -+ int needflush = 0; -+ unsigned baseidx, topidx; -+ unsigned hashidx, tagidx, pteidx; -+ ELAN4_HASH_ENTRY *he, *prevhe, *next; -+ -+ MPRINTF (ctxt, 0, "elan4mmu_unload_range: tbl=%d start=%llx end=%llx len=%lx\n", tbl, start, end, len); -+ -+ /* determine how much of the hash table we've got to scan */ -+ -+ /* GNAT 6760: When we have a Main page size which maps onto multiple Elan pages -+ * we need to do something a bit more clever here or else it takes ms per page invalidate -+ * This change helps in the meantime -+ */ -+ /* if (len <= (1 << dev->dev_pageshift[tbl])) */ -+ if (len <= PAGE_SIZE) -+ { -+ baseidx = E4MMU_HASH_INDEX (ctxnum, start, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1); -+ topidx = E4MMU_HASH_INDEX (ctxnum, end, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1); -+ -+ if (baseidx != topidx) -+ { -+ /* GNAT 6760: Need to search whole of the hash table (slow!) */ -+ baseidx = 0; -+ topidx = dev->dev_hashsize[tbl] - 1; -+ } -+ } -+ else -+ { -+ baseidx = 0; -+ topidx = dev->dev_hashsize[tbl] - 1; -+ } -+ -+ MPRINTF (ctxt, 1, "elan4mmu_unload_range: baseidx=%d topidx=%d\n", baseidx, topidx); -+ -+ spin_lock (&ctxt->ctxt_mmulock); -+ -+ /* 1st - invalidate the tag for all hash blocks which are completely invalidated, -+ * and remember the first/last hash blocks */ -+ for (hashidx = baseidx; hashidx <= topidx; hashidx++) -+ for (he = ctxt->ctxt_mmuhash[tbl][hashidx]; he != NULL; he = he_ctxt_next (he, ctxnum)) -+ for (tagidx = 0; tagidx < 2; tagidx++) -+ if ((he->he_tag[tagidx] & TAG_CONTEXT_MASK) == ctxnum) -+ { -+ E4_Addr base = E4MMU_TAG2VADDR (he->he_tag[tagidx], hashidx, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1); -+ E4_Addr top = base + (tagspan -1); -+ -+ if (start < top && end > base) -+ { -+ unsigned bidx = (start <= base) ? 0 : (start & (tagspan-1)) >> dev->dev_pageshift[tbl]; -+ unsigned tidx = (end >= top) ? 
3 : (end & (tagspan-1)) >> dev->dev_pageshift[tbl]; -+ -+ MPRINTF (ctxt, 1, "elan4mmu_unload_range: he=%p base=%llx top=%llx hashidx=%d bidx=%d tidx=%d\n", he, base, top, hashidx, bidx, tidx); -+ -+ for (pteidx = bidx; pteidx <= tidx; pteidx++) -+ if (HE_GET_PTE(he, tagidx, pteidx) != HE_TYPE_INVALID) -+ { -+ elan4mmu_invalidatepte (dev, he, tagidx, pteidx); -+ needflush = 1; -+ } -+ } -+ else if (base >= start && top <= end) /* hash entry completely spanned */ -+ { /* so invalidate the tag */ -+ MPRINTF (ctxt, 1, "elan4mmu_unload_range: he=%p base=%llx top=%llx spanned\n", he, base, top); -+ -+ he->he_tag[tagidx] &= ~HE_TAG_VALID; -+ -+ elan4mmu_synctag (dev, he, tagidx); -+ needflush = 1; -+ } -+ } -+ -+ if (needflush) -+ { -+ /* 2nd invalidate the first/last hash blocks if they are partially invalidated -+ * and flush the tlb/hash copy blocks */ -+ elan4mmu_flush_tlb_hash (dev, tbl, baseidx, topidx); -+ -+ /* 3rd free off the hash entries which are completely invalidated */ -+ for (hashidx = baseidx; hashidx <= topidx; hashidx++) -+ for (prevhe = NULL, he = ctxt->ctxt_mmuhash[tbl][hashidx]; he != NULL; he = next) -+ { -+ next = he_ctxt_next (he, ctxnum); -+ -+ for (tagidx = 0; tagidx < 2; tagidx++) -+ if ((he->he_tag[tagidx] & TAG_CONTEXT_MASK) == ctxnum) -+ { -+ E4_Addr base = E4MMU_TAG2VADDR (he->he_tag[tagidx], hashidx, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1); -+ E4_Addr top = base + (tagspan -1); -+ -+ if (start < top && end > base) -+ { -+ unsigned bidx = (start <= base) ? 0 : (start & (tagspan-1)) >> dev->dev_pageshift[tbl]; -+ unsigned tidx = (end >= top) ? 3 : (end & (tagspan-1)) >> dev->dev_pageshift[tbl]; -+ -+ MPRINTF (ctxt, 1, "elan4mmu_unload_range: he=%p base=%llx top=%llx bidx=%d tidx=%d\n", he, base, top, bidx, tidx); -+ -+ for (pteidx = bidx; pteidx <= tidx; pteidx++) -+ if (HE_GET_PTE(he, tagidx, pteidx) != HE_TYPE_INVALID) -+ elan4mmu_pteunload (ctxt, he,tagidx, pteidx); -+ } -+ -+ if ((base >= start && top <= end) || he->he_pte[tagidx] == 0) /* hash entry completely spanned or all pte's cleared */ -+ { /* so invalidate the pte's and free it */ -+ -+ MPRINTF (ctxt, 1, "elan4mmu_unload_range: he=%p base=%llx top=%llx spanned or empty\n", he, base, top); -+ -+ elan4mmu_free_hent (dev, tbl, hashidx, he, tagidx); -+ } -+ } -+ -+ prevhe = he_ctxt_unlink (ctxt, tbl, hashidx, prevhe, he, next); -+ } -+ } -+ spin_unlock (&ctxt->ctxt_mmulock); -+} -+ -+void -+elan4mmu_invalidate_ctxt (ELAN4_CTXT *ctxt) -+{ -+ ELAN4_DEV *dev = ctxt->ctxt_dev; -+ int ctxnum = ctxt->ctxt_num; -+ ELAN4_HASH_ENTRY *he; -+ int tbl, hashidx, tagidx, pteidx; -+ -+ MPRINTF (ctxt, 0, "elan4mmu_invalidate_ctxt: invalidating ctxnum=%d\n", ctxnum); -+ -+ spin_lock (&ctxt->ctxt_mmulock); -+ -+ /* 1st invalidate all tags belonging to me */ -+ for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++) -+ for (hashidx = 0; hashidx < dev->dev_hashsize[tbl]; hashidx++) -+ for (he = ctxt->ctxt_mmuhash[tbl][hashidx]; he != NULL; he = he_ctxt_next (he, ctxnum)) -+ for (tagidx = 0; tagidx < 2; tagidx++) -+ if ((he->he_tag[tagidx] & TAG_CONTEXT_MASK) == ctxnum) /* own tag block */ -+ { -+ MPRINTF (ctxt, 1, "elan4mmu_invalidate_ctxt: he=%p addr=%llx hashidx=%d tagidx=%d\n", -+ he, he->he_tag[tagidx] & TAG_ADDRESS_MASK, hashidx, tagidx); -+ -+ he->he_tag[tagidx] &= ~HE_TAG_VALID; -+ -+ elan4mmu_synctag (dev, he, tagidx); -+ } -+ -+ /* 2nd flush the tlb & cached hash block */ -+ elan4mmu_flush_tlb (dev); -+ -+ /* 3rd invalidate all pte's and free off the hash entries */ -+ for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++) -+ for 
(hashidx = 0; hashidx < dev->dev_hashsize[tbl]; hashidx++) -+ while ((he = ctxt->ctxt_mmuhash[tbl][hashidx]) != NULL) -+ { -+ ctxt->ctxt_mmuhash[tbl][hashidx] = he_ctxt_next (he, ctxnum); -+ -+ for (tagidx = 0; tagidx < 2; tagidx++) -+ if ((he->he_tag[tagidx] & TAG_CONTEXT_MASK) == ctxnum) -+ { -+ for (pteidx = 0; pteidx < 4; pteidx++) -+ if (HE_GET_PTE(he, tagidx, pteidx) != HE_TYPE_INVALID) -+ elan4mmu_pteunload (ctxt, he, tagidx, pteidx); -+ -+ elan4mmu_free_hent (dev, tbl, hashidx, he, tagidx); -+ } -+ } -+ spin_unlock (&ctxt->ctxt_mmulock); -+} -+ -+ELAN4_HASH_CACHE * -+elan4mmu_reserve (ELAN4_CTXT *ctxt, int tbl, E4_Addr start, unsigned int npages, int cansleep) -+{ -+ ELAN4_DEV *dev = ctxt->ctxt_dev; -+ E4_Addr end = start + (npages << dev->dev_pageshift[tbl]) - 1; -+ unsigned long tagshift = dev->dev_pageshift[tbl] + 2; -+ E4_Addr tagspan = 1 << tagshift; -+ E4_Addr base = (start & ~(tagspan-1)); -+ E4_Addr top = (end & ~(tagspan-1)) + (tagspan-1); -+ unsigned int nhes = (top - base + 1) >> tagshift; -+ ELAN4_HASH_CACHE *hc; -+ unsigned int tagidx, pteidx; -+ E4_Addr addr; -+ int i; -+ -+ MPRINTF (ctxt, 0, "elan4mmu_reserve: start=%llx npages=%d\n", start, npages); -+ MPRINTF (ctxt, 0, " pageshift=%d tagspan=%lx base=%llx top=%llx end=%llx nhes=%d\n", -+ dev->dev_pageshift[tbl], tagspan, base, top, end, nhes); -+ -+ KMEM_ALLOC (hc, ELAN4_HASH_CACHE *, offsetof (ELAN4_HASH_CACHE, hc_hes[nhes]), cansleep); -+ -+ if (hc == NULL) -+ return NULL; -+ -+ hc->hc_start = start; -+ hc->hc_end = end; -+ hc->hc_tbl = tbl; -+ -+ spin_lock (&ctxt->ctxt_mmulock); -+ for (addr = base, i = 0; i < nhes; addr += tagspan, i++) -+ { -+ unsigned bidx = (i == 0) ? (start & (tagspan-1)) >> dev->dev_pageshift[tbl] : 0; -+ unsigned tidx = (i == (nhes-1)) ? (end & (tagspan-1)) >> dev->dev_pageshift[tbl] : 3; -+ -+ -+ if ((hc->hc_hes[i] = elan4mmu_ptealloc (ctxt, tbl, addr & ~(tagspan-1), &tagidx)) == NULL) -+ goto failed; -+ -+ -+ MPRINTF (ctxt, 2, "elan4mmu_reserve: tbl=%d addr=%llx -> hashidx=%d tagidx=%d\n", tbl, addr & ~(tagspan-1), -+ E4MMU_HASH_INDEX (ctxt->ctxt_num, (addr & ~(tagspan-1)), dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1), tagidx); -+ -+ for (pteidx = bidx; pteidx <= tidx; pteidx++) -+ { -+ ASSERT (HE_GET_PTE (hc->hc_hes[i], tagidx, pteidx) == HE_TYPE_INVALID); -+ -+ MPRINTF (ctxt, 2, "elan4mmu_reserve: i=%d addr=%llx he=%p (tagidx=%d pteidx=%d)\n", -+ i, addr, hc->hc_hes[i], tagidx, pteidx); -+ -+ HE_SET_PTE (hc->hc_hes[i], tagidx, pteidx, HE_TYPE_RESERVED); -+ } -+ } -+ spin_unlock (&ctxt->ctxt_mmulock); -+ -+ return hc; -+ -+ failed: -+ for (i--, addr -= tagspan; i >= 0; i--, addr -= tagspan) -+ { -+ unsigned bidx = (i == 0) ? (start & (tagspan-1)) >> dev->dev_pageshift[tbl] : 0; -+ unsigned tidx = (i == (nhes-1)) ? (end & (tagspan-1)) >> dev->dev_pageshift[tbl] : 3; -+ unsigned hashidx = E4MMU_HASH_INDEX (ctxt->ctxt_num, addr, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1); -+ unsigned tagidx = (addr == E4MMU_TAG2VADDR (hc->hc_hes[i]->he_tag[0], hashidx, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1)) ? 
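/* tag 0 if this address decodes from the entry's first tag, otherwise it is held in tag 1 */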
0 : 1; -+ -+ for (pteidx = bidx; pteidx <= tidx; pteidx++) -+ HE_SET_PTE(hc->hc_hes[i], tagidx, pteidx, HE_TYPE_INVALID); -+ -+ if (hc->hc_hes[i]->he_pte[tagidx] == 0) -+ elan4mmu_free_hent (dev, tbl, hashidx, hc->hc_hes[i], tagidx); -+ } -+ spin_unlock (&ctxt->ctxt_mmulock); -+ -+ KMEM_FREE (hc, offsetof (ELAN4_HASH_CACHE, hc_hes[nhes])); -+ -+ return NULL; -+} -+ -+void -+elan4mmu_release (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc) -+{ -+ ELAN4_DEV *dev = ctxt->ctxt_dev; -+ E4_Addr start = hc->hc_start; -+ E4_Addr end = hc->hc_end; -+ unsigned long tagshift = dev->dev_pageshift[hc->hc_tbl] + 2; -+ E4_Addr tagspan = 1 << tagshift; -+ E4_Addr base = (start & ~(tagspan-1)); -+ E4_Addr top = (end & ~(tagspan-1)) + (tagspan-1); -+ unsigned int nhes = (top - base + 1) >> tagshift; -+ ELAN4_HASH_ENTRY *prevhe, *he, *next; -+ E4_Addr addr; -+ unsigned int pteidx; -+ int i; -+ -+ spin_lock (&ctxt->ctxt_mmulock); -+ -+ MPRINTF (ctxt, 0, "elan4mmu_release: base=%llx top=%llx\n", base, top); -+ -+ for (addr = base, i = 0; i < nhes; addr += tagspan, i++) -+ { -+ unsigned bidx = (i == 0) ? (start & (tagspan-1)) >> dev->dev_pageshift[hc->hc_tbl] : 0; -+ unsigned tidx = (i == (nhes-1)) ? (end & (tagspan-1)) >> dev->dev_pageshift[hc->hc_tbl] : 3; -+ unsigned hashidx = E4MMU_HASH_INDEX (ctxt->ctxt_num, addr, dev->dev_pageshift[hc->hc_tbl], dev->dev_hashsize[hc->hc_tbl]-1); -+ unsigned tagidx = (addr == E4MMU_TAG2VADDR (hc->hc_hes[i]->he_tag[0], hashidx, dev->dev_pageshift[hc->hc_tbl], dev->dev_hashsize[hc->hc_tbl]-1)) ? 0 : 1; -+ -+ for (pteidx = bidx; pteidx <= tidx; pteidx++) -+ { -+ elan4mmu_invalidatepte (dev, hc->hc_hes[i], tagidx, pteidx); -+ -+ HE_SET_PTE(hc->hc_hes[i], tagidx, pteidx, HE_TYPE_INVALID); -+ } -+ -+ MPRINTF (ctxt, 2, "elan4mmu_release: i=%d addr=%llx he=%p (hashidx=%d tagidx=%d pteidx=%d) pte=%x\n", -+ i, addr, hc->hc_hes[i], hashidx, tagidx, pteidx, hc->hc_hes[i]->he_pte[tagidx]); -+ -+ /* remove from context hash */ -+ /* need to move to the hc->hc_hes[i] in the ctxt list and set prevhe, he, next */ -+ prevhe = NULL; -+ he = ctxt->ctxt_mmuhash[hc->hc_tbl][hashidx]; -+ next = he_ctxt_next (he, ctxt->ctxt_num); -+ -+ while(he != hc->hc_hes[i]) { -+ prevhe = he; -+ he = next; -+ next = he_ctxt_next (he, ctxt->ctxt_num); -+ } -+ -+ if (he->he_pte[tagidx] == 0) -+ elan4mmu_free_hent (dev, hc->hc_tbl, hashidx, he, tagidx); -+ -+ he_ctxt_unlink (ctxt, hc->hc_tbl, hashidx, prevhe, he, next); -+ } -+ spin_unlock (&ctxt->ctxt_mmulock); -+} -+ -+void -+elan4mmu_set_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx, E4_uint64 newpte) -+{ -+ ELAN4_DEV *dev = ctxt->ctxt_dev; -+ unsigned int tbl = hc->hc_tbl; -+ unsigned int tagshift = dev->dev_pageshift[tbl] + 2; -+ E4_Addr tagspan = 1 << tagshift; -+ E4_Addr addr = hc->hc_start + (idx << dev->dev_pageshift[tbl]); -+ ELAN4_HASH_ENTRY *he = hc->hc_hes[(addr - (hc->hc_start & ~(tagspan-1))) >> tagshift]; -+ unsigned pteidx = E4MMU_SHIFT_ADDR(addr, dev->dev_pageshift[tbl]) & 3; -+ unsigned tagidx = he->he_tag[0] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID) ? 
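/* pick whichever of the two tags maps this address; the ASSERT below checks the choice */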
0 : 1; -+ -+ MPRINTF (ctxt, 2, "elan4mmu_set_pte: idx=%d addr=%llx he=%p (tagidx=%d pteidx=%d) newpte=%llx\n", idx, addr, he, tagidx, pteidx, newpte); -+ -+ ASSERT (he->he_tag[tagidx] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID)); -+ -+ elan4mmu_writepte (dev, he, tagidx, pteidx, newpte); -+} -+ -+E4_uint64 -+elan4mmu_get_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx) -+{ -+ ELAN4_DEV *dev = ctxt->ctxt_dev; -+ unsigned int tbl = hc->hc_tbl; -+ unsigned int tagshift = dev->dev_pageshift[tbl] + 2; -+ E4_Addr tagspan = 1 << tagshift; -+ E4_Addr addr = hc->hc_start + (idx << dev->dev_pageshift[tbl]); -+ ELAN4_HASH_ENTRY *he = hc->hc_hes[(addr - (hc->hc_start & ~(tagspan-1))) >> tagshift]; -+ unsigned pteidx = E4MMU_SHIFT_ADDR(addr, dev->dev_pageshift[tbl]) & 3; -+ unsigned tagidx = he->he_tag[0] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID) ? 0 : 1; -+ -+ ASSERT (he->he_tag[tagidx] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID)); -+ -+ return elan4mmu_readpte (dev, he, tagidx, pteidx); -+} -+ -+void -+elan4mmu_clear_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx) -+{ -+ ELAN4_DEV *dev = ctxt->ctxt_dev; -+ unsigned int tbl = hc->hc_tbl; -+ unsigned int tagshift = dev->dev_pageshift[tbl] + 2; -+ E4_Addr tagspan = 1 << tagshift; -+ E4_Addr addr = hc->hc_start + (idx << dev->dev_pageshift[tbl]); -+ ELAN4_HASH_ENTRY *he = hc->hc_hes[(addr - (hc->hc_start & ~(tagspan-1))) >> tagshift]; -+ unsigned pteidx = E4MMU_SHIFT_ADDR(addr, dev->dev_pageshift[tbl]) & 3; -+ unsigned tagidx = he->he_tag[0] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID) ? 0 : 1; -+ -+ MPRINTF (ctxt, 2, "elan4mmu_clear_pte: idx=%d addr=%llx he=%p (tagidx=%d pteidx=%d)\n", idx, addr, he, tagidx, pteidx); -+ -+ ASSERT (he->he_tag[tagidx] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID)); -+ -+ elan4mmu_invalidatepte (dev, he, tagidx, pteidx); -+} -+ -+int -+elan4mmu_display_mmuhash(ELAN4_DEV *dev, int tbl, int *index_ptr, char *page, int count) -+{ -+ char *p = page; -+ unsigned long flags; -+ ELAN4_HASH_ENTRY *he; -+ int index = *index_ptr; -+ -+ spin_lock_irqsave (&dev->dev_mmulock, flags); -+ -+ he = &dev->dev_mmuhash[tbl][index]; -+ -+ /* move to the next entry that actually has contents in its chain */ -+ while ((he->he_next == NULL) && ((he->he_tag[0] & TAG_CONTEXT_MASK) == INVALID_CONTEXT) -+ && ((he->he_tag[1] & TAG_CONTEXT_MASK) == INVALID_CONTEXT)) -+ { -+ index++; -+ if ( index >= dev->dev_hashsize[tbl] ) { -+ /* didnt find anything and have looped */ -+ spin_unlock_irqrestore (&dev->dev_mmulock, flags); -+ *index_ptr = dev->dev_hashsize[tbl]; -+ return (p - page); -+ } -+ he = &dev->dev_mmuhash[tbl][index]; -+ } -+ *index_ptr = index; /* the actual one we will print */ -+ -+ -+ while (he) { -+ if ( ((p - page)+175) > count ) { -+ /* might not fit in */ -+ p += sprintf( p , "...\n"); -+ he = NULL; -+ } else { -+ int ctxt0_bit = 0; -+ int ctxt1_bit = 0; -+ ELAN4_CTXT *ctxt0; -+ ELAN4_CTXT *ctxt1; -+ -+ if ( (he->he_tag[0] & TAG_CONTEXT_MASK) != INVALID_CONTEXT) { -+ ctxt0 = elan4_localctxt (dev, (he->he_tag[0] & TAG_CONTEXT_MASK)); -+ ctxt0_bit = ctxt0->shuffle_needed[0]; -+ } -+ if ( (he->he_tag[1] & TAG_CONTEXT_MASK) != INVALID_CONTEXT) { -+ ctxt1 = elan4_localctxt (dev, (he->he_tag[1] & TAG_CONTEXT_MASK)); -+ ctxt1_bit = ctxt1->shuffle_needed[0]; -+ } -+ -+ p += sprintf(p ,"(%4d,%1d) he%s %p entry 0x%010lx he_(%p,%p) chain(%p,%p) tag(0x%016llx-%d,0x%016llx-%d) pte(0x%010x,0x%010x)%s\n", -+ index,tbl, (he==&dev->dev_mmuhash[tbl][index])?"*":" ", he, -+ 
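/* a trailing "*" in the output marks a he_next whose entry bits 30:19 differ from this entry's chain pointer */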
he->he_entry, he->he_next, he->he_prev, he->he_chain[0], he->he_chain[1],
-+ (long long)he->he_tag[0], ctxt0_bit, (long long)he->he_tag[1], ctxt1_bit, he->he_pte[0], he->he_pte[1],
-+ (he->he_next)? (( ((he->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK) ==
-+ ((he->he_next->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK))?" ":"*"):" ");
-+ he = he->he_next;
-+ }
-+ }
-+
-+ spin_unlock_irqrestore (&dev->dev_mmulock, flags);
-+
-+ return (p - page);
-+}
-+
-+int
-+elan4mmu_display_ctxt_mmuhash(ELAN4_CTXT *ctxt, int tbl, int *index_ptr, char *page, int count)
-+{
-+ ELAN4_HASH_ENTRY *he;
-+ char *p = page;
-+ ELAN4_DEV *dev = ctxt->ctxt_dev;
-+ int index = *index_ptr;
-+
-+ spin_lock (&ctxt->ctxt_mmulock);
-+
-+ he = ctxt->ctxt_mmuhash[tbl][index];
-+ while (! he ) {
-+ index++;
-+ if ( index >= dev->dev_hashsize[tbl] ) {
-+ /* didn't find anything and have looped */
-+ spin_unlock (&ctxt->ctxt_mmulock);
-+ *index_ptr = dev->dev_hashsize[tbl];
-+ return (p - page);
-+ }
-+ he = ctxt->ctxt_mmuhash[tbl][index];
-+ }
-+ *index_ptr = index; /* the actual one we will print */
-+
-+ while (he) {
-+ if ( ((p - page)+175) > count ) {
-+ /* might not fit in */
-+ p += sprintf( p , "...\n");
-+ he = NULL;
-+ } else {
-+ p += sprintf(p ,"(%4d,%1d) he%s %p entry 0x%010lx he_(%p,%p) chain(%p,%p) tag(0x%016llx,0x%016llx) pte(0x%010x,0x%010x)%s\n",
-+ index,tbl, (he==&dev->dev_mmuhash[tbl][index])?"*":" ", he,
-+ he->he_entry, he->he_next, he->he_prev, he->he_chain[0], he->he_chain[1],
-+ (long long)he->he_tag[0], (long long)he->he_tag[1], he->he_pte[0], he->he_pte[1],
-+ (he->he_next)?(( ((he->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK) ==
-+ ((he->he_next->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK))?" ":"*"):" ");
-+
-+ he = he_ctxt_next (he, ctxt->ctxt_num);
-+ }
-+ }
-+
-+ spin_unlock (&ctxt->ctxt_mmulock);
-+
-+ return (p - page);
-+}
-+
-+int
-+elan4mmu_display_bucket_mmuhash(ELAN4_DEV *dev, int tbl, int *buckets, int nBuckets, char *page, int c)
-+{
-+ ELAN4_HASH_ENTRY *he;
-+ unsigned long flags;
-+ char *p = page;
-+ int b;
-+ int index;
-+ int count;
-+ int totals[nBuckets];
-+
-+ for (b=0;b<nBuckets;b++)
-+ totals[b] = 0;
-+
-+ for (index = 0; index < dev->dev_hashsize[tbl]; index++) {
-+
-+ /* how long is this chain */
-+ spin_lock_irqsave (&dev->dev_mmulock, flags);
-+
-+ he = &dev->dev_mmuhash[tbl][index];
-+ count = 0;
-+ while (he) {
-+ count++;
-+ ASSERT(count < 1000000); /* seems we have a loop */
-+ he = he->he_next;
-+ }
-+ spin_unlock_irqrestore (&dev->dev_mmulock, flags);
-+
-+ /* bucket the length */
-+ for(b=0;b
-+
-+#include
-+
-+#include
-+#include
-+
-+#include
-+#include
-+
-+int
-+elan4mmu_sdram_aliascheck (ELAN4_CTXT *ctxt, E4_Addr addr, sdramaddr_t phys)
-+{
-+ ELAN4_DEV *dev = ctxt->ctxt_dev;
-+
-+ /*
-+ * On MPSAS we don't allocate a large enough context table, so
-+ * if we see an address/context pair which would "alias" because
-+ * they differ in unchecked hash bits to a previous pteload,
-+ * then we kill the application. 
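-+ * The first pteload records its checked hash bits in dev_rsvd_hashval[0];
-+ * every later translation must match those bits under dev_rsvd_hashmask[0].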
-+ */ -+ unsigned hashval = (E4MMU_SHIFT_ADDR(addr, (dev->dev_pageshift[0]) + 2) ^ E4MMU_CONTEXT_SCRAMBLE(ctxt->ctxt_num)); -+ -+ if (dev->dev_rsvd_hashval[0] == 0xFFFFFFFF) -+ dev->dev_rsvd_hashval[0] = hashval & dev->dev_rsvd_hashmask[0]; -+ -+ if ((hashval & dev->dev_rsvd_hashmask[0]) != dev->dev_rsvd_hashval[0]) -+ { -+ printk ("elan4mmu_sdram_aliascheck: vaddr=%016llx ctxnum=%x -> [%x] overlaps %x - %x [hashidx=%x]\n", (unsigned long long) addr, -+ ctxt->ctxt_num, hashval, hashval & dev->dev_rsvd_hashmask[0], dev->dev_rsvd_hashval[0], -+ E4MMU_HASH_INDEX (ctxt->ctxt_num, addr, dev->dev_pageshift[0], dev->dev_hashsize[0]-1)); -+ -+ return 0; -+ } -+ -+ if (((addr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT)) != (phys & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT)))) -+ { -+ printk ("elan4mmu_sdram_aliascheck: vaddr=%016llx incorrectly alias sdram at %lx\n", (unsigned long long) addr, phys); -+ return 0; -+ } -+ -+ return 1; -+} -+ -+int -+elan4mmu_alloc_topaddr (ELAN4_DEV *dev, physaddr_t paddr, unsigned type) -+{ -+#if defined(__i386) && !defined(CONFIG_X86_PAE) -+ if (dev->dev_topaddrvalid == 0) -+ { -+ dev->dev_topaddrvalid = 1; -+ -+ pci_write_config_word (dev->dev_osdep.pdev, PCI_ELAN_TOPPHYSADDR(0), 0); -+ pci_write_config_word (dev->dev_osdep.pdev, PCI_ELAN_TOPPHYSADDR(1), 0); -+ pci_write_config_word (dev->dev_osdep.pdev, PCI_ELAN_TOPPHYSADDR(2), 0); -+ pci_write_config_word (dev->dev_osdep.pdev, PCI_ELAN_TOPPHYSADDR(3), 0); -+ } -+ return (0); -+#else -+ register int i; -+ E4_uint16 match; -+ -+ if (dev->dev_topaddrmode) /* ExtraMasterAddrBits=1 => match {paddr[63:50],type[3:2]} */ -+ match = ((paddr >> 48) & ~3) | ((type >> 2) & 3); -+ else /* ExtraMasterAddrBits=0 => match {paddr[63:48]} */ -+ match = (paddr >> 48); -+ -+ MPRINTF (DBG_DEVICE, 2, "elan4mmu_alloc_topaddr: mode=%d paddr=%lx type=%x match=%x [%x %x.%x.%x.%x]\n", -+ dev->dev_topaddrmode, paddr, type, match, dev->dev_topaddrvalid, -+ dev->dev_topaddr[0], dev->dev_topaddr[1], dev->dev_topaddr[2], dev->dev_topaddr[3]); -+ -+ for (i = 0; i < 4; i++) -+ if ((dev->dev_topaddrvalid & (1 << i)) && dev->dev_topaddr[i] == match) -+ return (i); -+ -+ for (i = 0; i < 4; i++) -+ { -+ if ((dev->dev_topaddrvalid & (1 << i)) == 0) -+ { -+ MPRINTF (DBG_DEVICE, 2, "elan4mmu_alloc_topaddr: allocate slot %d for %x\n", i, match); -+ -+ dev->dev_topaddrvalid |= (1 << i); -+ dev->dev_topaddr[i] = match; -+ -+ pci_write_config_word (dev->dev_osdep.pdev, PCI_ELAN_TOPPHYSADDR(i), match); -+ return (i); -+ } -+ } -+ -+ panic ("elan4mmu_alloc_topaddr: all topaddrs in use\n"); -+ return (0); -+#endif -+} -+ -+/* -+ * Convert a physical address into an pte. This should generate a "local" pte for -+ * physical addresses which are elan4 sdram or elan4 command queues. For elan4 -+ * registers and other addresses on the same bus, this should be the local pci -+ * bus address. All other addresses should access the physical address via the -+ * PCI bridge. 
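-+ * elan4mmu_categorise_paddr below distinguishes these cases (HE_TYPE_SDRAM,
-+ * HE_TYPE_COMMAND, HE_TYPE_REGS, HE_TYPE_PAGE or HE_TYPE_OTHER) and rewrites
-+ * the physical address into the form the chosen pte type expects.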
-+ */ -+ -+int -+elan4mmu_categorise_paddr (ELAN4_DEV *dev, physaddr_t *physp) -+{ -+ physaddr_t sdram_base = dev->dev_sdram_phys; -+ physaddr_t sdram_top = dev->dev_sdram_phys + pci_resource_len (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM); -+ physaddr_t regs_base = dev->dev_regs_phys; -+ physaddr_t regs_top = dev->dev_regs_phys + pci_resource_len (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS); -+ physaddr_t phys = *physp; -+ int iscommand; -+ -+ if (phys >= sdram_base && phys <= sdram_top) -+ { -+ (*physp) = (phys ^ sdram_base); -+ return HE_TYPE_SDRAM; -+ } -+ -+ if (phys >= regs_base && phys < regs_top) -+ { -+ if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA) -+ iscommand = (phys < (regs_base + ELAN4_REVA_REG_OFFSET)); -+ else -+ iscommand = (phys < (regs_base + ELAN4_REVB_I2C_OFFSET)); -+ -+ if (iscommand) -+ { -+ (*physp) = phys ^ regs_base; -+ -+ return HE_TYPE_COMMAND; -+ } -+ else -+ { -+ u32 blow, bhigh; -+ -+ /* compute a local pci address from our register BAR */ -+ pci_read_config_dword (dev->dev_osdep.pdev, PCI_BASE_ADDRESS_2, &blow); -+ pci_read_config_dword (dev->dev_osdep.pdev, PCI_BASE_ADDRESS_3, &bhigh); -+ -+ (*physp) = (((physaddr_t) bhigh) << 32) | (blow & PCI_BASE_ADDRESS_MEM_MASK) | (phys ^ regs_base); -+ -+ return HE_TYPE_REGS; -+ } -+ } -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) -+ if (VALID_PAGE (virt_to_page (phys_to_virt (phys)))) -+#else -+ if (virt_addr_valid (phys_to_virt (phys))) -+#endif -+ return HE_TYPE_PAGE; -+ -+ return HE_TYPE_OTHER; -+} -+ -+E4_uint64 -+elan4mmu_phys2pte (ELAN4_DEV *dev, physaddr_t phys, unsigned perm) -+{ -+ unsigned int type = 0; -+ E4_uint64 pte; -+ -+ switch (elan4mmu_categorise_paddr (dev, &phys)) -+ { -+ case HE_TYPE_SDRAM: -+ type = PTE_SetPerm (perm); -+ break; -+ -+ case HE_TYPE_COMMAND: -+ type = PTE_SetPerm (perm) | PTE_CommandQueue; -+ break; -+ -+ case HE_TYPE_REGS: -+ type = PTE_SetPerm (perm) | PTE_PciNotLocal; -+ break; -+ -+ case HE_TYPE_PAGE: -+ if (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_PCI_MAP) -+ { -+ struct scatterlist list; -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) -+ list.address = NULL; -+#endif -+ list.page = virt_to_page (phys_to_virt (phys));; -+ list.offset = (phys & (PAGE_SIZE-1)); -+ list.length = (1 << dev->dev_pageshift[0]); -+ -+ if (pci_map_sg (dev->dev_osdep.pdev, &list, 1, PCI_DMA_BIDIRECTIONAL) == 0) -+ { -+ printk ("elan4mmu_phys2pte: pci_map_sg failed\n"); -+ return -EFAULT; -+ } -+ -+ type = PTE_SetPerm (perm) | PTE_PciNotLocal | dev->dev_pteval; -+ phys = list.dma_address; -+ break; -+ } -+ /* DROPTHROUGH */ -+ -+ case HE_TYPE_OTHER: -+ if (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_PCI_MAP) -+ return -EFAULT; -+ -+ type = PTE_SetPerm (perm) | PTE_PciNotLocal | dev->dev_pteval; -+ break; -+ } -+ -+ if ((type & PTE_PciNotLocal) == 0) -+ pte = (phys >> PTE_PADDR_SHIFT) | type; -+ else -+ { -+ unsigned topaddr = elan4mmu_alloc_topaddr (dev, phys, type); -+ -+ if (dev->dev_topaddrmode) -+ pte = (phys >> PTE_PADDR_SHIFT) | (type & ~0xc) | (topaddr << 2); -+ else -+ pte = ((phys >> PTE_PADDR_SHIFT) & ~PTE_TOPADDR_MASK) | (((E4_uint64) topaddr) << 45) | type; -+ } -+ -+ return pte; -+} -+ -+physaddr_t -+elan4mmu_pte2phys (ELAN4_DEV *dev, E4_uint64 pte) -+{ -+ physaddr_t sdram_base = dev->dev_sdram_phys; -+ physaddr_t regs_base = dev->dev_regs_phys; -+ physaddr_t phys; -+ -+ if (pte & PTE_PciNotLocal) -+ { -+ if (dev->dev_topaddrmode) -+ phys = ((physaddr_t)(dev->dev_topaddr[(pte >> 2) & 3] & 
0xfffc) << 48) | ((pte & PTE_PPN_MASK) << PTE_PADDR_SHIFT); -+ else -+ phys = ((physaddr_t)(dev->dev_topaddr[(pte >> 45) & 3] & 0xffff) << 48)| ((pte & PTE_PPN_MASK & ~PTE_TOPADDR_MASK) << PTE_PADDR_SHIFT); -+ -+#if defined(__alpha) -+ phys ^= alpha_mv.pci_dac_offset; -+#elif defined(__sparc) -+ phys ^= 0xfffe000000000000; -+#endif -+ return phys; -+ } -+ -+ if (pte & PTE_CommandQueue) -+ return (regs_base | ((pte & PTE_PPN_MASK) << PTE_PADDR_SHIFT)); -+ -+ /* sdram */ -+ return (sdram_base | ((pte & PTE_PPN_MASK) << PTE_PADDR_SHIFT)); -+} -+ -+EXPORT_SYMBOL(elan4mmu_phys2pte); -+EXPORT_SYMBOL(elan4mmu_pte2phys); -diff -urN clean/drivers/net/qsnet/elan4/neterr.c linux-2.6.9/drivers/net/qsnet/elan4/neterr.c ---- clean/drivers/net/qsnet/elan4/neterr.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan4/neterr.c 2005-07-20 07:35:36.000000000 -0400 -@@ -0,0 +1,270 @@ -+/* -+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: neterr.c,v 1.8.2.1 2005/07/20 11:35:36 mike Exp $" -+/* $Source: /cvs/master/quadrics/elan4mod/neterr.c,v $*/ -+ -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+typedef struct neterr_inputq -+{ -+ E4_InputQueue inputq; /* input queue */ -+ E4_Event32 qevent; /* input queue event */ -+ E4_uint64 sent; /* # messages sent (cq flow control)*/ -+} NETERR_INPUTQ; -+ -+#define NETERR_NSLOTS 64 /* single page of queue space (4Kb) */ -+ -+#define NETERR_RETRIES 16 -+#define NETERR_CQ_SIZE CQ_Size8K -+#define NETERR_CQ_MSGS (CQ_Size(NETERR_CQ_SIZE) / (21*8)) -+#define NETERR_VP_COUNT 64 /* this *must* be > NETERR_CQ_MSGS */ -+#define NETERR_VP_BASE 1 /* use vp 1 upwards */ -+ -+void -+elan4_neterr_interrupt (ELAN4_DEV *dev, void *arg) -+{ -+ E4_Addr qfptr = elan4_sdram_readq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_fptr)); -+ E4_Addr qbptr = elan4_sdram_readq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_bptr)); -+ E4_Addr qfirst = DEVICE_NETERR_SLOTS_ADDR; -+ E4_Addr qlast = qfirst + (NETERR_NSLOTS-1) * ELAN4_NETERR_MSG_SIZE; -+ ELAN4_CQ *cq = dev->dev_neterr_intcq; -+ int count = 0; -+ ELAN4_CTXT *ctxt; -+ ELAN4_NETERR_MSG msg; -+ -+ while (qfptr != qbptr) -+ { -+ elan4_sdram_copyq_from_sdram (dev, dev->dev_neterr_slots + (qfptr - qfirst), &msg, ELAN4_NETERR_MSG_SIZE); -+ -+ ctxt = elan4_networkctxt (dev, msg.msg_context); -+ -+ if (ctxt != NULL && ctxt->ctxt_ops->op_neterrmsg) -+ ctxt->ctxt_ops->op_neterrmsg (ctxt, &msg); -+ else -+ PRINTF (DBG_DEVICE, DBG_NETERR, "elan4_neterr_interrupt: no process - sender %d.%d\n", msg.msg_sender.loc_node, msg.msg_sender.loc_context); -+ -+ count++; -+ -+ /* move on the from pointer */ -+ qfptr = (qfptr == qlast) ? 
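/* wrap from the last slot back to the first */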
qfirst : qfptr + ELAN4_NETERR_MSG_SIZE; -+ -+ elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_fptr), qfptr); -+ } -+ -+ if (count == 0) -+ { -+ printk ("elan4_neterr_interrupt: spurious\n"); -+ return; -+ } -+ -+ /* Issue the waitevent to the interrupt queue */ -+ writeq (WAIT_EVENT_CMD | (DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, qevent)), (void *)cq->cq_mapping); -+ writeq ( E4_EVENT_INIT_VALUE (-32 * count, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0), (void *)cq->cq_mapping); -+ writeq ( DEVICE_NETERR_INTCQ_ADDR, (void *)cq->cq_mapping); -+ writeq (INTERRUPT_CMD | (dev->dev_neterr_intop.op_cookie << E4_MAIN_INT_SHIFT), (void *)cq->cq_mapping); -+ -+ pioflush_reg (dev); -+} -+ -+int -+elan4_neterr_init (ELAN4_DEV *dev) -+{ -+ unsigned int intqaddr; -+ E4_Addr qfirst, qlast; -+ -+ if ((dev->dev_neterr_inputq = elan4_sdram_alloc (dev, SDRAM_PAGE_SIZE)) == 0) -+ return 0; -+ -+ if ((dev->dev_neterr_slots = elan4_sdram_alloc (dev, roundup (NETERR_NSLOTS * ELAN4_NETERR_MSG_SIZE, SDRAM_PAGE_SIZE))) == 0) -+ return 0; -+ -+ if ((dev->dev_neterr_msgcq = elan4_alloccq (&dev->dev_ctxt, NETERR_CQ_SIZE, CQ_STENEnableBit | CQ_WriteEnableBit, CQ_Priority)) == NULL) -+ return 0; -+ -+ if ((dev->dev_neterr_intcq = elan4_alloccq (&dev->dev_ctxt, CQ_Size1K, CQ_WaitEventEnableBit | CQ_InterruptEnableBit, CQ_Priority)) == NULL) -+ return 0; -+ -+ intqaddr = (dev->dev_cqoffset + elan4_cq2num (dev->dev_neterr_intcq)) * CQ_CommandMappingSize; -+ qfirst = DEVICE_NETERR_SLOTS_ADDR; -+ qlast = qfirst + (NETERR_NSLOTS-1) * ELAN4_NETERR_MSG_SIZE; -+ -+ spin_lock_init (&dev->dev_neterr_lock); -+ -+ /* Register an interrupt operation */ -+ dev->dev_neterr_intop.op_function = elan4_neterr_interrupt; -+ dev->dev_neterr_intop.op_arg = NULL; -+ -+ elan4_register_intop (dev, &dev->dev_neterr_intop); -+ -+ /* Initialise the inputq descriptor and event */ -+ elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_fptr), qfirst); -+ elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_bptr), qfirst); -+ elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_control), E4_InputQueueControl (qfirst, qlast, ELAN4_NETERR_MSG_SIZE)); -+ elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_event), DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, qevent)); -+ -+ elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, qevent.ev_CountAndType), E4_EVENT_INIT_VALUE (-32, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0)); -+ elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, qevent.ev_WritePtr), DEVICE_NETERR_INTCQ_ADDR); -+ elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, qevent.ev_WriteValue), (dev->dev_neterr_intop.op_cookie << E4_MAIN_INT_SHIFT) | INTERRUPT_CMD); -+ -+ elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, sent), 0); -+ -+ /* Map them all into the device context */ -+ elan4mmu_pteload (&dev->dev_ctxt, 0, DEVICE_NETERR_INPUTQ_ADDR, HE_TYPE_SDRAM, (dev->dev_neterr_inputq >> PTE_PADDR_SHIFT) | PTE_SetPerm(PERM_RemoteAll)); -+ elan4mmu_pteload (&dev->dev_ctxt, 0, DEVICE_NETERR_INTCQ_ADDR, HE_TYPE_SDRAM, (intqaddr >> PTE_PADDR_SHIFT) | PTE_SetPerm(PERM_LocDataWrite) | PTE_CommandQueue); -+ elan4mmu_pteload (&dev->dev_ctxt, 0, DEVICE_NETERR_SLOTS_ADDR, HE_TYPE_SDRAM, (dev->dev_neterr_slots >> PTE_PADDR_SHIFT) | PTE_SetPerm(PERM_DataReadWrite)); -+ -+ /* finally attach to the neterr 
context */ -+ if (elan4_attach_filter (&dev->dev_ctxt, ELAN4_NETERR_CONTEXT_NUM) != 0) -+ panic ("elan4_neterr_init: failed to attach to neterr context\n"); -+ -+ /* and drop the context filter */ -+ elan4_set_filter (&dev->dev_ctxt, ELAN4_NETERR_CONTEXT_NUM, E4_FILTER_HIGH_PRI); -+ -+ return 1; -+} -+ -+void -+elan4_neterr_destroy (ELAN4_DEV *dev) -+{ -+ if (dev->dev_neterr_intcq) -+ { -+ elan4_detach_filter (&dev->dev_ctxt, ELAN4_NETERR_CONTEXT_NUM); -+ -+ elan4mmu_unload_range (&dev->dev_ctxt, 0, DEVICE_NETERR_SLOTS_ADDR, 1 << dev->dev_pageshift[0]); -+ elan4mmu_unload_range (&dev->dev_ctxt, 0, DEVICE_NETERR_INTCQ_ADDR, 1 << dev->dev_pageshift[0]); -+ elan4mmu_unload_range (&dev->dev_ctxt, 0, DEVICE_NETERR_INPUTQ_ADDR, 1 << dev->dev_pageshift[0]); -+ -+ spin_lock_destroy (&dev->dev_neterr_lock); -+ } -+ -+ if (dev->dev_neterr_intcq) -+ elan4_freecq (&dev->dev_ctxt, dev->dev_neterr_intcq); -+ dev->dev_neterr_intcq = NULL; -+ -+ if (dev->dev_neterr_msgcq) -+ elan4_freecq (&dev->dev_ctxt, dev->dev_neterr_msgcq); -+ dev->dev_neterr_msgcq = NULL; -+ -+ if (dev->dev_neterr_slots) -+ elan4_sdram_free (dev, dev->dev_neterr_slots, roundup (NETERR_NSLOTS * ELAN4_NETERR_MSG_SIZE, SDRAM_PAGE_SIZE)); -+ dev->dev_neterr_slots = 0; -+ -+ if (dev->dev_neterr_inputq) -+ elan4_sdram_free (dev, dev->dev_neterr_inputq, SDRAM_PAGE_SIZE); -+ dev->dev_neterr_inputq = 0; -+} -+ -+int -+elan4_neterr_sendmsg (ELAN4_DEV *dev, unsigned int nodeid, unsigned int retries, ELAN4_NETERR_MSG *msg) -+{ -+ ELAN4_CQ *cq = dev->dev_neterr_msgcq; -+ E4_uint64 sent; -+ E4_VirtualProcessEntry route; -+ unsigned int vp; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->dev_neterr_lock, flags); -+ -+ sent = elan4_sdram_readq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, sent)); -+ -+ PRINTF (DBG_DEVICE, DBG_NETERR, "elan4_neterr_sendmsg: nodeid=%d retries=%d cookie=%llx sender=%d,%d%s\n", -+ nodeid, retries, msg->msg_cookies[0], msg->msg_sender.loc_node, msg->msg_sender.loc_context, -+ (dev->dev_neterr_queued - sent) >= NETERR_CQ_MSGS ? 
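/* cq flow control: at most NETERR_CQ_MSGS messages may be outstanding before 'sent' catches up */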
" - no cq space" : ""); -+ -+ if ((dev->dev_neterr_queued - sent) >= NETERR_CQ_MSGS) -+ { -+ spin_unlock_irqrestore (&dev->dev_neterr_lock, flags); -+ return 0; -+ } -+ -+ vp = NETERR_VP_BASE + (dev->dev_neterr_queued % NETERR_VP_COUNT); -+ -+ if (elan4_generate_route (&dev->dev_position, &route, ELAN4_NETERR_CONTEXT_NUM, nodeid, nodeid, FIRST_SYSTEM_PACKET | FIRST_HIGH_PRI) < 0) -+ { -+ spin_unlock_irqrestore (&dev->dev_neterr_lock, flags); -+ return 0; -+ } -+ -+ elan4_write_route (dev, dev->dev_routetable, vp, &route); -+ -+ writeq ((GUARD_CMD | GUARD_CHANNEL(0) | GUARD_RESET(retries)), (void *)cq->cq_mapping); -+ writeq (NOP_CMD, (void *)cq->cq_mapping); -+ -+ writeq (OPEN_STEN_PKT_CMD | OPEN_PACKET (0, PACK_OK | RESTART_COUNT_ZERO, vp), (void *)cq->cq_mapping); -+ writeq (SEND_TRANS_CMD | (TR_INPUT_Q_GETINDEX << 16), (void *)cq->cq_mapping); -+ writeq ( DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, inputq), (void *)cq->cq_mapping); -+ -+ writeq (SEND_TRANS_CMD | (TR_WRITE (64 >> 3, 0, TR_DATATYPE_DWORD) << 16), (void *)cq->cq_mapping); -+ writeq ( 0 /* address */, (void *)cq->cq_mapping); -+ writeq ( ((E4_uint64 *) msg)[0], (void *)cq->cq_mapping); -+ writeq ( ((E4_uint64 *) msg)[1], (void *)cq->cq_mapping); -+ writeq ( ((E4_uint64 *) msg)[2], (void *)cq->cq_mapping); -+ writeq ( ((E4_uint64 *) msg)[3], (void *)cq->cq_mapping); -+ writeq ( ((E4_uint64 *) msg)[4], (void *)cq->cq_mapping); -+ writeq ( ((E4_uint64 *) msg)[5], (void *)cq->cq_mapping); -+ writeq ( ((E4_uint64 *) msg)[6], (void *)cq->cq_mapping); -+ writeq ( ((E4_uint64 *) msg)[7], (void *)cq->cq_mapping); -+ -+ writeq (SEND_TRANS_CMD | (TR_INPUT_Q_COMMIT << 16), (void *)cq->cq_mapping); -+ writeq ( DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, inputq), (void *)cq->cq_mapping); -+ writeq ( 0 /* cookie */, (void *)cq->cq_mapping); -+ -+ writeq (GUARD_CMD | GUARD_CHANNEL(0) | GUARD_RESET(NETERR_RETRIES), (void *)cq->cq_mapping); -+ writeq (WRITE_DWORD_CMD | (DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, sent)), (void *)cq->cq_mapping); -+ writeq ( ++dev->dev_neterr_queued, (void *)cq->cq_mapping); -+ -+ pioflush_reg (dev); -+ -+ spin_unlock_irqrestore (&dev->dev_neterr_lock, flags); -+ -+ return 1; -+} -+ -+int -+elan4_neterr_iproc_trap (ELAN4_DEV *dev, ELAN4_IPROC_TRAP *trap) -+{ -+ E4_IprocTrapHeader *hdrp = &trap->tr_transactions[trap->tr_trappedTrans]; -+ unsigned long flags; -+ -+ switch (IPROC_TrapValue (hdrp->IProcStatusCntxAndTrType)) -+ { -+ case InputEopErrorOnWaitForEop: -+ case InputEopErrorTrap: -+ case InputCrcErrorAfterPAckOk: -+ return 1; -+ -+ case InputEventEngineTrapped: -+ printk ("elan%d: device_iproc_trap: InputEventEngineTrapped - Trans=%x TrAddr=%llx\n", -+ dev->dev_instance, (int)IPROC_TransactionType (hdrp->IProcStatusCntxAndTrType), (long long) hdrp->TrAddr); -+ -+ if ((IPROC_TransactionType (hdrp->IProcStatusCntxAndTrType) & TR_OPCODE_MASK) == (TR_INPUT_Q_COMMIT & TR_OPCODE_MASK) && -+ hdrp->TrAddr == DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, inputq)) -+ { -+ spin_lock_irqsave (&dev->dev_neterr_lock, flags); -+ writeq ((DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, qevent)) | SET_EVENT_CMD, (void *)(dev->dev_neterr_msgcq->cq_mapping)); -+ spin_unlock_irqrestore (&dev->dev_neterr_lock, flags); -+ return 1; -+ } -+ -+ default: -+ return 0; -+ } -+} -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan4/procfs_Linux.c linux-2.6.9/drivers/net/qsnet/elan4/procfs_Linux.c ---- 
clean/drivers/net/qsnet/elan4/procfs_Linux.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan4/procfs_Linux.c 2005-09-07 10:35:03.000000000 -0400 -@@ -0,0 +1,1426 @@ -+/* -+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: procfs_Linux.c,v 1.43.2.6 2005/09/07 14:35:03 mike Exp $ $Name: QSNETMODULES-5-11-3_20050907 $" -+/* $Source: /cvs/master/quadrics/elan4mod/procfs_Linux.c,v $*/ -+ -+#include -+ -+#include -+#include -+#include -+ -+#include -+ -+#include -+#include -+#include -+#include -+#include -+/* -+ * -+ * procfs format for elan4: -+ * -+ * /proc/qsnet/elan4/config -+ * elan4_debug -+ * elan4_debug_toconsole -+ * elan4_debug_tobuffer -+ * elan4_debug_display_ctxt -+ * elan4_debug_ignore_ctxt -+ * elan4_debug_ignore_type -+ * elan4_debug_mmu -+ * elan4_mainint_punt_loops -+ * user_p2p_route_options -+ * user_bcast_route_options -+ * -+ * /proc/qsnet/elan4/deviceN -+ * stats -+ * position -+ * vpd -+ */ -+ -+struct proc_dir_entry *elan4_procfs_root; -+struct proc_dir_entry *elan4_config_root; -+ -+/* borrowed from fs/proc/proc_misc - helper for proc_read_int */ -+static int -+proc_calc_metrics(char *page, char **start, off_t off, int count, int *eof, int len) -+{ -+ if (len <= off+count) *eof = 1; -+ *start = page + off; -+ len -= off; -+ if (len>count) len = count; -+ if (len<0) len = 0; -+ return len; -+} -+ -+static int -+proc_read_devinfo (char *page, char **start, off_t off, -+ int count, int *eof, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ int len = 0; -+ -+ if (! dev) -+ len = sprintf (page, "\n"); -+ else -+ { -+ len += sprintf (page + len, "dev_vendor_id 0x%x\n", dev->dev_devinfo.dev_vendor_id); -+ len += sprintf (page + len, "dev_device_id 0x%x\n", dev->dev_devinfo.dev_vendor_id); -+ len += sprintf (page + len, "dev_revision_id 0x%x\n", dev->dev_devinfo.dev_revision_id); -+ len += sprintf (page + len, "dev_instance 0x%x\n", dev->dev_devinfo.dev_instance); -+ len += sprintf (page + len, "dev_rail 0x%x\n", dev->dev_devinfo.dev_rail); -+ len += sprintf (page + len, "dev_driver_version 0x%x\n", dev->dev_devinfo.dev_driver_version); -+ len += sprintf (page + len, "dev_params_mask 0x%x\n", dev->dev_devinfo.dev_params_mask); -+ len += sprintf (page + len, "dev_params: \n"); -+ len += sprintf (page + len, " 0 - PciCmdQPadFlag 0x%x\n", dev->dev_devinfo.dev_params.values[0]); -+ len += sprintf (page + len, " 1 - EventCopyWinPt 0x%x\n", dev->dev_devinfo.dev_params.values[1]); -+ len += sprintf (page + len, " 2 - PciWriteCombining 0x%x\n", dev->dev_devinfo.dev_params.values[2]); -+ len += sprintf (page + len, " 3 - 0x%x\n", dev->dev_devinfo.dev_params.values[3]); -+ len += sprintf (page + len, " 4 - 0x%x\n", dev->dev_devinfo.dev_params.values[4]); -+ len += sprintf (page + len, " 5 - 0x%x\n", dev->dev_devinfo.dev_params.values[5]); -+ len += sprintf (page + len, " 6 - 0x%x\n", dev->dev_devinfo.dev_params.values[6]); -+ len += sprintf (page + len, " 7 - 0x%x\n", dev->dev_devinfo.dev_params.values[7]); -+ len += sprintf (page + len, " 8 - 0x%x\n", dev->dev_devinfo.dev_params.values[8]); -+ len += sprintf (page + len, " 9 - 0x%x\n", dev->dev_devinfo.dev_params.values[9]); -+ len += sprintf (page + len, " 10 - 0x%x\n", dev->dev_devinfo.dev_params.values[10]); -+ len += sprintf (page + len, " 11 - features 0x%x\n", dev->dev_devinfo.dev_params.values[11]); -+ len += sprintf (page + 
len, "dev_num_down_links_value 0x%x\n", dev->dev_devinfo.dev_num_down_links_value); -+ } -+ -+ return (qsnet_proc_calc_metrics (page, start, off, count, eof, len)); -+} -+ -+static int -+proc_read_position (char *page, char **start, off_t off, -+ int count, int *eof, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ int len; -+ -+ if (dev->dev_position.pos_mode == ELAN_POS_UNKNOWN) -+ len = sprintf (page, "\n"); -+ else -+ len = sprintf (page, -+ "NodeId %d\n" -+ "NumLevels %d\n" -+ "NumNodes %d\n", -+ dev->dev_position.pos_nodeid, -+ dev->dev_position.pos_levels, -+ dev->dev_position.pos_nodes); -+ -+ return (qsnet_proc_calc_metrics (page, start, off, count, eof, len)); -+} -+ -+static int -+proc_write_position (struct file *file, const char *buf, unsigned long count, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ unsigned nodeid = ELAN_INVALID_NODE; -+ unsigned numnodes = 0; -+ char *page, *p; -+ int res; -+ ELAN_POSITION pos; -+ -+ if (count == 0) -+ return (0); -+ -+ if (count >= PAGE_SIZE) -+ return (-EINVAL); -+ -+ if ((page = (char *) __get_free_page (GFP_KERNEL)) == NULL) -+ return (-ENOMEM); -+ -+ MOD_INC_USE_COUNT; -+ -+ if (copy_from_user (page, buf, count)) -+ res = -EFAULT; -+ else -+ { -+ page[count] = '\0'; -+ -+ if (page[count-1] == '\n') -+ page[count-1] = '\0'; -+ -+ if (! strcmp (page, "")) -+ { -+ pos.pos_mode = ELAN_POS_UNKNOWN; -+ pos.pos_nodeid = ELAN_INVALID_NODE; -+ pos.pos_nodes = 0; -+ pos.pos_levels = 0; -+ } -+ else -+ { -+ for (p = page; *p; ) -+ { -+ while (isspace (*p)) -+ p++; -+ -+ if (! strncmp (p, "NodeId=", strlen("NodeId="))) -+ nodeid = simple_strtoul (p + strlen ("NodeId="), NULL, 0); -+ if (! strncmp (p, "NumNodes=", strlen ("NumNodes="))) -+ numnodes = simple_strtoul (p + strlen ("NumNodes="), NULL, 0); -+ -+ while (*p && !isspace(*p)) -+ p++; -+ } -+ -+ if (elan4_compute_position (&pos, nodeid, numnodes, dev->dev_devinfo.dev_num_down_links_value) != 0) -+ printk ("elan%d: invalid values for NodeId=%d NumNodes=%d\n", dev->dev_instance, nodeid, numnodes); -+ else -+ { -+ printk ("elan%d: setting NodeId=%d NumNodes=%d NumLevels=%d\n", dev->dev_instance, pos.pos_nodeid, -+ pos.pos_nodes, pos.pos_levels); -+ -+ if (elan4_set_position (dev, &pos) < 0) -+ printk ("elan%d: failed to set device position\n", dev->dev_instance); -+ } -+ } -+ } -+ -+ MOD_DEC_USE_COUNT; -+ free_page ((unsigned long) page); -+ -+ return (count); -+} -+ -+static int -+proc_read_temp (char *page, char **start, off_t off, -+ int count, int *eof, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ unsigned char values[2]; -+ int len; -+ -+ if (i2c_disable_auto_led_update (dev) < 0) -+ len = sprintf (page, ""); -+ else -+ { -+ if (i2c_read (dev, I2C_TEMP_ADDR, 2, values) < 0) -+ len = sprintf (page, ""); -+ else -+ len = sprintf (page, "%s%d%s\n", (values[0] & 0x80) ? "-" : "", -+ (values[0] & 0x80) ? -((signed char)values[0]) - 1 : values[0], -+ (values[1] & 0x80) ? 
".5" : ".0"); -+ -+ i2c_enable_auto_led_update (dev); -+ } -+ -+ return (qsnet_proc_calc_metrics (page, start, off, count, eof, len)); -+} -+ -+static int -+proc_read_eccerr (char *page, char **start, off_t off, -+ int count, int *eof, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ char errstr[200]; -+ register int i, len = 0; -+ -+ *page = '\0'; -+ -+ for (i = 0; i < sizeof (dev->dev_sdramerrs)/sizeof(dev->dev_sdramerrs[0]); i++) -+ if (dev->dev_sdramerrs[i].ErrorCount != 0) -+ len += sprintf (page + len, "%s occured %0d times\n", -+ elan4_sdramerr2str (dev, dev->dev_sdramerrs[i].EccStatus, dev->dev_sdramerrs[i].ConfigReg, errstr), -+ dev->dev_sdramerrs[i].ErrorCount); -+ -+ return (qsnet_proc_calc_metrics (page, start, off, count, eof, len)); -+} -+ -+static int -+proc_read_vpd (char *page, char **start, off_t off, -+ int count, int *eof, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ int len; -+ -+ if ( elan4_read_vpd (dev, NULL, page) ) -+ len = sprintf (page, "no vpd tags found\n"); -+ else -+ len = strlen(page)+1; -+ -+ return (qsnet_proc_calc_metrics (page, start, off, count, eof, len)); -+} -+ -+static int -+proc_read_linkportkey (char *page, char **start, off_t off, -+ int count, int *eof, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ int len; -+ -+ len = sprintf (page, "%llx\n", read_reg64 (dev, LinkPortLock)); -+ -+ return (qsnet_proc_calc_metrics (page, start, off, count, eof, len)); -+} -+ -+static int -+proc_write_linkportkey (struct file *file, const char *buf, unsigned long count, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ int res = 0; -+ char tmpbuf[30]; -+ -+ if (count > sizeof (tmpbuf) - 1) -+ return -EINVAL; -+ -+ MOD_INC_USE_COUNT; -+ -+ if (copy_from_user (tmpbuf, buf, count)) -+ res = -EFAULT; -+ else -+ { -+ tmpbuf[count] = '\0'; -+ -+ write_reg64 (dev, LinkPortLock, simple_strtoull (tmpbuf, NULL, 16)); -+ } -+ -+ MOD_DEC_USE_COUNT; -+ -+ return (count); -+} -+ -+static int -+proc_read_stats_translations (char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ ELAN4_TRANS_STATS *pr = (ELAN4_TRANS_STATS *)data; -+ int tbl = pr->tbl; -+ ELAN4_DEV *dev = list_entry(pr, ELAN4_DEV, trans_stats[tbl] ); -+ char *p = page; -+ -+ if (off) return (0); -+ -+ p += elan4mmu_display_bucket_mmuhash(dev, tbl, pr->buckets, ELAN4_TRANS_STATS_NUM_BUCKETS , p, count); -+ -+ return (proc_calc_metrics (page, start, off, count, eof, p - page)); -+} -+ -+static int -+proc_write_stats_translations (struct file *file, const char *buf, unsigned long count, void *data) -+{ -+ ELAN4_TRANS_STATS *pr = (ELAN4_TRANS_STATS *)data; -+ int b0, b1, b2, b3, b4, b5, b6; -+ -+ int res = 0; -+ char tmpbuf[30]; -+ -+ if (count > sizeof (tmpbuf) - 1) -+ return -EINVAL; -+ -+ MOD_INC_USE_COUNT; -+ -+ if (copy_from_user (tmpbuf, buf, count)) -+ res = -EFAULT; -+ else -+ { -+ tmpbuf[count] = '\0'; -+ sscanf(tmpbuf,"%d %d %d %d %d %d %d\n", &b0, &b1, &b2, &b3, &b4, &b5, &b6); -+ -+ pr->buckets[0] = b0; -+ pr->buckets[1] = b1; -+ pr->buckets[2] = b2; -+ pr->buckets[3] = b3; -+ pr->buckets[4] = b4; -+ pr->buckets[5] = b5; -+ pr->buckets[6] = b6; -+ pr->buckets[6] = 99999999; -+ -+ b0 = 1; -+ for(b1=0 ; b1 < ELAN4_TRANS_STATS_NUM_BUCKETS; b1++) { -+ if ( pr->buckets[b1] < b0) -+ pr->buckets[b1] = 99999999; -+ b0 = pr->buckets[b1]; -+ } -+ } -+ -+ MOD_DEC_USE_COUNT; -+ -+ return (count); -+} -+static int -+elan4_read_mmuhash_reduction_func (char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ char *p = page; -+ -+ if 
(off) return (0); -+ -+ p += sprintf(p ,"elan4mmu hash reduction : %s\nend reductions %d\nmiddle reductions %d\nmiddle failed %d\n", -+ elan4_mmuhash_chain_reduction?"On":"Off", -+ elan4_mmuhash_chain_end_reduce, -+ elan4_mmuhash_chain_middle_reduce, -+ elan4_mmuhash_chain_middle_fail); -+ p += sprintf(p ,"shuffle attempts %d\nshuffle done %d\n", -+ elan4_mmuhash_shuffle_attempts, -+ elan4_mmuhash_shuffle_done -+ ); -+ -+ return (proc_calc_metrics (page, start, off, count, eof, p - page)); -+} -+ -+static int -+elan4_write_mmuhash_reduction_func (struct file *file, const char *buf, unsigned long count, void *data) -+{ -+ int res = 0; -+ char tmpbuf[30]; -+ -+ if (count > sizeof (tmpbuf) - 1) -+ return -EINVAL; -+ -+ MOD_INC_USE_COUNT; -+ -+ if (copy_from_user (tmpbuf, buf, count)) -+ res = -EFAULT; -+ else -+ { -+ if (tmpbuf[0] == '0') elan4_mmuhash_chain_reduction = 0; -+ if (tmpbuf[0] == '1') elan4_mmuhash_chain_reduction = 1; -+ -+ tmpbuf[count] = '\0'; -+ } -+ -+ MOD_DEC_USE_COUNT; -+ -+ return (count); -+} -+ -+typedef struct elan4_trans_private -+{ -+ ELAN4_DEV *pr_dev; -+ ELAN4_CTXT *pr_ctxt; -+ int pr_index; -+ int pr_tbl; -+ char *pr_page; -+ unsigned pr_off; -+ unsigned pr_len; -+ -+ int pr_changed; -+} ELAN4_TRANS_PRIVATE; -+ -+static int -+elan4_ctxt_trans_open (struct inode *inode, struct file *file) -+{ -+ ELAN4_CTXT_TRANS_INDEX *trans = (ELAN4_CTXT_TRANS_INDEX *)( PDE(inode)->data ); -+ ELAN4_TRANS_PRIVATE *pr; -+ -+ if ((pr = kmalloc (sizeof (ELAN4_TRANS_PRIVATE), GFP_KERNEL)) == NULL) -+ return (-ENOMEM); -+ -+ pr->pr_tbl = trans->tbl; -+ pr->pr_ctxt = list_entry(trans, ELAN4_CTXT, trans_index[trans->tbl] ); -+ -+ pr->pr_index = 0; -+ -+ pr->pr_len = 0; -+ pr->pr_off = 0; -+ pr->pr_changed = 1; -+ pr->pr_page = NULL; -+ -+ file->private_data = (void *) pr; -+ -+ MOD_INC_USE_COUNT; -+ return (0); -+} -+ -+static ssize_t -+elan4_ctxt_trans_read (struct file *file, char *buf, size_t count, loff_t *ppos) -+{ -+ ELAN4_TRANS_PRIVATE *pr = (ELAN4_TRANS_PRIVATE *) file->private_data; -+ ELAN4_CTXT *ctxt = pr->pr_ctxt; -+ ELAN4_DEV *dev = ctxt->ctxt_dev; -+ int error; -+ -+ if ( pr->pr_index >= dev->dev_hashsize[pr->pr_tbl] ) -+ return (0); -+ -+ if ((error = verify_area (VERIFY_WRITE, buf, count)) != 0) -+ return (error); -+ -+ if (pr->pr_page == NULL && (pr->pr_page = (char *) __get_free_page (GFP_KERNEL)) == NULL) -+ return (-ENOMEM); -+ -+ if (pr->pr_off >= pr->pr_len) -+ { -+ if (elan4mmu_display_ctxt_mmuhash(ctxt, pr->pr_tbl, &pr->pr_index, pr->pr_page, count)) -+ pr->pr_len = strlen (pr->pr_page); -+ else -+ pr->pr_len = 0; -+ -+ pr->pr_off = 0; -+ pr->pr_changed = 0; -+ pr->pr_index++; -+ } -+ -+ if (count >= (pr->pr_len - pr->pr_off)) -+ count = pr->pr_len - pr->pr_off; -+ -+ copy_to_user (buf, pr->pr_page + pr->pr_off, count); -+ -+ pr->pr_off += count; -+ *ppos += count; -+ -+ if (pr->pr_off >= pr->pr_len) -+ { -+ free_page ((unsigned long) pr->pr_page); -+ pr->pr_page = NULL; -+ } -+ -+ return (count); -+} -+ -+static int -+elan4_ctxt_trans_release (struct inode *inode, struct file *file) -+{ -+ ELAN4_TRANS_PRIVATE *pr = (ELAN4_TRANS_PRIVATE *) file->private_data; -+ -+ if (pr->pr_page) -+ free_page ((unsigned long) pr->pr_page); -+ kfree (pr); -+ -+ MOD_DEC_USE_COUNT; -+ return (0); -+} -+ -+static struct file_operations qsnet_ctxt_trans_fops = -+{ -+ open: elan4_ctxt_trans_open, -+ release: elan4_ctxt_trans_release, -+ read: elan4_ctxt_trans_read, -+}; -+ -+void -+proc_insertctxt(ELAN4_DEV *dev, ELAN4_CTXT *ctxt) -+{ -+ struct proc_dir_entry *p; -+ char name[32]; -+ 
int t; -+ -+ /* GNAT 7565: Need to hold kernel lock when adding/removing -+ * procfs entries outside the module init/fini paths -+ */ -+ lock_kernel(); -+ -+ if (dev->dev_osdep.ctxtdir) -+ { -+ sprintf(name,"%d", ctxt->ctxt_num); -+ if ((ctxt->procdir = proc_mkdir (name, dev->dev_osdep.ctxtdir)) != NULL) -+ { -+ for (t = 0; t < NUM_HASH_TABLES; t++) -+ { -+ sprintf(name , "translations_%d", t); -+ -+ ctxt->trans_index[t].tbl = t; -+ ctxt->trans_index[t].index = 0; -+ -+ if ((p = create_proc_entry (name, 0, ctxt->procdir)) != NULL) -+ { -+ p->proc_fops = &qsnet_ctxt_trans_fops; -+ p->data = & ctxt->trans_index[t]; -+ p->owner = THIS_MODULE; -+ } -+ } -+ } -+ } -+ -+ unlock_kernel(); -+} -+ -+void -+proc_removectxt(ELAN4_DEV *dev, ELAN4_CTXT *ctxt) -+{ -+ int t; -+ char name[32]; -+ -+ /* GNAT 7565: Need to hold kernel lock when adding/removing -+ * procfs entries outside the module init/fini paths -+ */ -+ lock_kernel(); -+ -+ if (dev->dev_osdep.ctxtdir && ctxt->procdir != NULL) -+ { -+ for (t = 0; t < NUM_HASH_TABLES; t++) -+ { -+ sprintf(name , "translations_%d", t); -+ remove_proc_entry (name, ctxt->procdir); -+ } -+ -+ sprintf(name,"%d", ctxt->ctxt_num); -+ remove_proc_entry (name, dev->dev_osdep.ctxtdir); -+ } -+ -+ unlock_kernel(); -+} -+ -+static struct device_info -+{ -+ char *name; -+ int (*read_func) (char *page, char **start, off_t off, int count, int *eof, void *data); -+ int (*write_func) (struct file *file, const char *buf, unsigned long count, void *data); -+ unsigned minrev; -+} device_info[] = { -+ {"devinfo", proc_read_devinfo, NULL, 0}, -+ {"position", proc_read_position, proc_write_position, 0}, -+ {"temp", proc_read_temp, NULL, 1}, -+ {"eccerr", proc_read_eccerr, NULL, 0}, -+ {"vpd", proc_read_vpd, NULL, 0}, -+ {"linkportkey", proc_read_linkportkey, proc_write_linkportkey, 0}, -+}; -+ -+static int -+proc_read_link_stats (char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ char *p = page; -+ -+ p += sprintf (p, "%20s %ld\n", "link_errors", dev->dev_stats.s_link_errors); -+ p += sprintf (p, "%20s %ld\n", "lock_errors", dev->dev_stats.s_lock_errors); -+ p += sprintf (p, "%20s %ld\n", "deskew_errors", dev->dev_stats.s_deskew_errors); -+ p += sprintf (p, "%20s %ld\n", "phase_errors", dev->dev_stats.s_phase_errors); -+ -+ p += sprintf (p, "%20s %ld\n", "data_errors", dev->dev_stats.s_data_errors); -+ p += sprintf (p, "%20s %ld\n", "fifo_overflow0", dev->dev_stats.s_fifo_overflow0); -+ p += sprintf (p, "%20s %ld\n", "fifo_overflow1", dev->dev_stats.s_fifo_overflow1); -+ p += sprintf (p, "%20s %ld\n", "mod45changed", dev->dev_stats.s_mod45changed); -+ p += sprintf (p, "%20s %ld\n", "pack_not_seen", dev->dev_stats.s_pack_not_seen); -+ -+ p += sprintf (p, "%20s %ld\n", "linkport_keyfail", dev->dev_stats.s_linkport_keyfail); -+ p += sprintf (p, "%20s %ld\n", "eop_reset", dev->dev_stats.s_eop_reset); -+ p += sprintf (p, "%20s %ld\n", "bad_length", dev->dev_stats.s_bad_length); -+ p += sprintf (p, "%20s %ld\n", "crc_error", dev->dev_stats.s_crc_error); -+ p += sprintf (p, "%20s %ld\n", "crc_bad", dev->dev_stats.s_crc_bad); -+ -+ p += sprintf (p, "%20s %ld\n", "cproc_timeout", dev->dev_stats.s_cproc_timeout); -+ p += sprintf (p, "%20s %ld\n", "dproc_timeout", dev->dev_stats.s_dproc_timeout); -+ -+ return (proc_calc_metrics (page, start, off, count, eof, p - page)); -+} -+ -+static char * -+proc_sprintf_bucket_stat (char *p, char *name, unsigned long *stats, int *buckets) -+{ -+ int i; -+ -+ p += sprintf (p, "%20s ", 
name); -+ -+ for (i = 0; i < ELAN4_DEV_STATS_BUCKETS-1; i++) -+ p += sprintf (p, "%ld(<=%d) ", stats[i], buckets[i]); -+ p += sprintf (p, "%ld(>%d)\n", stats[i], buckets[i-1]); -+ -+ return p; -+} -+ -+static int -+proc_read_intr_stats (char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ char *p = page; -+ -+ p += sprintf (p, "%20s %ld\n", "interrupts", dev->dev_stats.s_interrupts); -+ p += sprintf (p, "%20s %ld\n", "haltints", dev->dev_stats.s_haltints); -+ -+ p += sprintf (p, "%20s %ld\n", "mainint_punts", dev->dev_stats.s_mainint_punts); -+ p += sprintf (p, "%20s %ld\n", "mainint_rescheds", dev->dev_stats.s_mainint_rescheds); -+ -+ p = proc_sprintf_bucket_stat (p, "mainints", dev->dev_stats.s_mainints, MainIntBuckets); -+ -+ return (proc_calc_metrics (page, start, off, count, eof, p - page)); -+} -+ -+static int -+proc_read_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ char *p = page; -+ -+ p += sprintf (p, "%20s %ld\n", "cproc_traps", dev->dev_stats.s_cproc_traps); -+ p += sprintf (p, "%20s %ld\n", "dproc_traps", dev->dev_stats.s_dproc_traps); -+ p += sprintf (p, "%20s %ld\n", "eproc_traps", dev->dev_stats.s_eproc_traps); -+ p += sprintf (p, "%20s %ld\n", "iproc_traps", dev->dev_stats.s_iproc_traps); -+ p += sprintf (p, "%20s %ld\n", "tproc_traps", dev->dev_stats.s_tproc_traps); -+ -+ return (proc_calc_metrics (page, start, off, count, eof, p - page)); -+} -+ -+static int -+proc_read_cproc_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ char *p = page; -+ int i; -+ extern char *const CProcTrapNames[]; -+ -+ for (i = 0; i < sizeof (dev->dev_stats.s_cproc_trap_types)/sizeof(dev->dev_stats.s_cproc_trap_types[0]); i++) -+ p += sprintf (p, "%-40s %ld\n", CProcTrapNames[i], dev->dev_stats.s_cproc_trap_types[i]); -+ -+ return (proc_calc_metrics (page, start, off, count, eof, p - page)); -+} -+ -+static int -+proc_read_dproc_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ char *p = page; -+ int i; -+ extern char *const DProcTrapNames[]; -+ -+ for (i = 0; i < sizeof (dev->dev_stats.s_dproc_trap_types)/sizeof(dev->dev_stats.s_dproc_trap_types[0]); i++) -+ p += sprintf (p, "%-40s %ld\n", DProcTrapNames[i], dev->dev_stats.s_dproc_trap_types[i]); -+ -+ return (proc_calc_metrics (page, start, off, count, eof, p - page)); -+} -+ -+static int -+proc_read_eproc_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ char *p = page; -+ int i; -+ extern char *const EProcTrapNames[]; -+ -+ for (i = 0; i < sizeof (dev->dev_stats.s_eproc_trap_types)/sizeof(dev->dev_stats.s_eproc_trap_types[0]); i++) -+ p += sprintf (p, "%-40s %ld\n", EProcTrapNames[i], dev->dev_stats.s_eproc_trap_types[i]); -+ -+ return (proc_calc_metrics (page, start, off, count, eof, p - page)); -+} -+ -+static int -+proc_read_iproc_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ char *p = page; -+ int i; -+ extern char *const IProcTrapNames[]; -+ -+ for (i = 0; i < sizeof (dev->dev_stats.s_iproc_trap_types)/sizeof(dev->dev_stats.s_iproc_trap_types[0]); i++) -+ p += sprintf (p, "%-40s %ld\n", IProcTrapNames[i], dev->dev_stats.s_iproc_trap_types[i]); -+ -+ return (proc_calc_metrics 
(page, start, off, count, eof, p - page)); -+} -+ -+static int -+proc_read_tproc_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ char *p = page; -+ int i; -+ extern char *const TProcTrapNames[]; -+ -+ for (i = 0; i < sizeof (dev->dev_stats.s_tproc_trap_types)/sizeof(dev->dev_stats.s_tproc_trap_types[0]); i++) -+ p += sprintf (p, "%-40s %ld\n", TProcTrapNames[i], dev->dev_stats.s_tproc_trap_types[i]); -+ -+ return (proc_calc_metrics (page, start, off, count, eof, p - page)); -+} -+ -+static int -+proc_read_sdram_stats (char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ char *p = page; -+ -+ p += sprintf (p, "%20s %ld\n", "correctable_errors", dev->dev_stats.s_correctable_errors); -+ p += sprintf (p, "%20s %ld\n", "multiple_errors", dev->dev_stats.s_multiple_errors); -+ p += sprintf (p, "%20s %ldK\n", "sdram_bytes_free", dev->dev_stats.s_sdram_bytes_free/1024); -+ -+ return (proc_calc_metrics (page, start, off, count, eof, p - page)); -+} -+ -+void -+elan4_ringbuf_store (ELAN4_ROUTE_RINGBUF *ringbuf, E4_VirtualProcessEntry *route, ELAN4_DEV *dev) -+{ -+ int newend; -+ -+ ASSERT (kmutex_is_locked (&dev->dev_lock)); -+ -+ memcpy(&ringbuf->routes[ringbuf->end], route, sizeof(E4_VirtualProcessEntry)); -+ newend = ringbuf->end + 1; -+ if (newend >= DEV_STASH_ROUTE_COUNT) -+ newend -= DEV_STASH_ROUTE_COUNT; -+ if (newend == ringbuf->start) -+ ringbuf->start += 1; -+ if (ringbuf->start >= DEV_STASH_ROUTE_COUNT) -+ ringbuf->start -= DEV_STASH_ROUTE_COUNT; -+ ringbuf->end = newend; -+} -+ -+static int -+proc_read_dproc_timeout_stats (char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ char *p = page; -+ unsigned int *dproc_timeout; -+ -+ dproc_timeout = dev->dev_dproc_timeout; -+ -+ if (!dproc_timeout) -+ p += sprintf (p, "No stats available\n"); -+ else -+ { -+ int i; -+ -+ for (i=0; idev_position.pos_nodes; i++) -+ if (dproc_timeout[i] != 0) -+ p += sprintf (p, "Node %d: %u errors\n", i, dproc_timeout[i]); -+ } -+ -+ return (proc_calc_metrics (page, start, off, count, eof, p - page)); -+} -+ -+static int -+proc_read_dproc_timeout_routes (char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ char *p = page; -+ ELAN4_ROUTE_RINGBUF *ringbuf; -+ char routestr[33]; -+ -+ ringbuf = &dev->dev_dproc_timeout_routes; -+ -+ if (!ringbuf) -+ p += sprintf (p, "No stats available\n"); -+ else -+ { -+ int start; -+ int end; -+ int i; -+ -+ memset(&routestr, 0, 33); -+ -+ kmutex_lock(&dev->dev_lock); -+ -+ start = ringbuf->start; -+ end = ringbuf->end; -+ -+ if (end < start) -+ end = DEV_STASH_ROUTE_COUNT; -+ -+ for (i=start; iroutes[i], routestr); -+ p += sprintf (p, "Route %llx %llx->%s\n", (long long)ringbuf->routes[i].Values[0], (long long)ringbuf->routes[i].Values[1], routestr); -+ } -+ -+ if (ringbuf->end < start) -+ { -+ start = 0; -+ end = ringbuf->end; -+ for (i=start; iroutes[i], routestr); -+ p += sprintf (p, "Route %llx %llx->%s\n", (long long)ringbuf->routes[i].Values[0], (long long)ringbuf->routes[i].Values[1], routestr); -+ } -+ } -+ -+ kmutex_unlock(&dev->dev_lock); -+ } -+ -+ return (proc_calc_metrics (page, start, off, count, eof, p - page)); -+} -+ -+ -+static int -+proc_read_cproc_timeout_stats (char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ char *p = 
page; -+ unsigned int *cproc_timeout; -+ -+ cproc_timeout = dev->dev_cproc_timeout; -+ -+ if (!cproc_timeout) -+ p += sprintf (p, "No stats available\n"); -+ else -+ { -+ int i; -+ -+ for (i=0; i<dev->dev_position.pos_nodes; i++) -+ if (cproc_timeout[i] != 0) -+ p += sprintf (p, "Node %d: %u errors\n", i, cproc_timeout[i]); -+ } -+ -+ return (proc_calc_metrics (page, start, off, count, eof, p - page)); -+} -+ -+static int -+proc_read_cproc_timeout_routes (char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ char *p = page; -+ ELAN4_ROUTE_RINGBUF *ringbuf; -+ char routestr[33]; -+ -+ ringbuf = &dev->dev_cproc_timeout_routes; -+ -+ if (!ringbuf) -+ p += sprintf (p, "No stats available\n"); -+ else -+ { -+ int start; -+ int end; -+ int i; -+ -+ memset(&routestr, 0, 33); -+ -+ kmutex_lock(&dev->dev_lock); -+ -+ start = ringbuf->start; -+ end = ringbuf->end; -+ -+ if (end < start) -+ end = DEV_STASH_ROUTE_COUNT; -+ -+ for (i=start; i<end; i++) -+ { -+ elan4_route2str (&ringbuf->routes[i], routestr); -+ p += sprintf (p, "Route %llx %llx->%s\n", (long long)ringbuf->routes[i].Values[0], (long long)ringbuf->routes[i].Values[1], routestr); -+ } -+ -+ if (ringbuf->end < start) -+ { -+ start = 0; -+ end = ringbuf->end; -+ for (i=start; i<end; i++) -+ { -+ elan4_route2str (&ringbuf->routes[i], routestr); -+ p += sprintf (p, "Route %llx %llx->%s\n", (long long)ringbuf->routes[i].Values[0], (long long)ringbuf->routes[i].Values[1], routestr); -+ } -+ } -+ -+ kmutex_unlock(&dev->dev_lock); -+ } -+ -+ return (proc_calc_metrics (page, start, off, count, eof, p - page)); -+} -+ -+static int -+proc_read_traperr_stats (char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ char *p = page; -+ unsigned int *ack_errors; -+ -+ ack_errors = dev->dev_ack_errors; -+ -+ if (!ack_errors) -+ p += sprintf (p, "No stats available\n"); -+ else -+ { -+ int i; -+ -+ for (i=0; i<dev->dev_position.pos_nodes; i++) -+ if (ack_errors[i] != 0) -+ p += sprintf (p, "Node %d: %u errors\n", i, ack_errors[i]); -+ } -+ -+ return (proc_calc_metrics (page, start, off, count, eof, p - page)); -+} -+ -+static int -+proc_read_ackerror_routes (char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ char *p = page; -+ ELAN4_ROUTE_RINGBUF *ringbuf; -+ char routestr[33]; -+ -+ ringbuf = &dev->dev_ack_error_routes; -+ -+ if (!ringbuf) -+ p += sprintf (p, "No stats available\n"); -+ else -+ { -+ int start; -+ int end; -+ int i; -+ -+ memset(&routestr, 0, 33); -+ -+ kmutex_lock(&dev->dev_lock); -+ -+ start = ringbuf->start; -+ end = ringbuf->end; -+ -+ if (end < start) -+ end = DEV_STASH_ROUTE_COUNT; -+ -+ for (i=start; i<end; i++) -+ { -+ elan4_route2str (&ringbuf->routes[i], routestr); -+ p += sprintf (p, "Route %llx %llx->%s\n", (long long)ringbuf->routes[i].Values[0], (long long)ringbuf->routes[i].Values[1], routestr); -+ } -+ -+ if (ringbuf->end < start) -+ { -+ start = 0; -+ end = ringbuf->end; -+ for (i=start; i<end; i++) -+ { -+ elan4_route2str (&ringbuf->routes[i], routestr); -+ p += sprintf (p, "Route %llx %llx->%s\n", (long long)ringbuf->routes[i].Values[0], (long long)ringbuf->routes[i].Values[1], routestr); -+ } -+ } -+ -+ kmutex_unlock(&dev->dev_lock); -+ } -+ -+ return (proc_calc_metrics (page, start, off, count, eof, p - page)); -+} -+ -+static struct stats_info -+{ -+ char *name; -+ int (*read_func) (char *page, char **start, off_t off, int count, int *eof, void *data); -+ int (*write_func) (struct file *file, const char *buf, unsigned long count, void *data); -+} stats_info[] = { -+ {"link", proc_read_link_stats, NULL}, -+ {"intr", 
proc_read_intr_stats, NULL}, -+ {"trap", proc_read_trap_stats, NULL}, -+ {"cproc", proc_read_cproc_trap_stats, NULL}, -+ {"dproc", proc_read_dproc_trap_stats, NULL}, -+ {"eproc", proc_read_eproc_trap_stats, NULL}, -+ {"iproc", proc_read_iproc_trap_stats, NULL}, -+ {"tproc", proc_read_tproc_trap_stats, NULL}, -+ {"sdram", proc_read_sdram_stats, NULL}, -+ {"trapdmaerr", proc_read_traperr_stats, NULL}, -+ {"dproctimeout", proc_read_dproc_timeout_stats, NULL}, -+ {"cproctimeout", proc_read_cproc_timeout_stats, NULL}, -+ {"dproctimeoutroutes", proc_read_dproc_timeout_routes, NULL}, -+ {"cproctimeoutroutes", proc_read_cproc_timeout_routes, NULL}, -+ {"ackerrroutes", proc_read_ackerror_routes, NULL}, -+}; -+ -+static int -+proc_read_sysconfig (char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ E4_uint32 syscontrol = dev->dev_syscontrol; -+ int len = 0; -+ -+ *eof = 1; -+ if (off != 0) -+ return (0); -+ -+ if (syscontrol & CONT_EN_ALL_SETS) -+ len += sprintf (page + len, "%sEN_ALL_SETS", len == 0 ? "" : " "); -+ if (syscontrol & CONT_MMU_ENABLE) -+ len += sprintf (page + len, "%sMMU_ENABLE", len == 0 ? "" : " "); -+ if (syscontrol & CONT_CACHE_HASH_TABLE) -+ len += sprintf (page + len, "%sCACHE_HASH_TABLE", len == 0 ? "" : " "); -+ if (syscontrol & CONT_CACHE_CHAINS) -+ len += sprintf (page + len, "%sCACHE_CHAINS", len == 0 ? "" : " "); -+ if (syscontrol & CONT_CACHE_ROOT_CNTX) -+ len += sprintf (page + len, "%sCACHE_ROOT_CNTX", len == 0 ? "" : " "); -+ if (syscontrol & CONT_CACHE_STEN_ROUTES) -+ len += sprintf (page + len, "%sCACHE_STEN_ROUTES", len == 0 ? "" : " "); -+ if (syscontrol & CONT_CACHE_DMA_ROUTES) -+ len += sprintf (page + len, "%sCACHE_DMA_ROUTES", len == 0 ? "" : " "); -+ if (syscontrol & CONT_INHIBIT_MAX_CHAIN_ITEMS) -+ len += sprintf (page + len, "%sINHIBIT_MAX_CHAIN_ITEMS", len == 0 ? "" : " "); -+ -+ len += sprintf (page + len, "%sTABLE0_MASK_SIZE=%d", len == 0 ? "" : " ", (syscontrol >> CONT_TABLE0_MASK_SIZE_SHIFT) & PAGE_MASK_MASK); -+ len += sprintf (page + len, "%sTABLE0_PAGE_SIZE=%d", len == 0 ? "" : " ", (syscontrol >> CONT_TABLE0_PAGE_SIZE_SHIFT) & PAGE_SIZE_MASK); -+ len += sprintf (page + len, "%sTABLE1_MASK_SIZE=%d", len == 0 ? "" : " ", (syscontrol >> CONT_TABLE1_MASK_SIZE_SHIFT) & PAGE_MASK_MASK); -+ len += sprintf (page + len, "%sTABLE1_PAGE_SIZE=%d", len == 0 ? "" : " ", (syscontrol >> CONT_TABLE1_PAGE_SIZE_SHIFT) & PAGE_SIZE_MASK); -+ -+ if (syscontrol & CONT_2K_NOT_1K_DMA_PACKETS) -+ len += sprintf (page + len, "%s2K_NOT_1K_DMA_PACKETS", len == 0 ? "" : " "); -+ if (syscontrol & CONT_ALIGN_ALL_DMA_PACKETS) -+ len += sprintf (page + len, "%sALIGN_ALL_DMA_PACKETS", len == 0 ? "" : " "); -+ if (syscontrol & CONT_DIRECT_MAP_PCI_WRITES) -+ len += sprintf (page + len, "%sDIRECT_MAP_PCI_WRITES", len == 0 ? 
"" : " "); -+ -+ len += sprintf (page + len, "\n"); -+ -+ *start = page; -+ return (len); -+} -+ -+static int -+proc_write_sysconfig (struct file *file, const char *ubuffer, unsigned long count, void *data) -+{ -+ ELAN4_DEV *dev = (ELAN4_DEV *) data; -+ unsigned long page = __get_free_page (GFP_KERNEL); -+ char *buffer = (char *)page; -+ int add = 0; -+ int sub = 0; -+ -+ count = MIN (count, PAGE_SIZE - 1); -+ if (copy_from_user (buffer, ubuffer, count)) -+ { -+ free_page (page); -+ return (-EFAULT); -+ } -+ -+ buffer[count] = 0; /* terminate string */ -+ -+ while (*buffer != 0) -+ { -+ char *ptr; -+ char *end; -+ int ch; -+ int val; -+ int op; -+ -+ ch = *buffer; -+ if (ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n') -+ { -+ buffer++; -+ continue; -+ } -+ -+ op = *buffer; -+ if (op == '+' || op == '-') -+ buffer++; -+ -+ for (end = buffer; *end != 0; end++) -+ if (*end == ' ' || *end == '\t' || -+ *end == '\r' || *end == '\n') -+ break; -+ -+ if (end == buffer) -+ break; -+ -+ ch = *end; -+ *end = 0; -+ -+ for (ptr = buffer; *ptr != 0; ptr++) -+ if ('a' <= *ptr && *ptr <= 'z') -+ *ptr = *ptr + 'A' - 'a'; -+ -+ if (!strcmp (buffer, "EN_ALL_SETS")) -+ val = CONT_EN_ALL_SETS; -+ if (!strcmp (buffer, "CACHE_HASH_TABLE")) -+ val = CONT_CACHE_HASH_TABLE; -+ else if (!strcmp (buffer, "CACHE_CHAINS")) -+ val = CONT_CACHE_CHAINS; -+ else if (!strcmp (buffer, "CACHE_ROOT_CNTX")) -+ val = CONT_CACHE_ROOT_CNTX; -+ else if (!strcmp (buffer, "CACHE_STEN_ROUTES")) -+ val = CONT_CACHE_STEN_ROUTES; -+ else if (!strcmp (buffer, "CACHE_DMA_ROUTES")) -+ val = CONT_CACHE_DMA_ROUTES; -+ else if (!strcmp (buffer, "2K_NOT_1K_DMA_PACKETS")) -+ val = CONT_2K_NOT_1K_DMA_PACKETS; -+ else if (!strcmp (buffer, "ALIGN_ALL_DMA_PACKETS")) -+ val = CONT_ALIGN_ALL_DMA_PACKETS; -+ else -+ val = 0; -+ -+ if (op == '+') -+ add |= val; -+ else if (op == '-') -+ sub |= val; -+ -+ *end = ch; -+ buffer = end; -+ } -+ -+ if ((add | sub) & CONT_EN_ALL_SETS) -+ elan4_sdram_flushcache (dev, 0, E4_CacheSize); -+ -+ CHANGE_SYSCONTROL (dev, add, sub); -+ -+ if ((add | sub) & CONT_EN_ALL_SETS) -+ elan4_sdram_flushcache (dev, 0, E4_CacheSize); -+ -+ free_page (page); -+ return (count); -+} -+ -+static struct config_info -+{ -+ char *name; -+ int (*read_func) (char *page, char **start, off_t off, int count, int *eof, void *data); -+ int (*write_func) (struct file *file, const char *buf, unsigned long count, void *data); -+} config_info[] = { -+ {"sysconfig", proc_read_sysconfig, proc_write_sysconfig}, -+}; -+ -+static int -+elan4_trans_open (struct inode *inode, struct file *file) -+{ -+ ELAN4_TRANS_INDEX *trans = (ELAN4_TRANS_INDEX *)( PDE(inode)->data ); -+ ELAN4_TRANS_PRIVATE *pr; -+ -+ if ((pr = kmalloc (sizeof (ELAN4_TRANS_PRIVATE), GFP_KERNEL)) == NULL) -+ return (-ENOMEM); -+ -+ pr->pr_tbl = trans->tbl; -+ pr->pr_dev = list_entry(trans, ELAN4_DEV, trans_index[trans->tbl] ); -+ pr->pr_index = 0; -+ -+ pr->pr_len = 0; -+ pr->pr_off = 0; -+ pr->pr_changed = 1; -+ pr->pr_page = NULL; -+ -+ file->private_data = (void *) pr; -+ -+ MOD_INC_USE_COUNT; -+ return (0); -+} -+ -+static ssize_t -+elan4_trans_read (struct file *file, char *buf, size_t count, loff_t *ppos) -+{ -+ ELAN4_TRANS_PRIVATE *pr = (ELAN4_TRANS_PRIVATE *) file->private_data; -+ ELAN4_DEV *dev = pr->pr_dev; -+ int error; -+ -+ if ( pr->pr_index >= dev->dev_hashsize[pr->pr_tbl] ) -+ return (0); -+ -+ if ((error = verify_area (VERIFY_WRITE, buf, count)) != 0) -+ return (error); -+ -+ if (pr->pr_page == NULL && (pr->pr_page = (char *) __get_free_page (GFP_KERNEL)) == NULL) 
-+ return (-ENOMEM); -+ -+ if (pr->pr_off >= pr->pr_len) -+ { -+ if (elan4mmu_display_mmuhash(dev, pr->pr_tbl, &pr->pr_index, pr->pr_page, count)) -+ pr->pr_len = strlen (pr->pr_page); -+ else -+ pr->pr_len = 0; -+ -+ pr->pr_off = 0; -+ pr->pr_changed = 0; -+ pr->pr_index++; -+ } -+ -+ if (count >= (pr->pr_len - pr->pr_off)) -+ count = pr->pr_len - pr->pr_off; -+ -+ if (copy_to_user (buf, pr->pr_page + pr->pr_off, count)) -+ return (-EFAULT); -+ -+ pr->pr_off += count; -+ *ppos += count; -+ -+ if (pr->pr_off >= pr->pr_len) -+ { -+ free_page ((unsigned long) pr->pr_page); -+ pr->pr_page = NULL; -+ } -+ -+ return (count); -+} -+ -+static int -+elan4_trans_release (struct inode *inode, struct file *file) -+{ -+ ELAN4_TRANS_PRIVATE *pr = (ELAN4_TRANS_PRIVATE *) file->private_data; -+ -+ if (pr->pr_page) -+ free_page ((unsigned long) pr->pr_page); -+ kfree (pr); -+ -+ MOD_DEC_USE_COUNT; -+ return (0); -+} -+ -+static struct file_operations qsnet_trans_fops = -+{ -+ open: elan4_trans_open, -+ release: elan4_trans_release, -+ read: elan4_trans_read, -+}; -+ -+void -+elan4_procfs_device_init (ELAN4_DEV *dev) -+{ -+ struct proc_dir_entry *p; -+ char name[NAME_MAX]; -+ int i; -+ -+ sprintf (name, "device%d", dev->dev_instance); -+ dev->dev_osdep.procdir = proc_mkdir (name, elan4_procfs_root); -+ -+ for (i = 0; i < sizeof (device_info)/sizeof (device_info[0]); i++) -+ { -+ if (dev->dev_devinfo.dev_revision_id < device_info[i].minrev) -+ continue; -+ -+ if ((p = create_proc_entry (device_info[i].name, 0, dev->dev_osdep.procdir)) != NULL) -+ { -+ p->read_proc = device_info[i].read_func; -+ p->write_proc = device_info[i].write_func; -+ p->data = dev; -+ p->owner = THIS_MODULE; -+ } -+ } -+ -+ for(i = 0; i < NUM_HASH_TABLES; i++) { -+ sprintf (name, "translations_%d",i); -+ -+ dev->trans_index[i].tbl = i; -+ -+ if ((p = create_proc_entry (name, 0, dev->dev_osdep.procdir)) != NULL) -+ { -+ p->proc_fops = &qsnet_trans_fops; -+ p->data = & dev->trans_index[i]; -+ p->owner = THIS_MODULE; -+ } -+ } -+ -+ dev->dev_osdep.configdir = proc_mkdir ("config", dev->dev_osdep.procdir); -+ for (i = 0; i < sizeof (config_info)/sizeof (config_info[0]); i++) -+ { -+ if ((p = create_proc_entry (config_info[i].name, 0, dev->dev_osdep.configdir)) != NULL) -+ { -+ p->read_proc = config_info[i].read_func; -+ p->write_proc = config_info[i].write_func; -+ p->data = dev; -+ p->owner = THIS_MODULE; -+ } -+ } -+ -+ dev->dev_osdep.statsdir = proc_mkdir ("stats", dev->dev_osdep.procdir); -+ for (i = 0; i < sizeof (stats_info)/sizeof (stats_info[0]); i++) -+ { -+ if ((p = create_proc_entry (stats_info[i].name, 0, dev->dev_osdep.statsdir)) != NULL) -+ { -+ p->read_proc = stats_info[i].read_func; -+ p->write_proc = stats_info[i].write_func; -+ p->data = dev; -+ p->owner = THIS_MODULE; -+ } -+ } -+ for(i = 0; i < NUM_HASH_TABLES; i++) { -+ sprintf (name, "translations_%d",i); -+ -+ dev->trans_stats[i].tbl = i; -+ dev->trans_stats[i].buckets[0] = 1; -+ dev->trans_stats[i].buckets[1] = 5; -+ dev->trans_stats[i].buckets[2] = 10; -+ dev->trans_stats[i].buckets[3] = 50; -+ dev->trans_stats[i].buckets[4] = 100; -+ dev->trans_stats[i].buckets[5] = 200; -+ dev->trans_stats[i].buckets[6] = 99999999; -+ -+ if ((p = create_proc_entry (name, 0, dev->dev_osdep.statsdir)) != NULL) -+ { -+ p->read_proc = proc_read_stats_translations; -+ p->write_proc = proc_write_stats_translations; -+ p->data = & dev->trans_stats[i]; -+ p->owner = THIS_MODULE; -+ } -+ } -+ -+ dev->dev_osdep.ctxtdir = proc_mkdir ("ctxt", dev->dev_osdep.procdir); -+} -+ -+void -+elan4_procfs_device_fini 
(ELAN4_DEV *dev) -+{ -+ char name[NAME_MAX]; -+ int i; -+ -+ if (dev->dev_osdep.ctxtdir) -+ remove_proc_entry ("ctxt", dev->dev_osdep.procdir); -+ -+ for (i = 0; i < sizeof (stats_info)/sizeof (stats_info[0]); i++) -+ remove_proc_entry (stats_info[i].name, dev->dev_osdep.statsdir); -+ -+ for (i = 0; i < NUM_HASH_TABLES; i++) { -+ sprintf(name , "translations_%d", i); -+ remove_proc_entry (name, dev->dev_osdep.statsdir); -+ } -+ remove_proc_entry ("stats", dev->dev_osdep.procdir); -+ -+ for (i = 0; i < sizeof (config_info)/sizeof (config_info[0]); i++) -+ remove_proc_entry (config_info[i].name, dev->dev_osdep.configdir); -+ remove_proc_entry ("config", dev->dev_osdep.procdir); -+ -+ for (i = 0; i < sizeof (device_info)/sizeof (device_info[0]); i++) -+ { -+ if (dev->dev_devinfo.dev_revision_id < device_info[i].minrev) -+ continue; -+ -+ remove_proc_entry (device_info[i].name, dev->dev_osdep.procdir); -+ } -+ -+ for (i = 0; i < NUM_HASH_TABLES; i++) { -+ sprintf(name , "translations_%d", i); -+ remove_proc_entry (name, dev->dev_osdep.procdir); -+ } -+ -+ sprintf (name, "device%d", dev->dev_instance); -+ remove_proc_entry (name, elan4_procfs_root); -+} -+ -+void -+elan4_procfs_init(void) -+{ -+ struct proc_dir_entry *p; -+ -+ elan4_procfs_root = proc_mkdir("elan4", qsnet_procfs_root); -+ elan4_config_root = proc_mkdir("config", elan4_procfs_root); -+ -+ qsnet_proc_register_hex (elan4_config_root, "elan4_debug", &elan4_debug, 0); -+ qsnet_proc_register_hex (elan4_config_root, "elan4_debug_toconsole", &elan4_debug_toconsole, 0); -+ qsnet_proc_register_hex (elan4_config_root, "elan4_debug_tobuffer", &elan4_debug_tobuffer, 0); -+ qsnet_proc_register_int (elan4_config_root, "elan4_debug_mmu", &elan4_debug_mmu, 0); -+ qsnet_proc_register_int (elan4_config_root, "elan4_mainint_punt_loops", &elan4_mainint_punt_loops, 0); -+ qsnet_proc_register_hex (elan4_config_root, "user_p2p_route_options", &user_p2p_route_options, 0); -+ qsnet_proc_register_hex (elan4_config_root, "user_bcast_route_options", &user_bcast_route_options, 0); -+ qsnet_proc_register_int (elan4_config_root, "user_dproc_retry_count", &user_dproc_retry_count, 0); -+ qsnet_proc_register_int (elan4_config_root, "user_cproc_retry_count", &user_cproc_retry_count, 0); -+ qsnet_proc_register_int (elan4_config_root, "user_pagefault_enabled", &user_pagefault_enabled, 0); -+ qsnet_proc_register_int (elan4_config_root, "num_fault_save", &num_fault_save, 0); -+ qsnet_proc_register_int (elan4_config_root, "min_fault_pages", &min_fault_pages, 0); -+ qsnet_proc_register_int (elan4_config_root, "max_fault_pages", &max_fault_pages, 0); -+ qsnet_proc_register_int (elan4_config_root, "assfail_mode", &assfail_mode, 0); -+ -+ if ((p = create_proc_entry ("mmuhash_reduction", 0, elan4_config_root)) != NULL) -+ { -+ p->read_proc = elan4_read_mmuhash_reduction_func; -+ p->write_proc = elan4_write_mmuhash_reduction_func; -+ p->data = NULL; -+ p->owner = THIS_MODULE; -+ } -+ -+#if defined(IOPROC_PATCH_APPLIED) -+ qsnet_proc_register_int (elan4_config_root, "user_ioproc_enabled", &user_ioproc_enabled, 0); -+#endif -+} -+ -+void -+elan4_procfs_fini(void) -+{ -+#if defined(IOPROC_PATCH_APPLIED) -+ remove_proc_entry ("user_ioproc_enabled", elan4_config_root); -+#endif -+ -+ remove_proc_entry ("mmuhash_reduction", elan4_config_root); -+ -+ remove_proc_entry ("assfail_mode", elan4_config_root); -+ remove_proc_entry ("max_fault_pages", elan4_config_root); -+ remove_proc_entry ("min_fault_pages", elan4_config_root); -+ remove_proc_entry ("num_fault_save", 
elan4_config_root); -+ remove_proc_entry ("user_pagefault_enabled", elan4_config_root); -+ remove_proc_entry ("user_cproc_retry_count", elan4_config_root); -+ remove_proc_entry ("user_dproc_retry_count", elan4_config_root); -+ remove_proc_entry ("user_bcast_route_options", elan4_config_root); -+ remove_proc_entry ("user_p2p_route_options", elan4_config_root); -+ remove_proc_entry ("elan4_mainint_punt_loops", elan4_config_root); -+ remove_proc_entry ("elan4_debug_mmu", elan4_config_root); -+ remove_proc_entry ("elan4_debug_tobuffer", elan4_config_root); -+ remove_proc_entry ("elan4_debug_toconsole", elan4_config_root); -+ remove_proc_entry ("elan4_debug", elan4_config_root); -+ -+ remove_proc_entry ("config", elan4_procfs_root); -+ remove_proc_entry ("elan4", qsnet_procfs_root); -+} -+ -+EXPORT_SYMBOL(elan4_procfs_root); -+EXPORT_SYMBOL(elan4_config_root); -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan4/quadrics_version.h linux-2.6.9/drivers/net/qsnet/elan4/quadrics_version.h ---- clean/drivers/net/qsnet/elan4/quadrics_version.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan4/quadrics_version.h 2005-09-07 10:39:49.000000000 -0400 -@@ -0,0 +1 @@ -+#define QUADRICS_VERSION "5.11.3qsnet" -diff -urN clean/drivers/net/qsnet/elan4/regions.c linux-2.6.9/drivers/net/qsnet/elan4/regions.c ---- clean/drivers/net/qsnet/elan4/regions.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan4/regions.c 2004-10-21 11:31:12.000000000 -0400 -@@ -0,0 +1,609 @@ -+/* -+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: regions.c,v 1.22 2004/10/21 15:31:12 david Exp $" -+/* $Source: /cvs/master/quadrics/elan4mod/regions.c,v $*/ -+ -+#include <qsnet/kernel.h> -+ -+#include <elan4/debug.h> -+#include <elan4/device.h> -+#include <elan4/user.h> -+ -+/*================================================================================*/ -+/* elan address region management */ -+USER_RGN * -+user_findrgn_elan (USER_CTXT *uctx, E4_Addr addr, int tail) -+{ -+ USER_RGN *rgn; -+ USER_RGN *hirgn; -+ USER_RGN *lorgn; -+ E4_Addr base; -+ E4_Addr lastaddr; -+ int forward; -+ -+ ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) || kmutex_is_locked (&uctx->uctx_rgnmutex)); -+ -+ if (uctx->uctx_ergns == NULL) -+ return (NULL); -+ -+ rgn = uctx->uctx_ergnlast; -+ if (rgn == NULL) -+ rgn = uctx->uctx_ergns; -+ -+ forward = 0; -+ if ((base = rgn->rgn_ebase) < addr) -+ { -+ if (addr <= (base + rgn->rgn_len - 1)) -+ return (rgn); /* ergnlast contained addr */ -+ -+ hirgn = uctx->uctx_etail; -+ -+ if ((lastaddr = (hirgn->rgn_ebase + hirgn->rgn_len - 1)) < addr) -+ return (tail ? 
hirgn : NULL); /* addr is out of range */ -+ -+ if ((addr - base) > (lastaddr - addr)) -+ rgn = hirgn; -+ else -+ { -+ rgn = rgn->rgn_enext; -+ forward++; -+ } -+ } -+ else -+ { -+ lorgn = uctx->uctx_ergns; -+ -+ if (lorgn->rgn_ebase > addr) -+ return (lorgn); /* lowest region is higher than addr */ -+ if ((addr - lorgn->rgn_ebase) < (base - addr)) -+ { -+ rgn = lorgn; /* search forward from head */ -+ forward++; -+ } -+ } -+ if (forward) -+ { -+ while ((rgn->rgn_ebase + rgn->rgn_len - 1) < addr) -+ rgn = rgn->rgn_enext; -+ -+ if (rgn->rgn_ebase <= addr) -+ uctx->uctx_ergnlast = rgn; -+ return (rgn); -+ } -+ else -+ { -+ while (rgn->rgn_ebase > addr) -+ rgn = rgn->rgn_eprev; -+ -+ if ((rgn->rgn_ebase + rgn->rgn_len - 1) < addr) -+ return (rgn->rgn_enext); -+ else -+ { -+ uctx->uctx_ergnlast = rgn; -+ return (rgn); -+ } -+ } -+} -+ -+static int -+user_addrgn_elan (USER_CTXT *uctx, USER_RGN *nrgn) -+{ -+ USER_RGN *rgn = user_findrgn_elan (uctx, nrgn->rgn_ebase, 1); -+ E4_Addr nbase = nrgn->rgn_ebase; -+ E4_Addr ntop = nbase + nrgn->rgn_len - 1; -+ E4_Addr base; -+ -+ ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) && kmutex_is_locked (&uctx->uctx_rgnmutex)); -+ -+ if (rgn == NULL) -+ { -+ uctx->uctx_ergns = uctx->uctx_etail = nrgn; -+ nrgn->rgn_enext = nrgn->rgn_eprev = NULL; -+ } -+ else -+ { -+ base = rgn->rgn_ebase; -+ -+ if ((base + rgn->rgn_len - 1) < nbase) /* top of region below requested address */ -+ { /* so insert after region (and hence at end */ -+ nrgn->rgn_eprev = rgn; /* of list */ -+ nrgn->rgn_enext = NULL; -+ rgn->rgn_enext = uctx->uctx_etail = nrgn; -+ } -+ else -+ { -+ if (nbase >= base || ntop >= base) /* overlapping region */ -+ return (-1); -+ -+ nrgn->rgn_enext = rgn; /* insert before region */ -+ nrgn->rgn_eprev = rgn->rgn_eprev; -+ rgn->rgn_eprev = nrgn; -+ if (uctx->uctx_ergns == rgn) -+ uctx->uctx_ergns = nrgn; -+ else -+ nrgn->rgn_eprev->rgn_enext = nrgn; -+ } -+ } -+ uctx->uctx_ergnlast = nrgn; -+ -+ return (0); -+} -+ -+static USER_RGN * -+user_removergn_elan (USER_CTXT *uctx, USER_RGN *rgn) -+{ -+ ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) && kmutex_is_locked (&uctx->uctx_rgnmutex)); -+ -+ uctx->uctx_ergnlast = rgn->rgn_enext; -+ if (rgn == uctx->uctx_etail) -+ uctx->uctx_etail = rgn->rgn_eprev; -+ else -+ rgn->rgn_enext->rgn_eprev = rgn->rgn_eprev; -+ -+ if (rgn == uctx->uctx_ergns) -+ uctx->uctx_ergns = rgn->rgn_enext; -+ else -+ rgn->rgn_eprev->rgn_enext = rgn->rgn_enext; -+ -+ return (rgn); -+} -+ -+USER_RGN * -+user_rgnat_elan (USER_CTXT *uctx, E4_Addr addr) -+{ -+ USER_RGN *rgn = user_findrgn_elan (uctx, addr, 0); -+ -+ if (rgn != NULL && rgn->rgn_ebase <= addr && addr <= (rgn->rgn_ebase + rgn->rgn_len - 1)) -+ return (rgn); -+ -+ return (NULL); -+} -+ -+/* main address region management */ -+USER_RGN * -+user_findrgn_main (USER_CTXT *uctx, virtaddr_t addr, int tail) -+{ -+ USER_RGN *rgn; -+ USER_RGN *hirgn; -+ USER_RGN *lorgn; -+ virtaddr_t lastaddr; -+ virtaddr_t base; -+ int forward; -+ -+ ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) || kmutex_is_locked (&uctx->uctx_rgnmutex)); -+ -+ if (uctx->uctx_mrgns == NULL) -+ return (NULL); -+ -+ rgn = uctx->uctx_mrgnlast; -+ if (rgn == NULL) -+ rgn = uctx->uctx_mrgns; -+ -+ forward = 0; -+ if ((base = rgn->rgn_mbase) < addr) -+ { -+ if (addr <= (base + rgn->rgn_len - 1)) -+ return (rgn); /* mrgnlast contained addr */ -+ -+ hirgn = uctx->uctx_mtail; -+ if ((lastaddr = hirgn->rgn_mbase + hirgn->rgn_len - 1) < addr) -+ return (tail ? 
hirgn : NULL); /* addr is out of range */ -+ -+ if ((addr - base) > (lastaddr - addr)) -+ rgn = hirgn; -+ else -+ { -+ rgn = rgn->rgn_mnext; -+ forward++; -+ } -+ } -+ else -+ { -+ lorgn = uctx->uctx_mrgns; -+ if (lorgn->rgn_mbase > addr) -+ return (lorgn); /* lowest region is higher than addr */ -+ if ((addr - lorgn->rgn_mbase) < (base - addr)) -+ { -+ rgn = lorgn; /* search forward from head */ -+ forward++; -+ } -+ } -+ if (forward) -+ { -+ while ((rgn->rgn_mbase + rgn->rgn_len - 1) < addr) -+ rgn = rgn->rgn_mnext; -+ -+ if (rgn->rgn_mbase <= addr) -+ uctx->uctx_mrgnlast = rgn; -+ return (rgn); -+ } -+ else -+ { -+ while (rgn->rgn_mbase > addr) -+ rgn = rgn->rgn_mprev; -+ -+ if ((rgn->rgn_mbase + rgn->rgn_len - 1) < addr) -+ return (rgn->rgn_mnext); -+ else -+ { -+ uctx->uctx_mrgnlast = rgn; -+ return (rgn); -+ } -+ } -+} -+ -+static int -+user_addrgn_main (USER_CTXT *uctx, USER_RGN *nrgn) -+{ -+ USER_RGN *rgn = user_findrgn_main (uctx, nrgn->rgn_mbase, 1); -+ virtaddr_t nbase = nrgn->rgn_mbase; -+ virtaddr_t ntop = nbase + nrgn->rgn_len - 1; -+ virtaddr_t base; -+ -+ ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) && kmutex_is_locked (&uctx->uctx_rgnmutex)); -+ -+ if (rgn == NULL) -+ { -+ uctx->uctx_mrgns = uctx->uctx_mtail = nrgn; -+ nrgn->rgn_mnext = nrgn->rgn_mprev = NULL; -+ } -+ else -+ { -+ base = rgn->rgn_mbase; -+ -+ if ((base + rgn->rgn_len - 1) < nbase) /* top of region below requested address */ -+ { /* so insert after region (and hence at end */ -+ nrgn->rgn_mprev = rgn; /* of list */ -+ nrgn->rgn_mnext = NULL; -+ rgn->rgn_mnext = uctx->uctx_mtail = nrgn; -+ } -+ else -+ { -+ if (nbase >= base || ntop >= base) /* overlapping region */ -+ return (-1); -+ -+ nrgn->rgn_mnext = rgn; /* insert before region */ -+ nrgn->rgn_mprev = rgn->rgn_mprev; -+ rgn->rgn_mprev = nrgn; -+ if (uctx->uctx_mrgns == rgn) -+ uctx->uctx_mrgns = nrgn; -+ else -+ nrgn->rgn_mprev->rgn_mnext = nrgn; -+ } -+ } -+ uctx->uctx_mrgnlast = nrgn; -+ -+ return (0); -+} -+ -+static USER_RGN * -+user_removergn_main (USER_CTXT *uctx, USER_RGN *rgn) -+{ -+ ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) && kmutex_is_locked (&uctx->uctx_rgnmutex)); -+ -+ uctx->uctx_mrgnlast = rgn->rgn_mnext; -+ if (rgn == uctx->uctx_mtail) -+ uctx->uctx_mtail = rgn->rgn_mprev; -+ else -+ rgn->rgn_mnext->rgn_mprev = rgn->rgn_mprev; -+ -+ if (rgn == uctx->uctx_mrgns) -+ uctx->uctx_mrgns = rgn->rgn_mnext; -+ else -+ rgn->rgn_mprev->rgn_mnext = rgn->rgn_mnext; -+ -+ return (rgn); -+} -+ -+/* Remove whole region from both lists */ -+static void -+user_removergn (USER_CTXT *uctx, USER_RGN *rgn) -+{ -+ spin_lock (&uctx->uctx_rgnlock); -+ -+ elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* XXXX tbl */, rgn->rgn_ebase, rgn->rgn_len); -+ -+ user_removergn_elan (uctx, rgn); -+ user_removergn_main (uctx, rgn); -+ -+ spin_unlock (&uctx->uctx_rgnlock); -+ -+ KMEM_FREE (rgn, sizeof (USER_RGN)); -+} -+ -+/* Remove all allocated regions */ -+void -+user_freergns (USER_CTXT *uctx) -+{ -+ kmutex_lock (&uctx->uctx_rgnmutex); -+ -+ while (uctx->uctx_mrgns) -+ user_removergn(uctx, uctx->uctx_mrgns); -+ -+ kmutex_unlock (&uctx->uctx_rgnmutex); -+ -+ ASSERT (uctx->uctx_ergns == NULL); -+} -+ -+USER_RGN * -+user_rgnat_main (USER_CTXT *uctx, virtaddr_t addr) -+{ -+ USER_RGN *rgn = user_findrgn_main (uctx, addr, 0); -+ -+ if (rgn != NULL && rgn->rgn_mbase <= addr && addr <= (rgn->rgn_mbase + rgn->rgn_len - 1)) -+ return (rgn); -+ return (NULL); -+} -+ -+int -+user_setperm (USER_CTXT *uctx, virtaddr_t maddr, E4_Addr eaddr, unsigned long len, unsigned perm) -+{ -+ 
USER_RGN *nrgn; -+ -+ PRINTF4 (uctx, DBG_PERM, "user_setperm: user %lx elan %llx len %lx perm %x\n", maddr, (long long) eaddr, len, perm); -+ -+ if ((maddr & PAGEOFFSET) || (eaddr & PAGEOFFSET) || (len & PAGEOFFSET)) -+ { -+ PRINTF0 (uctx, DBG_PERM, "user_setperm: alignment failure\n"); -+ return (-EINVAL); -+ } -+ -+ if ((maddr + len - 1) <= maddr || (eaddr + len - 1) <= eaddr) -+ { -+ PRINTF0 (uctx, DBG_PERM, "user_setperm: range failure\n"); -+ return (-EINVAL); -+ } -+ -+ KMEM_ALLOC (nrgn, USER_RGN *, sizeof (USER_RGN), 1); -+ -+ if (nrgn == NULL) -+ return (-ENOMEM); -+ -+ nrgn->rgn_mbase = maddr; -+ nrgn->rgn_ebase = eaddr; -+ nrgn->rgn_len = len; -+ nrgn->rgn_perm = perm; -+ -+ kmutex_lock (&uctx->uctx_rgnmutex); -+ spin_lock (&uctx->uctx_rgnlock); -+ -+ if (user_addrgn_elan (uctx, nrgn) < 0) -+ { -+ PRINTF0 (uctx, DBG_PERM, "user_setperm: elan address exists\n"); -+ spin_unlock (&uctx->uctx_rgnlock); -+ kmutex_unlock (&uctx->uctx_rgnmutex); -+ -+ KMEM_FREE (nrgn, sizeof (USER_RGN)); -+ return (-EINVAL); -+ } -+ -+ if (user_addrgn_main (uctx, nrgn) < 0) -+ { -+ PRINTF0 (uctx, DBG_PERM, "user_setperm: main address exists\n"); -+ user_removergn_elan (uctx, nrgn); -+ -+ spin_unlock (&uctx->uctx_rgnlock); -+ kmutex_unlock (&uctx->uctx_rgnmutex); -+ -+ KMEM_FREE (nrgn, sizeof (USER_RGN)); -+ return (-EINVAL); -+ } -+ spin_unlock (&uctx->uctx_rgnlock); -+ -+ if ((perm & PERM_Preload)) -+ user_preload_main (uctx, maddr, len); -+ -+ kmutex_unlock (&uctx->uctx_rgnmutex); -+ -+ return (0); -+} -+ -+void -+user_clrperm (USER_CTXT *uctx, E4_Addr addr, unsigned long len) -+{ -+ E4_Addr raddr; -+ E4_Addr rtop; -+ USER_RGN *nrgn; -+ USER_RGN *rgn; -+ USER_RGN *rgn_next; -+ unsigned long ssize; -+ int res; -+ -+ PRINTF2 (uctx, DBG_PERM, "user_clrperm: elan %llx len %lx\n", addr, len); -+ -+ raddr = (addr & PAGEMASK); -+ rtop = ((addr + len - 1) & PAGEMASK) + (PAGESIZE-1); -+ -+ kmutex_lock (&uctx->uctx_rgnmutex); -+ -+ for (rgn = user_findrgn_elan (uctx, addr, 0); rgn != NULL; rgn = rgn_next) -+ { -+ if (rtop < rgn->rgn_ebase) /* rtop was in a gap */ -+ break; -+ -+ rgn_next = rgn->rgn_enext; /* Save next region pointer */ -+ -+ PRINTF (uctx, DBG_PERM, " elan %llx->%llx main %p->%p\n", -+ rgn->rgn_ebase, rgn->rgn_ebase + rgn->rgn_len-1, -+ rgn->rgn_mbase, rgn->rgn_mbase + rgn->rgn_len-1); -+ -+ if (raddr <= rgn->rgn_ebase && rtop >= (rgn->rgn_ebase + rgn->rgn_len - 1)) -+ { -+ /* whole region is cleared */ -+ -+ PRINTF (uctx, DBG_PERM, " whole region\n"); -+ PRINTF (uctx, DBG_PERM, " unload elan %llx->%llx\n", rgn->rgn_ebase, rgn->rgn_ebase + rgn->rgn_len-1); -+ user_removergn (uctx, rgn); -+ } -+ else if (raddr <= rgn->rgn_ebase) -+ { -+ /* clearing at beginning, so shrink size and increment base ptrs */ -+ ssize = rtop - rgn->rgn_ebase + 1; -+ -+ PRINTF (uctx, DBG_PERM, " clear at beginning %lx\n", ssize); -+ -+ spin_lock (&uctx->uctx_rgnlock); -+ -+ PRINTF (uctx, DBG_PERM, " unload elan %llx->%llx\n", rgn->rgn_ebase, rgn->rgn_ebase + ssize-1); -+ elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* XXXX tbl */, rgn->rgn_ebase, ssize); -+ -+ rgn->rgn_mbase += ssize; -+ rgn->rgn_ebase += ssize; -+ rgn->rgn_len -= ssize; -+ -+ spin_unlock(&uctx->uctx_rgnlock); -+ } -+ else if (rtop >= (rgn->rgn_ebase + rgn->rgn_len - 1)) -+ { -+ /* clearing at end, so just shrink length of region */ -+ ssize = (rgn->rgn_ebase + rgn->rgn_len - 1) - raddr + 1; -+ -+ PRINTF (uctx, DBG_PERM, " clear at end %lx\n", ssize); -+ -+ spin_lock (&uctx->uctx_rgnlock); -+ -+ PRINTF (uctx, DBG_PERM, " unload elan %llx->%llx\n", raddr, 
raddr+ssize-1); -+ elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* XXXX tbl */, raddr, ssize); -+ -+ rgn->rgn_len -= ssize; -+ -+ spin_unlock(&uctx->uctx_rgnlock); -+ } -+ else -+ { -+ /* the section to go is in the middle, so need to */ -+ /* split it into two regions */ -+ KMEM_ALLOC (nrgn, USER_RGN *, sizeof (USER_RGN), 1); -+ -+ spin_lock (&uctx->uctx_rgnlock); -+ -+ PRINTF (uctx, DBG_PERM, " unload elan %llx->%llx\n", raddr, rtop); -+ elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* XXXX tbl */, raddr, rtop - raddr + 1); -+ -+ nrgn->rgn_mbase = rgn->rgn_mbase + (rtop - rgn->rgn_ebase + 1); -+ nrgn->rgn_ebase = rtop + 1; -+ nrgn->rgn_len = (rgn->rgn_ebase + rgn->rgn_len - 1) - rtop; -+ nrgn->rgn_perm = rgn->rgn_perm; -+ -+ PRINTF (uctx, DBG_PERM, " new elan %llx->%llx main %p->%p\n", -+ nrgn->rgn_ebase, nrgn->rgn_ebase + nrgn->rgn_len-1, -+ nrgn->rgn_mbase, nrgn->rgn_mbase + nrgn->rgn_len-1); -+ -+ rgn->rgn_len = (raddr - rgn->rgn_ebase); /* shrink original region */ -+ -+ PRINTF (uctx, DBG_PERM, " old elan %llx->%llx main %p->%p\n", -+ rgn->rgn_ebase, rgn->rgn_ebase + rgn->rgn_len-1, -+ rgn->rgn_mbase, rgn->rgn_mbase + rgn->rgn_len-1); -+ -+ res = user_addrgn_elan (uctx, nrgn); /* insert new region */ -+ ASSERT (res == 0); /* which cannot fail */ -+ -+ res = user_addrgn_main (uctx, nrgn); -+ ASSERT (res == 0); -+ -+ spin_unlock(&uctx->uctx_rgnlock); -+ } -+ } -+ kmutex_unlock (&uctx->uctx_rgnmutex); -+} -+ -+int -+user_checkperm (USER_CTXT *uctx, E4_Addr raddr, unsigned long rsize, unsigned access) -+{ -+ USER_RGN *rgn; -+ -+ PRINTF3 (uctx, DBG_PERM, "user_checkperm: elan %lx len %lx access %x\n", raddr, rsize, access); -+ -+ if ((raddr + rsize - 1) < raddr) -+ return (-ENOMEM); -+ -+ kmutex_lock (&uctx->uctx_rgnmutex); -+ if ((rgn = user_rgnat_elan (uctx, raddr)) == (USER_RGN *) NULL) -+ { -+ kmutex_unlock (&uctx->uctx_rgnmutex); -+ return (-ENOMEM); -+ } -+ else -+ { -+ register int ssize; -+ -+ for (; rsize != 0; rsize -= ssize, raddr += ssize) -+ { -+ if (raddr > (rgn->rgn_ebase + rgn->rgn_len - 1)) -+ { -+ rgn = rgn->rgn_enext; -+ -+ if (rgn == NULL || raddr != rgn->rgn_ebase) -+ { -+ kmutex_unlock (&uctx->uctx_rgnmutex); -+ return (-ENOMEM); -+ } -+ } -+ if ((raddr + rsize - 1) > (rgn->rgn_ebase + rgn->rgn_len - 1)) -+ ssize = ((rgn->rgn_ebase + rgn->rgn_len - 1) - raddr) + 1; -+ else -+ ssize = rsize; -+ -+ PRINTF4 (uctx, DBG_PERM, "user_checkperm : rgn %lx -> %lx perm %x access %x\n", -+ rgn->rgn_ebase, rgn->rgn_ebase + (E4_Addr)rgn->rgn_len, rgn->rgn_perm, access); -+ -+ if (ELAN4_INCOMPAT_ACCESS (rgn->rgn_perm, access)) -+ { -+ kmutex_unlock (&uctx->uctx_rgnmutex); -+ return (-EACCES); -+ } -+ } -+ } -+ -+ kmutex_unlock (&uctx->uctx_rgnmutex); -+ -+ return (0); -+} -+ -+virtaddr_t -+user_elan2main (USER_CTXT *uctx, E4_Addr addr) -+{ -+ USER_RGN *rgn; -+ virtaddr_t raddr; -+ -+ spin_lock (&uctx->uctx_rgnlock); -+ -+ if ((rgn = user_rgnat_elan (uctx, addr)) == (USER_RGN *) NULL) -+ raddr = (virtaddr_t) 0; -+ else -+ raddr = rgn->rgn_mbase + (addr - rgn->rgn_ebase); -+ -+ spin_unlock (&uctx->uctx_rgnlock); -+ -+ return (raddr); -+} -+ -+E4_Addr -+user_main2elan (USER_CTXT *uctx, virtaddr_t addr) -+{ -+ USER_RGN *rgn; -+ E4_Addr raddr; -+ -+ spin_lock (&uctx->uctx_rgnlock); -+ -+ if ((rgn = user_rgnat_main (uctx, addr)) == (USER_RGN *) NULL) -+ raddr = (E4_Addr) 0; -+ else -+ raddr = rgn->rgn_ebase + (addr - rgn->rgn_mbase); -+ -+ spin_unlock (&uctx->uctx_rgnlock); -+ -+ return (raddr); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN 
clean/drivers/net/qsnet/elan4/routetable.c linux-2.6.9/drivers/net/qsnet/elan4/routetable.c ---- clean/drivers/net/qsnet/elan4/routetable.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan4/routetable.c 2005-04-15 08:38:22.000000000 -0400 -@@ -0,0 +1,254 @@ -+/* -+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: routetable.c,v 1.17 2005/04/15 12:38:22 mike Exp $" -+/* $Source: /cvs/master/quadrics/elan4mod/routetable.c,v $*/ -+ -+#include <qsnet/kernel.h> -+ -+#include <elan/devinfo.h> -+#include <elan4/debug.h> -+#include <elan4/device.h> -+ -+ELAN4_ROUTE_TABLE * -+elan4_alloc_routetable (ELAN4_DEV *dev, unsigned size) -+{ -+ ELAN4_ROUTE_TABLE *tbl; -+ -+ KMEM_ZALLOC (tbl, ELAN4_ROUTE_TABLE *, sizeof (ELAN4_ROUTE_TABLE), 1); -+ -+ if (tbl == (ELAN4_ROUTE_TABLE *) NULL) -+ return (NULL); -+ -+ tbl->tbl_size = (size & E4_VPT_SIZE_MASK); -+ tbl->tbl_entries = elan4_sdram_alloc (dev, (E4_VPT_MIN_ENTRIES << tbl->tbl_size) * sizeof (E4_VirtualProcessEntry)); -+ -+ if (tbl->tbl_entries == 0) -+ { -+ KMEM_FREE (tbl, sizeof (ELAN4_ROUTE_TABLE)); -+ return ((ELAN4_ROUTE_TABLE *) NULL); -+ } -+ -+ spin_lock_init (&tbl->tbl_lock); -+ -+ /* zero the route table */ -+ elan4_sdram_zeroq_sdram (dev, tbl->tbl_entries, (E4_VPT_MIN_ENTRIES << tbl->tbl_size) * sizeof (E4_VirtualProcessEntry)); -+ -+ return (tbl); -+} -+ -+void -+elan4_free_routetable (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl) -+{ -+ elan4_sdram_free (dev, tbl->tbl_entries, (E4_VPT_MIN_ENTRIES << tbl->tbl_size) * sizeof (E4_VirtualProcessEntry)); -+ -+ spin_lock_destroy (&tbl->tbl_lock); -+ -+ KMEM_FREE (tbl, sizeof (ELAN4_ROUTE_TABLE)); -+} -+ -+void -+elan4_write_route (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl, unsigned vp, E4_VirtualProcessEntry *entry) -+{ -+ ASSERT (vp < (E4_VPT_MIN_ENTRIES << tbl->tbl_size)); -+ -+ elan4_sdram_writeq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[1]), entry->Values[1]); -+ elan4_sdram_writeq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[0]), entry->Values[0]); -+ pioflush_sdram (dev); -+} -+ -+void -+elan4_read_route (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl, unsigned vp, E4_VirtualProcessEntry *entry) -+{ -+ ASSERT (vp < (E4_VPT_MIN_ENTRIES << tbl->tbl_size)); -+ -+ entry->Values[0] = elan4_sdram_readq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[0])); -+ entry->Values[1] = elan4_sdram_readq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[1])); -+} -+ -+void -+elan4_invalidate_route (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl, unsigned vp) -+{ -+ ASSERT (vp < (E4_VPT_MIN_ENTRIES << tbl->tbl_size)); -+ -+ elan4_sdram_writeq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[0]), 0); -+ elan4_sdram_writeq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[1]), 0); -+ pioflush_sdram (dev); -+} -+ -+static void -+pack_them_routes (E4_VirtualProcessEntry *entry, E4_uint16 first, E4_uint8 *packed, unsigned ctx) -+{ -+ E4_uint64 value0 = first; -+ E4_uint64 value1 = ROUTE_CTXT_VALUE(ctx); -+ E4_uint32 ThirdRouteBCastVal; -+ register int i; -+ -+ for (i = 0; i < (ROUTE_NUM_PACKED >> 1); i++) -+ { -+ value0 |= ((E4_uint64) packed[i]) << ((i << 2) + 
ROUTE_PACKED_OFFSET); -+ value1 |= ((E4_uint64) packed[i+(ROUTE_NUM_PACKED >> 1)]) << ((i << 2)); -+ } -+ -+ /* DMA fix for large broadcast route values that fall into the double issue of route value 3 bug. */ -+ /* NOTE - this is only required when the link is running in Mod45 mode; it could be automatically -+ * disabled when Mod44 is detected */ -+ -+ /* First search for the alignment type. The bug is only sensitive to an odd bcast alignment on the 3rd word. */ -+ for (i=4;i<16;i++) -+ if (((value0 >> (i*4)) & 0xc) == 4) -+ i++; -+ -+ if (i == 17) -+ { -+ ThirdRouteBCastVal = value1 & 0xcccccccc; -+ if (((value1 & 0xfffff0000000ULL) == 0ULL) && (ThirdRouteBCastVal == 0x04444444)) -+ value1 |= 0x140000000ULL; -+ else if (((value1 & 0xfffffff00000ULL) == 0ULL) && (ThirdRouteBCastVal == 0x00044444)) -+ value1 |= 0x1400000ULL; -+ else if (((value1 & 0xfffffffff000ULL) == 0ULL) && (ThirdRouteBCastVal == 0x00000444)) -+ value1 |= 0x14000ULL; -+ else if (((value1 & 0xfffffffffff0ULL) == 0ULL) && (ThirdRouteBCastVal == 0x00000004)) -+ value1 |= 0x140ULL; -+ } -+ -+ entry->Values[0] = value0; -+ entry->Values[1] = value1; -+} -+ -+int -+elan4_generate_route (ELAN_POSITION *pos, E4_VirtualProcessEntry *route, unsigned ctx, unsigned lowid, unsigned highid, unsigned options) -+{ -+ unsigned int broadcast = (lowid != highid); -+ unsigned int noadaptive = 0; -+ int padbcast = 0; -+ E4_uint16 first; -+ int rb; -+ E4_uint8 packed[ROUTE_NUM_PACKED]; -+ int level, llink, hlink; -+ -+ /* sanity check on lowid highid */ -+ if (highid < lowid) return (-EINVAL); -+ if (lowid < 0) return (-EINVAL); -+ if (highid >= pos->pos_nodes) return (-EINVAL); -+ -+ regenerate_routes: -+ first = 0; -+ rb = 0; -+ -+ switch (pos->pos_mode) -+ { -+ case ELAN_POS_MODE_LOOPBACK: -+ if (lowid != highid || lowid != pos->pos_nodeid) -+ return (-EINVAL); -+ -+ route->Values[0] = FIRST_MYLINK; -+ route->Values[1] = ROUTE_CTXT_VALUE (ctx); -+ return (0); -+ -+ case ELAN_POS_MODE_BACKTOBACK: -+ if (lowid != highid || lowid == pos->pos_nodeid) -+ return (-EINVAL); -+ -+ route->Values[0] = FIRST_MYLINK; -+ route->Values[1] = ROUTE_CTXT_VALUE (ctx); -+ return (0); -+ -+ case ELAN_POS_MODE_SWITCHED: -+ { -+ unsigned char *arityp = &pos->pos_arity[pos->pos_levels - 1]; -+ unsigned int spanned = *arityp; -+ unsigned int broadcasting = 0; -+ -+ bzero (packed, sizeof (packed)); -+ -+ /* XXXX compute noadaptive ? */ -+ -+ for (level = 0; -+ level < pos->pos_levels && ! ((pos->pos_nodeid / spanned) == (lowid / spanned) && -+ (pos->pos_nodeid / spanned) == (highid / spanned)); -+ level++, spanned *= *(--arityp)) -+ { -+ if (first == 0) -+ first = (broadcast || noadaptive) ? FIRST_BCAST_TREE : FIRST_ADAPTIVE; -+ else if (broadcast && padbcast) -+ { -+ padbcast = 0; -+ packed[rb++] = PACKED_BCAST0(4, 4); -+ packed[rb++] = PACKED_BCAST1(4, 4); -+ } -+ else -+ packed[rb++] = (broadcast || noadaptive) ? 
PACKED_BCAST_TREE : PACKED_ADAPTIVE; -+ } -+ -+ while (level >= 0) -+ { -+ spanned /= *arityp; -+ -+ llink = (lowid / spanned) % *arityp; -+ hlink = (highid / spanned) % *arityp; -+ -+ if (llink != hlink || broadcasting) -+ { -+ broadcasting = 1; -+ -+ if (first == 0) -+ first = FIRST_BCAST (hlink, llink); -+ else -+ { -+ packed[rb++] = PACKED_BCAST0(hlink, llink); -+ -+ if ((rb % 4) == 0 && PACKED_BCAST1(hlink, llink) == 0) -+ { -+ padbcast = 1; -+ goto regenerate_routes; -+ } -+ -+ packed[rb++] = PACKED_BCAST1(hlink, llink); -+ } -+ } -+ else -+ { -+ if (first == 0) -+ first = FIRST_ROUTE(llink); -+ else -+ packed[rb++] = PACKED_ROUTE(llink); -+ } -+ -+ level--; -+ arityp++; -+ } -+ -+ pack_them_routes (route, first | (options & FIRST_OPTIONS_MASK), packed, ctx); -+ return (0); -+ } -+ } -+ -+ return (-EINVAL); -+} -+ -+int -+elan4_check_route (ELAN_POSITION *position, ELAN_LOCATION location, E4_VirtualProcessEntry *route, unsigned flags) -+{ -+ /* XXXX - TBD */ -+ return (0); -+} -+ -+EXPORT_SYMBOL(elan4_alloc_routetable); -+EXPORT_SYMBOL(elan4_free_routetable); -+EXPORT_SYMBOL(elan4_write_route); -+EXPORT_SYMBOL(elan4_read_route); -+EXPORT_SYMBOL(elan4_invalidate_route); -+EXPORT_SYMBOL(elan4_generate_route); -+EXPORT_SYMBOL(elan4_check_route); -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan4/sdram.c linux-2.6.9/drivers/net/qsnet/elan4/sdram.c ---- clean/drivers/net/qsnet/elan4/sdram.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan4/sdram.c 2005-07-20 07:35:36.000000000 -0400 -@@ -0,0 +1,1039 @@ -+/* -+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: sdram.c,v 1.34.2.1 2005/07/20 11:35:36 mike Exp $" -+/* $Source: /cvs/master/quadrics/elan4mod/sdram.c,v $*/ -+ -+#include <qsnet/kernel.h> -+ -+#include <elan4/debug.h> -+#include <elan4/device.h> -+ -+EXPORT_SYMBOL_GPL(elan4_sdram_readb); -+EXPORT_SYMBOL_GPL(elan4_sdram_readw); -+EXPORT_SYMBOL_GPL(elan4_sdram_readl); -+EXPORT_SYMBOL_GPL(elan4_sdram_readq); -+EXPORT_SYMBOL_GPL(elan4_sdram_writeb); -+EXPORT_SYMBOL_GPL(elan4_sdram_writew); -+EXPORT_SYMBOL_GPL(elan4_sdram_writel); -+EXPORT_SYMBOL_GPL(elan4_sdram_writeq); -+EXPORT_SYMBOL_GPL(elan4_sdram_zerob_sdram); -+EXPORT_SYMBOL_GPL(elan4_sdram_zerow_sdram); -+EXPORT_SYMBOL_GPL(elan4_sdram_zerol_sdram); -+EXPORT_SYMBOL_GPL(elan4_sdram_zeroq_sdram); -+EXPORT_SYMBOL_GPL(elan4_sdram_copyb_from_sdram); -+EXPORT_SYMBOL_GPL(elan4_sdram_copyw_from_sdram); -+EXPORT_SYMBOL_GPL(elan4_sdram_copyl_from_sdram); -+EXPORT_SYMBOL_GPL(elan4_sdram_copyq_from_sdram); -+EXPORT_SYMBOL_GPL(elan4_sdram_copyb_to_sdram); -+EXPORT_SYMBOL_GPL(elan4_sdram_copyw_to_sdram); -+EXPORT_SYMBOL_GPL(elan4_sdram_copyl_to_sdram); -+EXPORT_SYMBOL_GPL(elan4_sdram_copyq_to_sdram); -+EXPORT_SYMBOL_GPL(elan4_sdram_alloc); -+EXPORT_SYMBOL_GPL(elan4_sdram_free); -+EXPORT_SYMBOL_GPL(elan4_sdram_flushcache); -+ -+#define SDRAM_MIN_BANK_SIZE ((1 << 15) * 8) /* 256 Kbytes */ -+ -+static inline ELAN4_SDRAM_BANK * -+sdramaddr_to_bank (ELAN4_DEV *dev, sdramaddr_t saddr) -+{ -+ register int i; -+ -+ for (i = 0; i < dev->dev_sdram_numbanks; i++) -+ { -+ ELAN4_SDRAM_BANK *bank = &dev->dev_sdram_banks[i]; -+ -+ if (saddr >= bank->b_base && saddr < (bank->b_base + bank->b_size)) -+ return (bank); -+ } -+ printk ("sdramaddr_to_bank: sdram address %lx not in a sdram bank\n", saddr); -+ BUG(); -+ -+ return (NULL); /* NOTREACHED */ -+}
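-+/* Worked example for the address helpers that follow (illustrative only; -+ * the concrete shift value is assumed here rather than taken from this patch): -+ * if SDRAM_MIN_BLOCK_SHIFT were 15, a free block at bank offset 0x18000 held -+ * on buddy free-list index 1 would correspond to bitmap bit -+ * 0x18000 >> (15 + 1) == 1 in bank->b_bitmaps[1], which is what -+ * sdramaddr_to_bit() computes by way of sdramaddr_to_bankoffset(). -+ */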
-+ -+static inline int -+sdramaddr_to_bankoffset (ELAN4_DEV *dev, sdramaddr_t saddr) -+{ -+ return (saddr & (sdramaddr_to_bank (dev, saddr)->b_size-1)); -+} -+ -+static inline int -+sdramaddr_to_bit(ELAN4_DEV *dev, int indx, sdramaddr_t saddr) -+{ -+ return (sdramaddr_to_bankoffset(dev, saddr) >> (SDRAM_MIN_BLOCK_SHIFT+(indx))); -+} -+ -+static inline ioaddr_t -+sdramaddr_to_ioaddr (ELAN4_DEV *dev, sdramaddr_t saddr) -+{ -+ ELAN4_SDRAM_BANK *bank = sdramaddr_to_bank (dev, saddr); -+ -+ return (bank->b_ioaddr + (saddr - bank->b_base)); -+} -+ -+unsigned char -+elan4_sdram_readb (ELAN4_DEV *dev, sdramaddr_t off) -+{ -+ return (__elan4_readb (dev, sdramaddr_to_ioaddr(dev, off))); -+} -+ -+unsigned short -+elan4_sdram_readw (ELAN4_DEV *dev, sdramaddr_t off) -+{ -+ return (__elan4_readw (dev, sdramaddr_to_ioaddr(dev, off))); -+} -+ -+unsigned int -+elan4_sdram_readl (ELAN4_DEV *dev, sdramaddr_t off) -+{ -+ return (__elan4_readl (dev, sdramaddr_to_ioaddr(dev, off))); -+} -+ -+unsigned long long -+elan4_sdram_readq (ELAN4_DEV *dev, sdramaddr_t off) -+{ -+ return (__elan4_readq (dev, sdramaddr_to_ioaddr(dev, off))); -+} -+ -+void -+elan4_sdram_writeb (ELAN4_DEV *dev, sdramaddr_t off, unsigned char val) -+{ -+ writeb (val, (void *) sdramaddr_to_ioaddr(dev, off)); -+ -+ mb(); -+} -+ -+void -+elan4_sdram_writew (ELAN4_DEV *dev, sdramaddr_t off, unsigned short val) -+{ -+ writew (val, (void *) sdramaddr_to_ioaddr(dev, off)); -+ -+ mb(); -+} -+ -+void -+elan4_sdram_writel (ELAN4_DEV *dev, sdramaddr_t off, unsigned int val) -+{ -+ writel (val, (void *) (sdramaddr_to_ioaddr(dev, off))); -+ -+ mb(); -+} -+ -+void -+elan4_sdram_writeq (ELAN4_DEV *dev, sdramaddr_t off, unsigned long long val) -+{ -+ writeq (val, (void *) (sdramaddr_to_ioaddr(dev, off))); -+ -+ mb(); -+} -+ -+void -+elan4_sdram_zerob_sdram (ELAN4_DEV *dev, sdramaddr_t to, int nbytes) -+{ -+ ioaddr_t dest = sdramaddr_to_ioaddr (dev, to); -+ ioaddr_t lim = dest + nbytes; -+ -+ for (; dest < lim; dest += sizeof (u8)) -+ writeb (0, (void *) dest); -+} -+ -+void -+elan4_sdram_zerow_sdram (ELAN4_DEV *dev, sdramaddr_t to, int nbytes) -+{ -+ ioaddr_t dest = sdramaddr_to_ioaddr (dev, to); -+ ioaddr_t lim = dest + nbytes; -+ -+ for (; dest < lim; dest += sizeof (u16)) -+ writew (0, (void *) dest); -+} -+ -+void -+elan4_sdram_zerol_sdram (ELAN4_DEV *dev, sdramaddr_t to, int nbytes) -+{ -+ ioaddr_t dest = sdramaddr_to_ioaddr (dev, to); -+ ioaddr_t lim = dest + nbytes; -+ -+ for (; dest < lim; dest += sizeof (u32)) -+ writel (0, (void *) dest); -+} -+ -+void -+elan4_sdram_zeroq_sdram (ELAN4_DEV *dev, sdramaddr_t to, int nbytes) -+{ -+ ioaddr_t dest = sdramaddr_to_ioaddr (dev, to); -+ ioaddr_t lim = dest + nbytes; -+ -+#ifdef CONFIG_MPSAS -+ if (sas_memset_dev (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM, to, 0, nbytes) == 0) -+ return; -+#endif -+ -+ for (; dest < lim; dest += sizeof (u64)) -+ writeq (0, (void *) dest); -+} -+ -+void -+elan4_sdram_copyb_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes) -+{ -+ ioaddr_t src = sdramaddr_to_ioaddr (dev, from); -+ u8 *dest = (u8 *) to; -+ ioaddr_t lim = src + nbytes; -+ -+ for (; src < lim; src += sizeof (u8)) -+ *dest++ = __elan4_readb (dev, src); -+} -+ -+void -+elan4_sdram_copyw_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes) -+{ -+ ioaddr_t src = sdramaddr_to_ioaddr (dev, from); -+ u16 *dest = (u16 *) to; -+ ioaddr_t lim = src + nbytes; -+ -+ for (; src < lim; src += sizeof (u16)) -+ *dest++ = __elan4_readw (dev, src); -+} -+ -+void -+elan4_sdram_copyl_from_sdram (ELAN4_DEV 
*dev, sdramaddr_t from, void *to, int nbytes) -+{ -+ ioaddr_t src = sdramaddr_to_ioaddr (dev, from); -+ u32 *dest = (u32 *) to; -+ ioaddr_t lim = src + nbytes; -+ -+ for (; src < lim; src += sizeof (u32)) -+ *dest++ = __elan4_readl (dev, src); -+} -+ -+void -+elan4_sdram_copyq_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes) -+{ -+ ioaddr_t src = sdramaddr_to_ioaddr (dev, from); -+ u64 *dest = (u64 *) to; -+ ioaddr_t lim = src + nbytes; -+ -+#ifdef CONFIG_MPSAS -+ if (sas_copyfrom_dev (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM, from, (unsigned long) to, nbytes) == 0) -+ return; -+#endif -+ -+ for (; src < lim; src += sizeof (u64)) -+ *dest++ = __elan4_readq (dev, src); -+} -+ -+void -+elan4_sdram_copyb_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes) -+{ -+ ioaddr_t dest = sdramaddr_to_ioaddr (dev, to); -+ u8 *src = (u8 *) from; -+ ioaddr_t lim = dest + nbytes; -+ -+ for (; dest < lim; dest += sizeof (u8)) -+ writeb (*src++, (void *) (dest)); -+ -+ mb(); -+} -+ -+void -+elan4_sdram_copyw_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes) -+{ -+ ioaddr_t dest = sdramaddr_to_ioaddr (dev, to); -+ u16 *src = (u16 *) from; -+ ioaddr_t lim = dest + nbytes; -+ -+ for (; dest < lim; dest += sizeof (u16)) -+ writew (*src++, (void *) (dest)); -+ -+ mb(); -+} -+ -+void -+elan4_sdram_copyl_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes) -+{ -+ ioaddr_t dest = sdramaddr_to_ioaddr (dev, to); -+ u32 *src = (u32 *) from; -+ ioaddr_t lim = dest + nbytes; -+ -+ for (; dest < lim; dest += sizeof (u32)) -+ writel (*src++, (void *) (dest)); -+ -+ mb(); -+} -+ -+void -+elan4_sdram_copyq_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes) -+{ -+ ioaddr_t dest = sdramaddr_to_ioaddr (dev, to); -+ u64 *src = (u64 *) from; -+ ioaddr_t lim = dest + nbytes; -+ -+#ifdef CONFIG_MPSAS -+ if (sas_copyto_dev (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM, to, (unsigned long) from, nbytes) == 0) -+ return; -+#endif -+ -+ for (; dest < lim; dest += sizeof (u64)) -+ writeq (*src++, (void *) (dest)); -+ -+ mb(); -+} -+ -+/* sdram buddy allocator */ -+typedef struct sdramblock -+{ -+ sdramaddr_t next; -+ sdramaddr_t prev; -+} sdramblock_t; -+ -+static inline sdramaddr_t -+read_next (ELAN4_DEV *dev, sdramaddr_t block) -+{ -+ return __elan4_readl (dev, sdramaddr_to_ioaddr (dev, block + offsetof (sdramblock_t, next))); -+} -+ -+static inline sdramaddr_t -+read_prev (ELAN4_DEV *dev, sdramaddr_t block) -+{ -+ return __elan4_readl (dev, sdramaddr_to_ioaddr (dev, block + offsetof (sdramblock_t, prev))); -+} -+ -+static inline void -+write_next (ELAN4_DEV *dev, sdramaddr_t block, sdramaddr_t val) -+{ -+ writel (val, (void *) (sdramaddr_to_ioaddr (dev, block + offsetof (sdramblock_t, next)))); -+} -+ -+static inline void -+write_prev (ELAN4_DEV *dev, sdramaddr_t block, sdramaddr_t val) -+{ -+ writel (val, (void *) (sdramaddr_to_ioaddr (dev, block + offsetof (sdramblock_t, prev)))); -+} -+ -+static inline void -+freelist_insert (ELAN4_DEV *dev, int idx, sdramaddr_t block) -+{ -+ sdramaddr_t next = dev->dev_sdram_freelists[(idx)]; -+ -+ /* -+ * block->prev = NULL; -+ * block->next = next; -+ * if (next != NULL) -+ * next->prev = block; -+ * freelist = block; -+ */ -+ write_prev (dev, block, (sdramaddr_t) 0); -+ write_next (dev, block, next); -+ if (next != (sdramaddr_t) 0) -+ write_prev (dev, next, block); -+ dev->dev_sdram_freelists[idx] = block; -+ -+ dev->dev_sdram_freecounts[idx]++; -+ dev->dev_stats.s_sdram_bytes_free += (SDRAM_MIN_BLOCK_SIZE << idx); -+ -+ mb(); 
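-+ /* the mb() above orders the sdram freelist link writes and the accounting updates ahead of any later allocator access; serialisation itself is assumed to be provided by the caller (via dev_sdram_lock, initialised in elan4_sdram_init()) */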
-+} -+ -+static inline void -+freelist_remove (ELAN4_DEV *dev,int idx, sdramaddr_t block) -+{ -+ /* -+ * if (block->prev) -+ * block->prev->next = block->next; -+ * else -+ * dev->dev_sdram_freelists[idx] = block->next; -+ * if (block->next) -+ * block->next->prev = block->prev; -+ */ -+ sdramaddr_t blocknext = read_next (dev, block); -+ sdramaddr_t blockprev = read_prev (dev, block); -+ -+ if (blockprev) -+ write_next (dev, blockprev, blocknext); -+ else -+ dev->dev_sdram_freelists[idx] = blocknext; -+ if (blocknext) -+ write_prev (dev, blocknext, blockprev); -+ -+ dev->dev_sdram_freecounts[idx]--; -+ dev->dev_stats.s_sdram_bytes_free -= (SDRAM_MIN_BLOCK_SIZE << idx); -+ -+ mb(); -+} -+ -+static inline void -+freelist_removehead(ELAN4_DEV *dev, int idx, sdramaddr_t block) -+{ -+ sdramaddr_t blocknext = read_next (dev, block); -+ -+ if ((dev->dev_sdram_freelists[idx] = blocknext) != 0) -+ write_prev (dev, blocknext, 0); -+ -+ dev->dev_sdram_freecounts[idx]--; -+ dev->dev_stats.s_sdram_bytes_free -= (SDRAM_MIN_BLOCK_SIZE << idx); -+ -+ mb(); -+} -+ -+#ifdef DEBUG -+static int -+display_blocks (ELAN4_DEV *dev, int indx, char *string) -+{ -+ sdramaddr_t block; -+ int nbytes = 0; -+ -+ PRINTF (DBG_DEVICE, DBG_SDRAM, "%s - indx %d\n", string, indx); -+ for (block = dev->dev_sdram_freelists[indx]; block != (sdramaddr_t) 0; block = read_next (dev, block)) -+ { -+ PRINTF (DBG_DEVICE, DBG_SDRAM, " %x\n", block); -+ nbytes += (SDRAM_MIN_BLOCK_SIZE << indx); -+ } -+ -+ return (nbytes); -+} -+ -+void -+elan4_sdram_display (ELAN4_DEV *dev, char *string) -+{ -+ int indx; -+ int nbytes = 0; -+ -+ PRINTF (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_display: dev=%p\n", dev); -+ for (indx = 0; indx < SDRAM_NUM_FREE_LISTS; indx++) -+ if (dev->dev_sdram_freelists[indx] != (sdramaddr_t) 0) -+ nbytes += display_blocks (dev, indx, string); -+ PRINTF (DBG_DEVICE, DBG_SDRAM, "\n%d bytes free - %d pages free\n", nbytes, nbytes/SDRAM_PAGE_SIZE); -+} -+ -+void -+elan4_sdram_verify (ELAN4_DEV *dev) -+{ -+ int indx, size, nbits, i, b; -+ sdramaddr_t block; -+ -+ for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1) -+ { -+ unsigned count = 0; -+ -+ for (block = dev->dev_sdram_freelists[indx]; block; block = read_next (dev, block), count++) -+ { -+ ELAN4_SDRAM_BANK *bank = sdramaddr_to_bank (dev, block); -+ unsigned off = sdramaddr_to_bankoffset (dev, block); -+ int bit = sdramaddr_to_bit (dev, indx, block); -+ -+ if ((block & (size-1)) != 0) -+ printk ("elan4_sdram_verify: block=%lx indx=%x - not aligned\n", block, indx); -+ -+ if (bank == NULL || off > bank->b_size) -+ printk ("elan4_sdram_verify: block=%lx indx=%x - outside bank\n", block, indx); -+ else if (BT_TEST (bank->b_bitmaps[indx], bit) == 0) -+ printk ("elan4_sdram_verify: block=%lx indx=%x - bit not set\n", block, indx); -+ else -+ { -+ for (i = indx-1, nbits = 2; i >= 0; i--, nbits <<= 1) -+ { -+ bit = sdramaddr_to_bit (dev, i, block); -+ -+ for (b = 0; b < nbits; b++) -+ if (BT_TEST(bank->b_bitmaps[i], bit + b)) -+ printk ("elan4_sdram_verify: block=%lx indx=%x - also free i=%d bit=%x\n", block, indx, i, bit+b); -+ } -+ } -+ } -+ -+ if (dev->dev_sdram_freecounts[indx] != count) -+ printk ("elan4_sdram_verify: indx=%x expected %d got %d\n", indx, dev->dev_sdram_freecounts[indx], count); -+ } -+} -+ -+#endif -+ -+static void -+free_block (ELAN4_DEV *dev, sdramaddr_t block, int indx) -+{ -+ ELAN4_SDRAM_BANK *bank = sdramaddr_to_bank (dev, block); -+ unsigned bit = sdramaddr_to_bit (dev, indx, block); -+ unsigned size = 
SDRAM_MIN_BLOCK_SIZE << indx; -+ -+ PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: block=%x indx=%d bit=%x\n", block, indx, bit); -+ -+ ASSERT ((block & (size-1)) == 0); -+ ASSERT (BT_TEST (bank->b_bitmaps[indx], bit) == 0); -+ -+ while (BT_TEST (bank->b_bitmaps[indx], bit ^ 1)) -+ { -+ sdramaddr_t buddy = block ^ size; -+ -+ PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: merge block=%x buddy=%x indx=%d\n", block, buddy, indx); -+ -+ BT_CLEAR (bank->b_bitmaps[indx], bit ^ 1); -+ -+ freelist_remove (dev, indx, buddy); -+ -+ block = (block < buddy) ? block : buddy; -+ indx++; -+ size <<= 1; -+ bit >>= 1; -+ } -+ -+ PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: free block=%x indx=%d bit=%x\n", block, indx, bit); -+ -+ freelist_insert (dev, indx, block); -+ -+ BT_SET (bank->b_bitmaps[indx], bit); -+} -+ -+void -+elan4_sdram_init (ELAN4_DEV *dev) -+{ -+ int indx; -+ -+ spin_lock_init (&dev->dev_sdram_lock); -+ -+ for (indx = 0; indx < SDRAM_NUM_FREE_LISTS; indx++) -+ { -+ dev->dev_sdram_freelists[indx] = (sdramaddr_t) 0; -+ dev->dev_sdram_freecounts[indx] = 0; -+ } -+} -+ -+void -+elan4_sdram_fini (ELAN4_DEV *dev) -+{ -+ spin_lock_destroy (&dev->dev_sdram_lock); -+} -+ -+#ifdef CONFIG_MPSAS -+/* size of Elan SDRAM in simulation */ -+#define SDRAM_used_addr_bits (16) -+#define SDRAM_SIMULATION_BANK_SIZE ((1 << SDRAM_used_addr_bits) * 8) /* 512 Kbytes */ -+ -+static int -+elan4_sdram_probe_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank) -+{ -+ printk ("elan%d: memory bank %d is %d Kb\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks), (int) (SDRAM_SIMULATION_BANK_SIZE / 1024)); -+ -+ bank->b_size = SDRAM_SIMULATION_BANK_SIZE; -+ -+ return 1; -+} -+ -+#else -+ -+static void -+initialise_cache_tags (ELAN4_DEV *dev, unsigned addr) -+{ -+ register int set, line; -+ -+ mb(); -+ -+ /* Initialise the whole cache to hold sdram at "addr" as direct mapped */ -+ -+ for (set = 0; set < E4_NumCacheSets; set++) -+ for (line = 0; line < E4_NumCacheLines; line++) -+ write_tag (dev, Tags[set][line], addr | (set << 13) | (1 << 11)); -+ -+ read_tag (dev, Tags[E4_NumCacheSets-1][E4_NumCacheLines-1]); /* read the last tag back to guarantee the memory system is quiet again */ -+ mb(); -+} -+ -+static __inline__ int -+sdram_GreyToBinary(int GreyVal, int NoOfBits) -+{ -+ int Bit; -+ int BinaryVal=0; -+ for (Bit=(1 << (NoOfBits-1)); Bit != 0; Bit >>= 1) -+ BinaryVal ^= (GreyVal & Bit) ^ ((BinaryVal >> 1) & Bit); -+ return (BinaryVal); -+} -+ -+static __inline__ int -+sdram_BinaryToGrey(int BinaryVal) -+{ -+ return (BinaryVal ^ (BinaryVal >> 1)); -+} -+ -+void -+elan4_sdram_setup_delay_lines (ELAN4_DEV *dev, int factor) -+{ -+ /* This is used to fix the SDRAM delay line values */ -+ int i, AutoGenDelayValue=0; -+ int NewDelayValue; -+ -+ if (dev->dev_sdram_cfg & SDRAM_FIXED_DELAY_ENABLE) /* already setup. 
*/
-+ return;
-+
-+ /* now get an average of 10 dll values */
-+ for (i=0;i<10;i++)
-+ AutoGenDelayValue += sdram_GreyToBinary(SDRAM_GET_DLL_DELAY(read_reg64 (dev, SDRamConfigReg)),
-+ SDRAM_FIXED_DLL_DELAY_BITS);
-+
-+ NewDelayValue = factor + (AutoGenDelayValue / 10); /* Mean of 10 values */
-+
-+ dev->dev_sdram_cfg = (dev->dev_sdram_cfg & ~(SDRAM_FIXED_DLL_DELAY_MASK << SDRAM_FIXED_DLL_DELAY_SHIFT)) |
-+ SDRAM_FIXED_DELAY_ENABLE | SDRAM_FIXED_DLL_DELAY(sdram_BinaryToGrey(NewDelayValue));
-+
-+ write_reg64 (dev, SDRamConfigReg, dev->dev_sdram_cfg); /* Put back the new value */
-+
-+ pioflush_reg (dev);
-+}
-+
-+static int
-+elan4_sdram_probe_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank)
-+{
-+ unsigned long mappedsize = bank->b_size;
-+ ioaddr_t ioaddr;
-+ unsigned long long value, size;
-+ register int i;
-+ extern int sdram_bank_limit;
-+
-+ if (mappedsize > SDRAM_MAX_BLOCK_SIZE)
-+ mappedsize = SDRAM_MAX_BLOCK_SIZE;
-+
-+ while ((ioaddr = elan4_map_device (dev, ELAN4_BAR_SDRAM, bank->b_base, mappedsize, &bank->b_handle)) == 0)
-+ {
-+ if (mappedsize <= (64*1024*1024)) /* boards normally populated with 64mb, so whinge if we can't see this much */
-+ printk ("elan%d: could not map bank %d size %dMb\n", dev->dev_instance, (int)(bank - dev->dev_sdram_banks), (int)mappedsize/(1024*1024));
-+
-+ if ((mappedsize >>= 1) < (1024*1024))
-+ return 0;
-+ }
-+
-+ /* first probe to see if the memory bank is present */
-+ if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
-+ initialise_cache_tags (dev, E4_CacheSize);
-+
-+ for (i = 0; i < 64; i++)
-+ {
-+ unsigned long long pattern = (1ull << i);
-+
-+ writeq (pattern, (void *)ioaddr); /* write pattern at base */
-+
-+ if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
-+ initialise_cache_tags (dev, 0);
-+
-+ writeq (~pattern, (void *)(ioaddr + E4_CacheSize)); /* write ~pattern at cachesize */
-+
-+ if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
-+ initialise_cache_tags (dev, E4_CacheSize);
-+
-+ writeq (~pattern, (void *)(ioaddr + 2*E4_CacheSize)); /* write ~pattern at 2*cachesize */
-+ if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
-+ initialise_cache_tags (dev, 2*E4_CacheSize);
-+
-+ value = __elan4_readq (dev, ioaddr); /* read pattern back at 0 */
-+
-+ if (value != pattern)
-+ {
-+ printk ("elan%d: sdram bank %d not present\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks));
-+ elan4_unmap_device (dev, ioaddr, mappedsize, &bank->b_handle);
-+ return 0;
-+ }
-+ }
-+
-+ /* sdram bank is present, so work out its size. 
We store the maximum size at the base -+ * and then store the address at each address on every power of two address until -+ * we reach the minimum mappable size (PAGESIZE), we then read back the value at the -+ * base to determine the bank size */ -+ writeq (mappedsize, (void *)(ioaddr)); -+ if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA) -+ initialise_cache_tags (dev, 0); -+ -+ for (size = mappedsize >> 1; size > PAGE_SIZE; size >>= 1) -+ { -+ writeq (size, (void *)(ioaddr + (long)size)); -+ if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA) -+ initialise_cache_tags (dev, size); -+ } -+ -+ if ((size = __elan4_readq (dev, ioaddr)) < SDRAM_MIN_BANK_SIZE) -+ { -+ printk ("elan%d: memory bank %d dubious\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks)); -+ elan4_unmap_device (dev, ioaddr, mappedsize, &bank->b_handle); -+ return 0; -+ } -+ -+ if (sdram_bank_limit == 0 || size <= (sdram_bank_limit * 1024 * 1024)) -+ printk ("elan%d: memory bank %d is %d Mb\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks), (int) (size / (1024*1024))); -+ else -+ { -+ size = (sdram_bank_limit * 1024 * 1024); -+ printk ("elan%d: limit bank %d to %d Mb\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks), (int) (size / (1024*1024))); -+ } -+ -+ bank->b_size = size; -+ -+ elan4_unmap_device (dev, ioaddr, mappedsize, &bank->b_handle); -+ return 1; -+} -+#endif -+ -+int -+elan4_sdram_init_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank) -+{ -+ int indx, size; -+ -+ bank->b_ioaddr = 0; -+ -+ if (! elan4_sdram_probe_bank (dev, bank)) -+ return 0; -+ -+ if ((bank->b_ioaddr = elan4_map_device (dev, ELAN4_BAR_SDRAM, bank->b_base, bank->b_size, &bank->b_handle)) == (ioaddr_t) 0) -+ { -+ printk ("elan%d: could not map sdrambank %d\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks)); -+ return 0; -+ } -+ -+ for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size <= bank->b_size; indx++, size <<= 1) /* allocate the buddy allocator bitmaps */ -+ KMEM_ZALLOC (bank->b_bitmaps[indx], bitmap_t *, sizeof (bitmap_t) * BT_BITOUL(bank->b_size/size), 1); -+ -+ return 1; -+} -+ -+void -+elan4_sdram_fini_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank) -+{ -+ int indx, size; -+ -+ for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size <= bank->b_size; indx++, size <<= 1) -+ KMEM_FREE (bank->b_bitmaps[indx], sizeof (bitmap_t) * BT_BITOUL(bank->b_size/size)); -+ -+ elan4_unmap_device (dev, bank->b_ioaddr, bank->b_size, &bank->b_handle); -+} -+ -+void -+elan4_sdram_add_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank) -+{ -+ sdramaddr_t base = bank->b_base; -+ sdramaddr_t top = bank->b_base + bank->b_size; -+ register int indx; -+ register unsigned long size; -+ -+ /* align to the minimum block size */ -+ base = (base + SDRAM_MIN_BLOCK_SIZE - 1) & ~((sdramaddr_t) SDRAM_MIN_BLOCK_SIZE-1); -+ top &= ~((sdramaddr_t) SDRAM_MIN_BLOCK_SIZE-1); -+ -+ /* don't allow 0 as a valid "base" */ -+ if (base == 0) -+ base = SDRAM_MIN_BLOCK_SIZE; -+ -+ /* carve the bottom to the biggest boundary */ -+ for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1) -+ { -+ if ((base & size) == 0) -+ continue; -+ -+ if ((base + size) > top) -+ break; -+ -+ free_block (dev, base, indx); -+ -+ base += size; -+ } -+ -+ /* carve the top down to the biggest boundary */ -+ for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1) -+ { -+ if ((top & size) == 0) -+ continue; -+ -+ if ((top - size) < base) -+ break; -+ -+ free_block (dev, (top - size), 
indx);
-+
-+ top -= size;
-+ }
-+
-+ /* now free the space in between */
-+ while (base < top)
-+ {
-+ free_block (dev, base, (SDRAM_NUM_FREE_LISTS-1));
-+
-+ base += SDRAM_MAX_BLOCK_SIZE;
-+ }
-+}
-+
-+sdramaddr_t
-+elan4_sdram_alloc (ELAN4_DEV *dev, int nbytes)
-+{
-+ sdramaddr_t block;
-+ register int i, indx;
-+ unsigned long size;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave (&dev->dev_sdram_lock, flags);
-+
-+ for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size < nbytes; indx++, size <<= 1)
-+ ;
-+
-+ PRINTF2 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_alloc: nbytes=%d indx=%d\n", nbytes, indx);
-+
-+ /* need to split a bigger block up */
-+ for (i = indx; i < SDRAM_NUM_FREE_LISTS; i++, size <<= 1)
-+ if (dev->dev_sdram_freelists[i])
-+ break;
-+
-+ if (i == SDRAM_NUM_FREE_LISTS)
-+ {
-+ spin_unlock_irqrestore (&dev->dev_sdram_lock, flags);
-+ printk ("elan4_sdram_alloc: %d bytes failed\n", nbytes);
-+ return ((sdramaddr_t) 0);
-+ }
-+
-+ PRINTF2 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_alloc: use block=%x indx=%d\n", dev->dev_sdram_freelists[i], i);
-+
-+ /* remove the block from the free list */
-+ freelist_removehead (dev, i, (block = dev->dev_sdram_freelists[i]));
-+
-+ /* clear the appropriate bit in the bitmap */
-+ BT_CLEAR (sdramaddr_to_bank (dev, block)->b_bitmaps[i], sdramaddr_to_bit (dev,i, block));
-+
-+ /* and split it up as required */
-+ while (i-- > indx)
-+ free_block (dev, block + (size >>= 1), i);
-+
-+ spin_unlock_irqrestore (&dev->dev_sdram_lock, flags);
-+
-+ ASSERT ((block & ((SDRAM_MIN_BLOCK_SIZE << (indx))-1)) == 0);
-+
-+#ifdef CONFIG_MPSAS
-+ elan4_sdram_zeroq_sdram (dev, block, sizeof (sdramblock_t));
-+#endif
-+
-+ return ((sdramaddr_t) block);
-+}
-+
-+void
-+elan4_sdram_free (ELAN4_DEV *dev, sdramaddr_t block, int nbytes)
-+{
-+ register int indx;
-+ unsigned long size;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave (&dev->dev_sdram_lock, flags);
-+
-+ for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size < nbytes; indx++, size <<= 1)
-+ ;
-+
-+ PRINTF2 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_free: indx=%d block=%x\n", indx, block);
-+
-+ free_block (dev, block, indx);
-+
-+ spin_unlock_irqrestore (&dev->dev_sdram_lock, flags);
-+}
-+
-+void
-+elan4_sdram_flushcache (ELAN4_DEV *dev, sdramaddr_t addr, int len)
-+{
-+ int set, off;
-+
-+ SET_SYSCONTROL (dev, dev_direct_map_pci_writes, CONT_DIRECT_MAP_PCI_WRITES);
-+
-+ /*
-+ * if flushing more than a single set (8K), then you have to flush the whole cache.
-+ * NOTE - in the real world we will probably want to generate a burst across
-+ * the pci bus. 
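-+ * The loops below flush by writing zeroes through the dev_cacheflush_space
-+ * alias at every offset that maps to the same cache lines as the target
-+ * range, displacing (and so writing back) any dirty data held for it.
-+ * When the range crosses a set-size boundary the aliased offsets wrap, so
-+ * two runs are written per set: [0, topoff) and [baseoff, E4_CacheSetSize).
-+ * For example, assuming the 8K set size noted above, flushing the range
-+ * [0x3fc0, 0x4040) gives baseoff=0x1fc0 and topoff=0x40, which wraps.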
-+ */ -+ if (len >= E4_CacheSetSize) -+ { -+ PRINTF3 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_flushcache: addr=%x len=%x (%x) => whole cache\n", addr, len, addr + len); -+ -+#ifdef CONFIG_MPSAS -+ elan4_sdram_zeroq_sdram (dev, dev->dev_cacheflush_space, E4_CacheSize); -+#else -+ for (set = 0; set < E4_NumCacheSets; set++) -+ for (off = 0; off < E4_CacheSetSize; off += E4_CacheLineSize) -+ elan4_sdram_writeq (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + off, 0); -+#endif -+ } -+ else -+ { -+ unsigned base = addr & ~(E4_CACHELINE_SIZE-1); -+ unsigned top = (addr + len + (E4_CACHELINE_SIZE-1)) & ~(E4_CACHELINE_SIZE-1); -+ unsigned baseoff = base & (E4_CacheSetSize-1); -+ unsigned topoff = top & (E4_CacheSetSize-1); -+ -+ if ((base ^ top) & E4_CacheSetSize) /* wraps */ -+ { -+ PRINTF7 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_flushcache: addr=%x len=%x (%x) => split cache (%x,%x %x,%x)\n", -+ addr, len, addr + len, 0, topoff, baseoff, E4_CacheSetSize); -+ -+#ifdef CONFIG_MPSAS -+ for (set = 0; set < E4_NumCacheSets; set++) -+ { -+ elan4_sdram_zeroq_sdram (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize), topoff); -+ elan4_sdram_zeroq_sdram (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + baseoff, E4_CacheSetSize - baseoff); -+ } -+#else -+ for (set = 0; set < E4_NumCacheSets; set++) -+ { -+ for (off = 0; off < (top & (E4_CacheSetSize-1)); off += E4_CACHELINE_SIZE) -+ elan4_sdram_writeq (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + off, 0); -+ -+ for (off = (base & (E4_CacheSetSize-1)); off < E4_CacheSetSize; off += E4_CACHELINE_SIZE) -+ elan4_sdram_writeq (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + off, 0); -+ } -+#endif -+ } -+ else -+ { -+ PRINTF5 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_flushcache: addr=%x len=%x (%x) => part cache (%x,%x)\n", -+ addr, len, addr + len, baseoff, topoff); -+ -+#ifdef CONFIG_MPSAS -+ for (set = 0; set < E4_NumCacheSets; set++) -+ elan4_sdram_zeroq_sdram (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + baseoff, topoff - baseoff); -+#else -+ for (set = 0; set < E4_NumCacheSets; set++) -+ for (off = (base & (E4_CacheSetSize-1)); off < (top & (E4_CacheSetSize-1)); off += E4_CACHELINE_SIZE) -+ elan4_sdram_writeq (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + off, 0); -+#endif -+ } -+ } -+ pioflush_sdram (dev); -+ -+ CLEAR_SYSCONTROL (dev, dev_direct_map_pci_writes, CONT_DIRECT_MAP_PCI_WRITES); -+} -+ -+static char * -+get_correctableErr_bitpos(uint SyndromeBits) -+{ -+ switch (SyndromeBits) -+ { -+ case 0x00: return ("NoErr"); -+ case 0x31: return ("00"); -+ case 0x32: return ("01"); -+ case 0xc4: return ("02"); -+ case 0xc8: return ("03"); -+ case 0x26: return ("04"); -+ case 0x91: return ("05"); -+ case 0x89: return ("06"); -+ case 0x64: return ("07"); -+ case 0xc1: return ("08"); -+ case 0xf2: return ("09"); -+ case 0x34: return ("10"); -+ case 0xf8: return ("11"); -+ case 0xf1: return ("12"); -+ case 0xc2: return ("13"); -+ case 0xf4: return ("14"); -+ case 0x38: return ("15"); -+ case 0xd6: return ("16"); -+ case 0xa1: return ("17"); -+ case 0x79: return ("18"); -+ case 0xa4: return ("19"); -+ case 0xd9: return ("20"); -+ case 0xa2: return ("21"); -+ case 0x76: return ("22"); -+ case 0xa8: return ("23"); -+ case 0xe6: return ("24"); -+ case 0x51: return ("25"); -+ case 0xb9: return ("26"); -+ case 0x54: return ("27"); -+ case 0xe9: return ("28"); -+ case 0x52: return ("29"); -+ case 0xb6: return ("30"); -+ case 0x58: return ("31"); -+ case 0x13: return ("32"); -+ case 0x23: return 
("33"); -+ case 0x4c: return ("34"); -+ case 0x8c: return ("35"); -+ case 0x62: return ("36"); -+ case 0x19: return ("37"); -+ case 0x98: return ("38"); -+ case 0x46: return ("39"); -+ case 0x1c: return ("40"); -+ case 0x2f: return ("41"); -+ case 0x43: return ("42"); -+ case 0x8f: return ("43"); -+ case 0x1f: return ("44"); -+ case 0x2c: return ("45"); -+ case 0x4f: return ("46"); -+ case 0x83: return ("47"); -+ case 0x6d: return ("48"); -+ case 0x1a: return ("49"); -+ case 0x97: return ("50"); -+ case 0x4a: return ("51"); -+ case 0x9d: return ("52"); -+ case 0x2a: return ("53"); -+ case 0x67: return ("54"); -+ case 0x8a: return ("55"); -+ case 0x6e: return ("56"); -+ case 0x15: return ("57"); -+ case 0x9b: return ("58"); -+ case 0x45: return ("59"); -+ case 0x9e: return ("60"); -+ case 0x25: return ("61"); -+ case 0x6b: return ("62"); -+ case 0x85: return ("63"); -+ case 0x01: return ("C0"); -+ case 0x02: return ("C1"); -+ case 0x04: return ("C2"); -+ case 0x08: return ("C3"); -+ case 0x10: return ("C4"); -+ case 0x20: return ("C5"); -+ case 0x40: return ("C6"); -+ case 0x80: return ("C7"); -+ -+ case 0x07: case 0x0b: case 0x0d: case 0x0e: case 0x3d: case 0x3e: case 0x70: case 0x7c: // T -+ case 0xb0: case 0xbc: case 0xc7: case 0xcb: case 0xd0: case 0xd3: case 0xe0: case 0xe3: // T -+ return ("triple"); -+ -+ case 0x0f: case 0x55: case 0x5a: case 0xa5: case 0xaa: case 0xf0: case 0xff: // Q -+ return ("quadruple"); -+ -+ case 0x16: case 0x29: case 0x37: case 0x3b: case 0x49: case 0x57: case 0x5b: case 0x5d: case 0x5e: case 0x61: // M -+ case 0x68: case 0x73: case 0x75: case 0x7a: case 0x7f: case 0x86: case 0x92: case 0x94: case 0xa7: case 0xab: // M -+ case 0xad: case 0xae: case 0xb3: case 0xb5: case 0xba: case 0xbf: case 0xcd: case 0xce: case 0xd5: case 0xda: // M -+ case 0xdc: case 0xdf: case 0xe5: case 0xea: case 0xec: case 0xef: case 0xf7: case 0xfb: case 0xfd: case 0xfe: // M -+ return ("multiple"); -+ -+ default: // all other cases -+ return ("double"); -+ } -+} -+ -+char * -+elan4_sdramerr2str (ELAN4_DEV *dev, E4_uint64 status, E4_uint64 ConfigReg, char *str) -+{ -+ E4_uint64 StartupSyndrome = dev->dev_sdram_initial_ecc_val; -+ int RisingDQSsyndrome = ((ECC_RisingDQSSyndrome(status) == ECC_RisingDQSSyndrome(StartupSyndrome)) ? -+ 0 : ECC_RisingDQSSyndrome(status)); -+ int FallingDQSsyndrome = ((ECC_FallingDQSSyndrome(status) == ECC_FallingDQSSyndrome(StartupSyndrome)) ? -+ 0 : ECC_FallingDQSSyndrome(status)); -+ E4_uint64 Addr = ECC_Addr(status); -+ int Bank = (Addr >> 6) & 3; -+ int Cas = ((Addr >> 3) & 7) | ((Addr >> (8 - 3)) & 0xf8) | ((Addr >> (25 - 8)) & 0x100) | -+ ((Addr >> (27 - 9)) & 0x200) | ((Addr >> (29 - 10)) & 0xc00); -+ int Ras = ((Addr >> 13) & 0xfff) | ((Addr >> (26 - 12)) & 0x1000) | ((Addr >> (28 - 13)) & 0x2000) | -+ ((Addr >> (30 - 14)) & 0x4000); -+ -+ sprintf (str, "Addr=%07llx Bank=%x Ras=%x Cas=%x Falling DQS=%s Rising DQS=%s Syndrome=%x%s%s%s%s Type=%s SDRamDelay=%s,%0d", /* 41 + 16 + 8 + 15 + 24 + 13 + 22 + 10 + 10 == 151 */ -+ (long long)Addr, Bank, Ras, Cas, -+ get_correctableErr_bitpos(FallingDQSsyndrome), -+ get_correctableErr_bitpos(RisingDQSsyndrome), -+ (int)ECC_Syndrome(status), -+ ECC_UncorrectableErr(status) ? " Uncorrectable" : "", -+ ECC_MultUncorrectErrs(status) ? " Multiple-Uncorrectable" : "", -+ ECC_CorrectableErr(status) ? " Correctable" : "", -+ ECC_MultCorrectErrs(status) ? " Multiple-Correctable" : "", -+ (status & 0x0010000000000000ull) ? "W" : -+ (status & 0x0020000000000000ull) ? "R" : -+ (status & 0x0030000000000000ull) ? 
"C" : "-", -+ (ConfigReg & SDRAM_FIXED_DELAY_ENABLE) ? "F" : "A", -+ sdram_GreyToBinary(SDRAM_GET_DLL_DELAY(ConfigReg), SDRAM_FIXED_DLL_DELAY_BITS)); -+ -+ return str; -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan4/trap.c linux-2.6.9/drivers/net/qsnet/elan4/trap.c ---- clean/drivers/net/qsnet/elan4/trap.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan4/trap.c 2005-07-20 07:35:36.000000000 -0400 -@@ -0,0 +1,781 @@ -+/* -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: trap.c,v 1.23.2.1 2005/07/20 11:35:36 mike Exp $" -+/* $Source: /cvs/master/quadrics/elan4mod/trap.c,v $*/ -+ -+#include -+ -+#include -+#include -+ -+#include -+#include -+ -+char * const PermTypes[16] = -+{ -+ "Disabled", "Unused", "LocalDataRead", "LocalDataWrite", -+ "LocalRead", "LocalExecute", "ReadOnly", "LocalWrite", -+ "LocalEventOnly", "LocalEventWrite", "RemoteEvent", "RemoteAll", -+ "RemoteReadOnly", "RemoteWriteOnly", "DataReadWrite", "NoFault", -+}; -+ -+char * const AccTypes[] = -+{ -+ "LocalDataRead ", "LocalDataWrite", "RemoteRead ", "RemoteWrite ", -+ "Execute ", "LocalEvent ", "Unused ", "RemoteEvent " -+}; -+char * const DataTypes[] = {"Byte ", "HWord", "Word ", "DWord"}; -+char * const PhysTypes[] = {"Special Read", "Special Write", "Physical Read", "Physical Write"}; -+ -+char * const EProcTrapNames[] = { -+ "EventProcNoFault", -+ "EventProcAddressAlignment", -+ "EventProcMemoryFault", -+ "EventProcCountWrapError", -+}; -+ -+char * const CProcTrapNames[] = { -+ "CommandProcNoFault", -+ "CommandProcInserterError", -+ "CommandProcPermissionTrap", -+ "CommandProcSendTransInvalid", -+ "CommandProcSendTransExpected", -+ "CommandProcDmaQueueOverflow", -+ "CommandProcInterruptQueueOverflow", -+ "CommandProcMemoryFault", -+ "CommandProcRouteFetchFault", -+ "CommandProcFailCountZero", -+ "CommandProcAddressAlignment", -+ "CommandProcWaitTrap", -+ "CommandProcMultipleGuards", -+ "CommandProcOpenOnGuardedChan", -+ "CommandProcThreadQueueOverflow", -+ "CommandProcBadData", -+}; -+ -+char *const CProcInsertError[] = { -+ "No Error", -+ "Overflowed", -+ "Invalid Write Size", -+ "Invalid Write Order", -+}; -+ -+char * const DProcTrapNames[] = { -+ "DmaProcNoFault", -+ "DmaProcRouteFetchFault", -+ "DmaProcFailCountError", -+ "DmaProcPacketAckError", -+ "DmaProcRunQueueReadFault", -+ "DmaProcQueueOverFlow", -+ "DmaProcPrefetcherFault", /* addy: Added new trap type for Prefetcher faults */ -+}; -+ -+char *const IProcTrapNames[] = { -+ "InputNoFault", -+ "InputAddressAlignment", -+ "InputMemoryFault", -+ "InputInvalidTransType", -+ "InputDmaQueueOverflow", -+ "InputEventEngineTrapped", -+ "InputCrcErrorAfterPAckOk", -+ "InputEopErrorOnWaitForEop", -+ "InputEopErrorTrap", -+ "InputDiscardAfterAckOk", -+}; -+ -+char *const TProcTrapNames[] = { -+ "HaltThread", -+ "TrapForTooManyInstructions", -+ "InstAccessException", -+ "Unimplemented", -+ "DataAccessException", -+ "DataAlignmentError", -+ "TrapForUsingBadData", -+}; -+ -+#define declare_spaces(space, str) char space[64]; do { int i; for (i = 0; i < strlen(str); i++) spaces[i] = ' '; space[i] = '\0'; } while (0) -+#define declare_prefix(space, spaces, str) char space[64]; do { strcpy (space, spaces); strcat (space, str); } while (0) -+ -+void -+elan4_display_farea (void *type, int mode, char *str, 
E4_FaultSave *farea) -+{ -+ E4_uint32 FSR = FaultSaveFSR(farea->FSRAndFaultContext); -+ -+ declare_spaces(spaces, str); -+ -+ elan4_debugf (type, mode, "%s Fault occurred at %016llx for context %4x\n", str, -+ farea->FaultAddress, FaultSaveContext(farea->FSRAndFaultContext)); -+ -+ if (FSR & AT_VirtualWriteAccBit) /* Virtual write access */ -+ elan4_debugf (type, mode, "%s FSR=%x: Virtual Write. DWSize=0x%x EndP=0x%x Access=%s DT=%s\n", -+ spaces, FSR, FSR & AT_VirtualWriteSizeMask, -+ (FSR >> AT_VirtualWriteEndPtrShift) & AT_VirtualWriteEndPtrMask, -+ AccTypes[(FSR >> AT_PermBitsShift) & AT_PermBitsMask], -+ DataTypes[(FSR >> AT_BlkDataTyShift) & AT_BlkDataTyMask]); -+ else if (FSR & AT_VirtualReadAccBit) /* Virtual read access */ -+ elan4_debugf (type, mode, "%s FSR=%x: Virtual Read. DWSize=0x%x Access=%s DT=%s\n", -+ spaces, FSR, FSR & AT_VirtualReadSizeMask, -+ AccTypes[(FSR >> AT_PermBitsShift) & AT_PermBitsMask], -+ DataTypes[(FSR >> AT_BlkDataTyShift) & AT_BlkDataTyMask]); -+ else -+ elan4_debugf (type, mode, "%s FSR=%x: %s. Size=0x%x\n", spaces, -+ FSR, PhysTypes[(FSR >> AT_SelBitsShift) & AT_SelBitsMask], -+ FSR & AT_OtherSizeMask); -+ elan4_debugf (type, mode, "%s FSR: %s %s%s %sWalking\n", spaces, -+ (FSR & AT_NonAlloc) ? "NonAlloc" : "Alloc", -+ (FSR & AT_DmaData) ? "Dma " : "", -+ (FSR & FSR_WalkForThread) ? "ThreadAcc" : "UnitsAcc", -+ (FSR & FSR_Walking) ? "" : "Not"); -+ PRINTF (type, mode, "%s FSR: %s%sHashTable=%s\n", spaces, -+ (FSR & FSR_NoTranslationsFound) ? "NoTranslationsFound " : "", -+ (FSR & FSR_WalkingProtectionFault) ? "WalkingProtectionFault " : "", -+ (FSR & FSR_HashTable1) ? "1" : "0"); -+ if (FSR & (FSR_RouteVProcErr | FSR_FaultForBadData)) -+ elan4_debugf (type, mode, "%s FSR: %s%s\n", spaces, -+ (FSR & FSR_RouteVProcErr) ? "RouteVProcErr " : "", -+ (FSR & FSR_FaultForBadData) ? "FaultForBadData " : ""); -+} -+ -+void -+elan4_display_eproc_trap (void *type, int mode, char *str, ELAN4_EPROC_TRAP *trap) -+{ -+ declare_spaces (spaces, str); -+ -+ elan4_debugf (type, mode, "%s Status=%016llx %s EventAddr=%016llx CountAndType=%016llx\n", str, -+ trap->tr_status, EProcTrapNames[EPROC_TrapType(trap->tr_status)], -+ trap->tr_eventaddr, trap->tr_event.ev_CountAndType); -+ elan4_debugf (type, mode, "%s Param=%016llx.%016llx\n", spaces, -+ trap->tr_event.ev_Params[0], trap->tr_event.ev_Params[1]); -+ -+ elan4_display_farea (type, mode, strcat (spaces, EPROC_Port0Fault(trap->tr_status) ? 
" EPROC0" : " EPROC1"), &trap->tr_faultarea); -+} -+ -+void -+elan4_display_cproc_trap (void *type, int mode, char *str, ELAN4_CPROC_TRAP *trap) -+{ -+ declare_spaces(spaces, str); -+ -+ elan4_debugf (type, mode, "%s Status=%llx %s Command=%llx\n", str, trap->tr_status, -+ CProcTrapNames[CPROC_TrapType(trap->tr_status)], trap->tr_command); -+ elan4_debugf (type, mode, "%s Desc=%016llx %016llx %016llx %016llx\n", str, -+ trap->tr_qdesc.CQ_QueuePtrs, trap->tr_qdesc.CQ_HoldingValue, -+ trap->tr_qdesc.CQ_AckBuffers, trap->tr_qdesc.CQ_Control); -+ -+ switch (CPROC_TrapType (trap->tr_status)) -+ { -+ case CommandProcInserterError: -+ elan4_debugf (type, mode, "%s %s\n", str, CProcInsertError[CQ_RevB_ErrorType(trap->tr_qdesc.CQ_QueuePtrs)]); -+ break; -+ -+ case CommandProcWaitTrap: -+ elan4_display_eproc_trap (type, mode, spaces, &trap->tr_eventtrap); -+ break; -+ -+ default: -+ elan4_display_farea (type, mode, spaces, &trap->tr_faultarea); -+ break; -+ } -+} -+ -+void -+elan4_display_dproc_trap (void *type, int mode, char *str, ELAN4_DPROC_TRAP *trap) -+{ -+ declare_spaces (spaces, str); -+ -+ elan4_debugf (type, mode, "%s status %llx - %s\n", str, -+ trap->tr_status, DProcTrapNames[DPROC_TrapType(trap->tr_status)]); -+ -+ elan4_debugf (type, mode, "%s DESC %016llx %016llx %016llx %016llx\n", spaces, trap->tr_desc.dma_typeSize, -+ trap->tr_desc.dma_cookie, trap->tr_desc.dma_vproc, trap->tr_desc.dma_srcAddr); -+ elan4_debugf (type, mode, "%s %016llx %016llx %016llx\n", spaces, trap->tr_desc.dma_dstAddr, -+ trap->tr_desc.dma_srcEvent, trap->tr_desc.dma_dstEvent); -+ -+ if (DPROC_PrefetcherFault (trap->tr_status)) -+ elan4_display_farea (type, mode, spaces, &trap->tr_prefetchFault); -+} -+ -+void -+elan4_display_tproc_trap (void *type, int mode, char *str, ELAN4_TPROC_TRAP *trap) -+{ -+ register int i; -+ declare_spaces (spaces, str); -+ -+ elan4_debugf (type, mode, "%s PC=%016llx nPC=%016llx State=%016llx Status=%016llx -%s%s%s%s\n", str, -+ trap->tr_pc, trap->tr_npc, trap->tr_state, trap->tr_status, -+ (trap->tr_state & TS_TrapForTooManyInstructions) ? " TrapForTooManyInstructions" : "", -+ (trap->tr_state & TS_Unimplemented) ? " Unimplemented" : "", -+ (trap->tr_state & TS_DataAlignmentError) ? " DataAlignmentError" : "", -+ (trap->tr_state & TS_InstAccessException) ? " InstAccessException" : "", -+ (trap->tr_state & TS_DataAccessException) ? 
" DataAlignmentError" : ""); -+ -+ for (i = 0; i < 64; i += 4) -+ elan4_debugf (type, mode, "%s r%d - %016llx %016llx %016llx %016llx\n", spaces, i, -+ trap->tr_regs[i], trap->tr_regs[i+1], trap->tr_regs[i+2], trap->tr_regs[i+3]); -+ -+ if (trap->tr_state & TS_InstAccessException) -+ { -+ declare_prefix (prefix, spaces, "Inst"); -+ -+ elan4_display_farea (type, mode, prefix, &trap->tr_instFault); -+ } -+ -+ if (trap->tr_state & TS_DataAccessException) -+ { -+ declare_prefix (prefix, spaces, "Data"); -+ elan4_display_farea (type, mode, prefix, &trap->tr_dataFault); -+ } -+} -+ -+void -+elan4_display_iproc_trap (void *type, int mode, char *str, ELAN4_IPROC_TRAP *trap) -+{ -+ register int i; -+ declare_spaces (spaces, str); -+ -+ for (i = 0; i < trap->tr_numTransactions; i++) -+ { -+ E4_IprocTrapHeader *hdrp = &trap->tr_transactions[i]; -+ E4_uint64 status = hdrp->IProcStatusCntxAndTrType; -+ E4_Addr addr = hdrp->TrAddr; -+ char *typeString; -+ char buffer[256]; -+ char *ptr = buffer; -+ -+ if (IPROC_EOPTrap(status)) -+ { -+ switch (IPROC_EOPType(status)) -+ { -+ case EOP_GOOD: typeString = "EopGood"; break; -+ case EOP_BADACK: typeString = "EopBadAck"; break; -+ case EOP_ERROR_RESET: typeString = "EopReset"; break; -+ default: typeString = "EopBad"; break; -+ } -+ -+ ptr += sprintf (ptr, "%15s Cntx=%-6d", typeString, IPROC_NetworkContext(status)); -+ } -+ else -+ { -+ if (IPROC_BadLength(status)) -+ typeString = "BadLength"; -+ else if (IPROC_TransCRCStatus(status) == CRC_STATUS_DISCARD) -+ typeString = "DiscardCrc"; -+ else if (IPROC_TransCRCStatus(status) == CRC_STATUS_ERROR) -+ typeString = "ErrorCrc Remote Network error"; -+ else if (IPROC_TransCRCStatus(status) == CRC_STATUS_BAD) -+ typeString = "BadCrc Cable error into this node."; -+ else -+ { -+ if ((IPROC_TransactionType(status) & TR_BLOCK_OPCODE_MASK) == TR_WRITEBLOCK) -+ typeString = "WriteBlock"; -+ else -+ { -+ switch (IPROC_TransactionType(status) & TR_OPCODE_MASK) -+ { -+ case TR_SETEVENT_IDENTIFY & TR_OPCODE_MASK: typeString = "SetEvent"; break; -+ case TR_REMOTEDMA & TR_OPCODE_MASK: typeString = "RemoteDma"; break; -+ case TR_SENDDISCARD & TR_OPCODE_MASK: typeString = "SendDiscard"; break; -+ case TR_GTE & TR_OPCODE_MASK: typeString = "GTE"; break; -+ case TR_LT & TR_OPCODE_MASK: typeString = "LT"; break; -+ case TR_EQ & TR_OPCODE_MASK: typeString = "EQ"; break; -+ case TR_NEQ & TR_OPCODE_MASK: typeString = "NEQ"; break; -+ case TR_IDENTIFY & TR_OPCODE_MASK: typeString = "Idenfity"; break; -+ case TR_ADDWORD & TR_OPCODE_MASK: typeString = "AddWord"; break; -+ case TR_INPUT_Q_COMMIT & TR_OPCODE_MASK: typeString = "InputQCommit"; break; -+ case TR_TESTANDWRITE & TR_OPCODE_MASK: typeString = "TestAndWrite"; break; -+ case TR_INPUT_Q_GETINDEX & TR_OPCODE_MASK: typeString = "InputQGetIndex"; break; -+ case TR_TRACEROUTE_TRANS & TR_OPCODE_MASK: typeString = "TraceRoute"; break; -+ default: typeString = "Unknown"; break; -+ } -+ } -+ } -+ -+ ptr += sprintf (ptr, "%15s Cntx=%-6d Addr=%016llx", typeString, IPROC_NetworkContext(status), (unsigned long long) addr); -+ } -+ -+ -+ if (IPROC_TrapValue(status) != InputNoFault) -+ { -+ ptr += sprintf (ptr, " TrType=%2d ChanTrapped=%x GoodAck=%x BadAck=%x InputterChan=%d", IPROC_TrapValue(status), -+ IPROC_ChannelTrapped(status), IPROC_GoodAckSent(status), IPROC_BadAckSent(status), -+ IPROC_InputterChan(status)); -+ if (IPROC_EOPTrap(status)) -+ ptr += sprintf (ptr, " EOPType=%d", IPROC_EOPType(status)); -+ else -+ ptr += sprintf (ptr, " %s%s%s%s", -+ IPROC_FirstTrans(status) ? 
" FirstTrans" : "", -+ IPROC_LastTrans(status) ? " LastTrans" : "", -+ (IPROC_TransactionType(status) & TR_WAIT_FOR_EOP) ? " WaitForEop" : "", -+ (IPROC_GoodAckSent(status) & (1 << IPROC_Channel(status))) ? " AckSent" : ""); -+ } -+ -+ elan4_debugf (type, mode, "%s %s\n", str, buffer); -+ -+ str = spaces; -+ } -+ -+ elan4_display_farea (type, mode, spaces, &trap->tr_faultarea); -+} -+ -+#define elan4_sdram_copy_faultarea(dev, unit, farea) \ -+ elan4_sdram_copyq_from_sdram ((dev), (dev)->dev_faultarea + (unit) * sizeof (E4_FaultSave), (E4_uint64 *) farea, sizeof (E4_FaultSave)); -+ -+void -+elan4_extract_eproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_EPROC_TRAP *trap, int iswaitevent) -+{ -+ /* only one of the memory ports can fault at a time */ -+ ASSERT (EPROC_TrapType(status) != EventProcMemoryFault || (EPROC_Port0Fault(status) ^ EPROC_Port1Fault(status)) == 1); -+ -+ trap->tr_status = status; -+ -+ if (EPROC_Port0Fault(status)) -+ elan4_sdram_copy_faultarea (dev, CUN_EventProc0, &trap->tr_faultarea); -+ if (EPROC_Port1Fault(status)) -+ elan4_sdram_copy_faultarea (dev, CUN_EventProc1, &trap->tr_faultarea); -+ -+ if (iswaitevent) -+ { -+ /* -+ * for waitevents the Event address is always taken from the command processor -+ * -+ * if we trapped during the copy then we take the "Event" from the event processor -+ * since we need to complete the copy. Otherwise we'll be reissuing the original -+ * command again -+ */ -+ E4_uint32 fsr = FaultSaveFSR(trap->tr_faultarea.FSRAndFaultContext); -+ -+ trap->tr_eventaddr = read_reg64 (dev, CommandHold) ^ WAIT_EVENT_CMD; -+ -+ if (EPROC_TrapType(trap->tr_status) == EventProcMemoryFault && -+ (AT_Perm(fsr) == AT_PermLocalDataRead || AT_Perm(fsr) == AT_PermLocalDataWrite)) -+ { -+ trap->tr_event.ev_CountAndType = read_reg64 (dev, EventCountAndType); -+ trap->tr_event.ev_Params[0] = read_reg64 (dev, EventParameters[0]); -+ trap->tr_event.ev_Params[1] = read_reg64 (dev, EventParameters[1]); -+ } -+ else -+ { -+ trap->tr_event.ev_Params[0] = read_reg64 (dev, CommandCopy[5]); -+ trap->tr_event.ev_CountAndType = read_reg64 (dev, CommandCopy[4]); -+ trap->tr_event.ev_Params[1] = read_reg64 (dev, CommandCopy[6]); -+ -+ } -+ } -+ else -+ { -+ trap->tr_eventaddr = read_reg64 (dev, EventAddress); -+ trap->tr_event.ev_CountAndType = read_reg64 (dev, EventCountAndType); -+ trap->tr_event.ev_Params[0] = read_reg64 (dev, EventParameters[0]); -+ trap->tr_event.ev_Params[1] = read_reg64 (dev, EventParameters[1]); -+ } -+ -+ BumpDevStat (dev, s_eproc_trap_types[EPROC_TrapType(status)]); -+} -+ -+int -+cproc_open_extract_vp (ELAN4_DEV *dev, ELAN4_CQ *cq, int chan) -+{ -+ /* cq = ucq->ucq_cq */ -+ if ((cq->cq_perm & CQ_STENEnableBit) != 0) -+ { -+ sdramaddr_t cqdesc = dev->dev_cqaddr + (elan4_cq2num(cq) * sizeof (E4_CommandQueueDesc)); -+ E4_uint64 queuePtrs = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs)); -+ sdramaddr_t insertPtr = (queuePtrs & CQ_PtrMask); -+ sdramaddr_t commandPtr = CQ_CompletedPtr (queuePtrs); -+ unsigned int cqSize = CQ_Size ((queuePtrs >> CQ_SizeShift) & CQ_SizeMask); -+ -+ if (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA && (queuePtrs & CQ_RevB_ReorderingQueue)) -+ { -+ E4_uint32 oooMask = elan4_sdram_readl (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_HoldingValue)); -+ -+ for (; (oooMask & 1) != 0; oooMask >>= 1) -+ insertPtr = (insertPtr & ~(cqSize-1)) | ((insertPtr + sizeof (E4_uint64)) & (cqSize-1)); -+ } -+ -+ while (commandPtr != insertPtr) -+ { -+ E4_uint64 command = 
elan4_sdram_readq (dev, commandPtr);
-+ unsigned int cmdSize;
-+
-+ switch (__categorise_command (command, &cmdSize))
-+ {
-+ case 0:
-+ (void) __whole_command (&commandPtr, insertPtr, cqSize, cmdSize);
-+ break;
-+
-+ case 1: /* open */
-+ if (((chan << 4) == (command & (1<<4))))
-+ /* Matches supplied channel */
-+ return (command >> 32);
-+ else
-+ (void) __whole_command (&commandPtr, insertPtr, cqSize, cmdSize);
-+ break;
-+
-+ case 2:
-+ (void) __whole_command (&commandPtr, insertPtr, cqSize, cmdSize);
-+ break;
-+ case 3:
-+ printk ("cproc_open_extract_vp: invalid command %llx\n", (long long)command);
-+ return -1;
-+ }
-+ } /* while */
-+ }
-+
-+ return -1;
-+}
-+
-+void
-+elan4_extract_cproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_CPROC_TRAP *trap, unsigned cqnum)
-+{
-+ /* extract the state from the device */
-+ elan4_sdram_copy_faultarea (dev, CUN_CommandProc, &trap->tr_faultarea);
-+
-+ trap->tr_status = status;
-+ trap->tr_command = read_reg64 (dev, CommandHold);
-+
-+ elan4_sdram_copyq_from_sdram (dev, dev->dev_cqaddr + (cqnum * sizeof (E4_CommandQueueDesc)), &trap->tr_qdesc, sizeof (E4_CommandQueueDesc));
-+
-+ if (CPROC_TrapType (status) == CommandProcWaitTrap)
-+ elan4_extract_eproc_trap (dev, read_reg64 (dev, EProcStatus), &trap->tr_eventtrap, 1);
-+
-+ BumpDevStat (dev, s_cproc_trap_types[CPROC_TrapType(status)]);
-+
-+ if (PackValue(trap->tr_qdesc.CQ_AckBuffers, 0) == PackTimeout || PackValue(trap->tr_qdesc.CQ_AckBuffers, 1) == PackTimeout)
-+ BumpDevStat (dev, s_cproc_timeout);
-+}
-+
-+void
-+elan4_extract_dproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_DPROC_TRAP *trap, unsigned unit)
-+{
-+ trap->tr_status = status;
-+
-+ if (unit == 0)
-+ {
-+ trap->tr_desc.dma_typeSize = read_reg64 (dev, Dma0Desc.dma_typeSize);
-+ trap->tr_desc.dma_cookie = read_reg64 (dev, Dma0Desc.dma_cookie);
-+ trap->tr_desc.dma_vproc = read_reg64 (dev, Dma0Desc.dma_vproc);
-+ trap->tr_desc.dma_srcAddr = read_reg64 (dev, Dma0Desc.dma_srcAddr);
-+ trap->tr_desc.dma_dstAddr = read_reg64 (dev, Dma0Desc.dma_dstAddr);
-+ trap->tr_desc.dma_srcEvent = read_reg64 (dev, Dma0Desc.dma_srcEvent);
-+ trap->tr_desc.dma_dstEvent = read_reg64 (dev, Dma0Desc.dma_dstEvent);
-+
-+ elan4_sdram_copy_faultarea (dev, CUN_DProcPA0, &trap->tr_packAssemFault);
-+ }
-+ else
-+ {
-+ trap->tr_desc.dma_typeSize = read_reg64 (dev, Dma1Desc.dma_typeSize);
-+ trap->tr_desc.dma_cookie = read_reg64 (dev, Dma1Desc.dma_cookie);
-+ trap->tr_desc.dma_vproc = read_reg64 (dev, Dma1Desc.dma_vproc);
-+ trap->tr_desc.dma_srcAddr = read_reg64 (dev, Dma1Desc.dma_srcAddr);
-+ trap->tr_desc.dma_dstAddr = read_reg64 (dev, Dma1Desc.dma_dstAddr);
-+ trap->tr_desc.dma_srcEvent = read_reg64 (dev, Dma1Desc.dma_srcEvent);
-+ trap->tr_desc.dma_dstEvent = read_reg64 (dev, Dma1Desc.dma_dstEvent);
-+
-+ elan4_sdram_copy_faultarea (dev, CUN_DProcPA1, &trap->tr_packAssemFault);
-+ }
-+
-+ if (DPROC_PrefetcherFault (trap->tr_status))
-+ {
-+ elan4_sdram_copy_faultarea (dev, (CUN_DProcData0 | DPROC_FaultUnitNo(trap->tr_status)), &trap->tr_prefetchFault);
-+ /* addy: Added new trap type for Prefetcher faults */
-+ BumpDevStat (dev, s_dproc_trap_types[6]);
-+ }
-+ else if (DPROC_PacketTimeout (trap->tr_status))
-+ BumpDevStat (dev, s_dproc_timeout);
-+ else
-+ BumpDevStat (dev, s_dproc_trap_types[DPROC_TrapType(status)]);
-+}
-+
-+void
-+elan4_extract_tproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_TPROC_TRAP *trap)
-+{
-+ int i;
-+
-+ trap->tr_status = status;
-+ trap->tr_state = read_reg64 (dev, Thread_Trap_State);
-+ trap->tr_pc = read_reg64 (dev, 
PC_W);
-+ trap->tr_npc = read_reg64 (dev, nPC_W);
-+ trap->tr_dirty = read_reg64 (dev, DirtyBits);
-+ trap->tr_bad = read_reg64 (dev, BadBits);
-+
-+#ifdef CONFIG_MPSAS
-+ if (sas_copyfrom_dev (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS,
-+ ((dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA) ? ELAN4_REVA_REG_OFFSET : ELAN4_REVB_REG_OFFSET) +
-+ offsetof (E4_Registers, Regs.TProcRegs), (unsigned long) &trap->tr_regs, 64*sizeof (E4_uint64)) < 0)
-+ {
-+ for (i = 0; i < 64; i++)
-+ if (trap->tr_dirty & ((E4_uint64) 1 << i))
-+ trap->tr_regs[i] = read_reg64 (dev, TProcRegs[i]);
-+ }
-+
-+ for (i = 0; i < 64; i++)
-+ if (! (trap->tr_dirty & ((E4_uint64) 1 << i)))
-+ trap->tr_regs[i] = 0xdeadbabedeadbabeULL;
-+#else
-+ for (i = 0; i < 64; i++)
-+ {
-+ if (trap->tr_dirty & ((E4_uint64) 1 << i))
-+ trap->tr_regs[i] = read_reg64 (dev, TProcRegs[i]);
-+ else
-+ trap->tr_regs[i] = 0xdeadbabedeadbabeULL;
-+ }
-+#endif
-+
-+ if (trap->tr_state & TS_DataAccessException)
-+ elan4_sdram_copy_faultarea (dev, CUN_TProcData0 | TS_DataPortNo (trap->tr_state), &trap->tr_dataFault);
-+
-+ if (trap->tr_state & TS_InstAccessException)
-+ elan4_sdram_copy_faultarea (dev, CUN_TProcInst, &trap->tr_instFault);
-+
-+ for (i = 0; i < 7; i++)
-+ if (trap->tr_state & (1 << i))
-+ BumpDevStat (dev, s_tproc_trap_types[i]);
-+}
-+
-+void
-+elan4_extract_iproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_IPROC_TRAP *trap, unsigned unit)
-+{
-+ sdramaddr_t hdroff = dev->dev_inputtraparea + offsetof (E4_IprocTrapState, TrHeader[0][unit]);
-+ sdramaddr_t dataoff = dev->dev_inputtraparea + offsetof (E4_IprocTrapState, TrData[0][unit]);
-+ register int i, j;
-+ int CurrUnitNo = (unit >= 2) ? CUN_IProcHighPri : CUN_IProcLowPri;
-+ sdramaddr_t CurrFaultArea = dev->dev_faultarea + (CurrUnitNo * sizeof (E4_FaultSave));
-+
-+ /* Finally copy the fault area */
-+ elan4_sdram_copy_faultarea (dev, CurrUnitNo, &trap->tr_faultarea);
-+
-+ /*
-+ * Clear out the fault save area after reading to allow a fault on the write of the back pointer of
-+ * an InputQCommit to be observed if a simultaneous event proc trap occurs. 
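-+ * elan4_inspect_iproc_trap relies on this: it checks the saved fault address
-+ * against the queue back pointer (q_bptr) to detect that case, so a stale
-+ * fault address left here would be misread.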
-+ */ -+ elan4_sdram_writeq (dev, CurrFaultArea + offsetof(E4_FaultSave, FSRAndFaultContext), 0x0ULL); -+ elan4_sdram_writeq (dev, CurrFaultArea + offsetof(E4_FaultSave, FaultAddress), 0x0ULL); -+ -+ /* copy the transaction headers */ -+ trap->tr_transactions[0].IProcStatusCntxAndTrType = status; -+ trap->tr_transactions[0].TrAddr = elan4_sdram_readq (dev, hdroff + offsetof (E4_IprocTrapHeader, TrAddr)); -+ -+ for (i = 0; !IPROC_EOPTrap(trap->tr_transactions[i].IProcStatusCntxAndTrType);) -+ { -+ if (IPROC_BadLength (trap->tr_transactions[i].IProcStatusCntxAndTrType)) -+ BumpDevStat (dev, s_bad_length); -+ else if (IPROC_TransCRCStatus (trap->tr_transactions[i].IProcStatusCntxAndTrType) == CRC_STATUS_BAD) -+ BumpDevStat (dev, s_crc_bad); -+ else if (IPROC_TransCRCStatus (trap->tr_transactions[i].IProcStatusCntxAndTrType) == CRC_STATUS_ERROR) -+ BumpDevStat (dev, s_crc_error); -+ -+ BumpDevStat (dev, s_iproc_trap_types[IPROC_TrapValue (trap->tr_transactions[i].IProcStatusCntxAndTrType)]); -+ -+ hdroff += NO_OF_INPUT_CHANNELS*sizeof (E4_IprocTrapHeader); -+ -+ if (++i == MAX_TRAPPED_TRANS) -+ break; -+ -+ elan4_sdram_copyq_from_sdram (dev, hdroff, &trap->tr_transactions[i], sizeof (E4_IprocTrapHeader)); -+ } -+ -+ if (IPROC_EOPType (trap->tr_transactions[i].IProcStatusCntxAndTrType) == EOP_ERROR_RESET) -+ BumpDevStat (dev, s_eop_reset); -+ -+ /* Remember the number of transactions we've copied */ -+ trap->tr_numTransactions = i + 1; -+ -+ /* Copy all the data blocks in one go */ -+ for (i = 0; i < MIN (trap->tr_numTransactions, MAX_TRAPPED_TRANS); i++, dataoff += NO_OF_INPUT_CHANNELS*sizeof (E4_IprocTrapData)) -+ { -+ if (IPROC_BadLength(status) || IPROC_TransCRCStatus (status) != CRC_STATUS_GOOD) -+ elan4_sdram_copyq_from_sdram (dev, dataoff, trap->tr_dataBuffers[i].Data, TRANS_DATA_DWORDS*sizeof(E4_uint64)); -+ else -+ { -+ int trtype = IPROC_TransactionType(trap->tr_transactions[i].IProcStatusCntxAndTrType); -+ int ndwords = (trtype & TR_SIZE_MASK) >> TR_SIZE_SHIFT; -+ -+ elan4_sdram_copyq_from_sdram (dev, dataoff, trap->tr_dataBuffers[i].Data, ndwords*sizeof(E4_uint64)); -+ -+ for (j = ndwords; j < TRANS_DATA_DWORDS; j++) -+ trap->tr_dataBuffers[i].Data[j] = 0xbeec0f212345678ull; -+ } -+ } -+ -+} -+ -+void -+elan4_inspect_iproc_trap (ELAN4_IPROC_TRAP *trap) -+{ -+ int i; -+ -+ trap->tr_flags = 0; -+ trap->tr_trappedTrans = TR_TRANS_INVALID; -+ trap->tr_waitForEopTrans = TR_TRANS_INVALID; -+ trap->tr_identifyTrans = TR_TRANS_INVALID; -+ -+ if (trap->tr_numTransactions > MAX_TRAPPED_TRANS) -+ trap->tr_flags = TR_FLAG_TOOMANY_TRANS; -+ -+ /* -+ * Now scan all the transactions received -+ */ -+ for (i = 0; i < MIN(trap->tr_numTransactions, MAX_TRAPPED_TRANS) ; i++) -+ { -+ E4_IprocTrapHeader *hdrp = &trap->tr_transactions[i]; -+ E4_uint64 status = hdrp->IProcStatusCntxAndTrType; -+ -+ if (trap->tr_identifyTrans == TR_TRANS_INVALID) -+ { -+ switch (IPROC_TransactionType (status) & (TR_OPCODE_MASK | TR_SIZE_MASK)) -+ { -+ case TR_IDENTIFY & (TR_OPCODE_MASK | TR_SIZE_MASK): -+ case TR_REMOTEDMA & (TR_OPCODE_MASK | TR_SIZE_MASK): -+ case TR_SETEVENT_IDENTIFY & (TR_OPCODE_MASK | TR_SIZE_MASK): -+ case TR_INPUT_Q_COMMIT & (TR_OPCODE_MASK | TR_SIZE_MASK): -+ case TR_ADDWORD & (TR_OPCODE_MASK | TR_SIZE_MASK): -+ case TR_TESTANDWRITE & (TR_OPCODE_MASK | TR_SIZE_MASK): -+ trap->tr_identifyTrans = i; -+ break; -+ } -+ } -+ -+ if (IPROC_TrapValue(status) == InputNoFault) /* We're looking at transactions stored before the trap */ -+ continue; /* these should only be identifies */ -+ -+ if 
(trap->tr_trappedTrans == TR_TRANS_INVALID) /* Remember the transaction which caused the */
-+ trap->tr_trappedTrans = i; /* trap */
-+
-+ if (IPROC_GoodAckSent (status) & (1 << IPROC_InputterChan (status)))
-+ trap->tr_flags |= TR_FLAG_ACK_SENT;
-+
-+ if (IPROC_EOPTrap(status)) /* Check for EOP */
-+ {
-+ ASSERT (i == trap->tr_numTransactions - 1);
-+
-+ switch (IPROC_EOPType(status))
-+ {
-+ case EOP_GOOD:
-+ /* if we get an EOP_GOOD then the outputer should have received a PAckOk. */
-+ /* unless it was a flood, in which case someone must have sent an ack */
-+ /* but not necessarily us */
-+ break;
-+
-+ case EOP_BADACK:
-+ /* if we get an EOP_BADACK then the outputer did not receive a PAckOk even if
-+ * we sent a PAckOk. Flag this to ignore the AckSent. */
-+ trap->tr_flags |= TR_FLAG_EOP_BAD;
-+ break;
-+
-+ case EOP_ERROR_RESET:
-+ /* if we get an EOP_ERROR_RESET then the outputer may or may not have got a PAckOk. */
-+ trap->tr_flags |= TR_FLAG_EOP_ERROR;
-+ break;
-+
-+ default:
-+ printk ("elan4_inspect_iproc_trap: unknown eop type %d", IPROC_EOPType(status));
-+ BUG();
-+ /* NOTREACHED */
-+ }
-+ continue;
-+ }
-+ else
-+ {
-+ if (IPROC_BadLength(status) || (IPROC_TransCRCStatus (status) == CRC_STATUS_ERROR ||
-+ IPROC_TransCRCStatus (status) == CRC_STATUS_BAD))
-+ {
-+ {
-+ register int j;
-+ if (IPROC_BadLength(status))
-+ PRINTF2 (DBG_DEVICE, DBG_INTR, "LinkError: Trapped on bad length data. status=%016llx Address=%016llx\n",
-+ status, hdrp->TrAddr);
-+ else
-+ PRINTF2 (DBG_DEVICE, DBG_INTR, "LinkError: Trapped with bad CRC. status=%016llx Address=%016llx\n",
-+ status, hdrp->TrAddr);
-+ for (j = 0; j < TRANS_DATA_DWORDS; j++)
-+ PRINTF2 (DBG_DEVICE, DBG_INTR, "LinkError: DataBuffers[%d] : %016llx\n", j, trap->tr_dataBuffers[i].Data[j]);
-+ }
-+
-+ trap->tr_flags |= TR_FLAG_BAD_TRANS;
-+ continue;
-+ }
-+
-+ if (IPROC_TransCRCStatus (status) == CRC_STATUS_DISCARD)
-+ continue;
-+
-+ if ((((IPROC_TransactionType(status) & TR_BLOCK_OPCODE_MASK) == TR_WRITEBLOCK) ||
-+ (IPROC_TransactionType(status) == TR_TRACEROUTE_TRANS)) &&
-+ (trap->tr_flags & TR_FLAG_ACK_SENT) && trap->tr_identifyTrans == TR_TRANS_INVALID)
-+ {
-+ /*
-+ * Writeblock after the ack is sent without an identify transaction - this is
-+ * considered to be a DMA packet and requires the next packet to be nacked - since
-+ * the DMA processor will send this in a deterministic time and there's an upper
-+ * limit on the network latency (the output timeout) we just need to hold the context
-+ * filter up for a while.
-+ */
-+ trap->tr_flags |= TR_FLAG_DMA_PACKET;
-+ }
-+
-+ if (IPROC_LastTrans(status) && (IPROC_TransactionType(status) & TR_WAIT_FOR_EOP))
-+ {
-+ /*
-+ * WaitForEop transactions - if we have to do network error fixup
-+ * then we may need to execute/ignore this transaction dependent
-+ * on whether the source will be resending it.
-+ */
-+ trap->tr_waitForEopTrans = i;
-+ }
-+
-+ /*
-+ * This is a special case caused by a minor input processor bug.
-+ * If simultaneous InputMemoryFault and InputEventEngineTrapped occur then the chip will probably return
-+ * InputEventEngineTrapped even though the write of the back pointer has not occurred and must be done by
-+ * the trap handler.
-+ * In this case the fault address will equal q->q_bptr. If there has been only an EventEngineTrap then
-+ * the fault address should be zero as the trap handler now always zeros this after every input trap. 
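-+ * When that case is detected below, the TrapValue in the saved status word is
-+ * rewritten to InputMemoryFault so the trap handler knows it must complete
-+ * the back pointer write itself.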
-+ */ -+ if ((IPROC_TransactionType (status) & TR_OPCODE_MASK) == (TR_INPUT_Q_COMMIT & TR_OPCODE_MASK) && -+ trap->tr_faultarea.FaultAddress == hdrp->TrAddr + offsetof(E4_InputQueue, q_bptr) && -+ IPROC_TrapValue(status) == InputEventEngineTrapped) -+ { -+ hdrp->IProcStatusCntxAndTrType = (status & 0xFFFFFFF0FFFFFFFFull) | ((E4_uint64) InputMemoryFault << 32); -+ } -+ } -+ -+ PRINTF (DBG_DEVICE, DBG_INTR, "inspect[%d] status=%llx TrapValue=%d -> flags %x\n", i, status, IPROC_TrapValue(status), trap->tr_flags); -+ } -+} -+ -+E4_uint64 -+elan4_trapped_open_command (ELAN4_DEV *dev, ELAN4_CQ *cq) -+{ -+ sdramaddr_t cqdesc = dev->dev_cqaddr + elan4_cq2num(cq) * sizeof (E4_CommandQueueDesc); -+ E4_uint64 cqcontrol = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control)); -+ E4_uint32 extractOff = CQ_ExtractPtr (cqcontrol) & (CQ_Size(cq->cq_size)-1); -+ -+ if (extractOff == 0) -+ extractOff = CQ_Size(cq->cq_size) - sizeof (E4_uint64); -+ else -+ extractOff -= sizeof (E4_uint64); -+ -+ return (elan4_sdram_readq (dev, cq->cq_space + extractOff)); -+} -+ -+EXPORT_SYMBOL(elan4_extract_eproc_trap); -+EXPORT_SYMBOL(elan4_display_eproc_trap); -+EXPORT_SYMBOL(elan4_extract_cproc_trap); -+EXPORT_SYMBOL(elan4_display_cproc_trap); -+EXPORT_SYMBOL(elan4_extract_dproc_trap); -+EXPORT_SYMBOL(elan4_display_dproc_trap); -+EXPORT_SYMBOL(elan4_extract_tproc_trap); -+EXPORT_SYMBOL(elan4_display_tproc_trap); -+EXPORT_SYMBOL(elan4_extract_iproc_trap); -+EXPORT_SYMBOL(elan4_inspect_iproc_trap); -+EXPORT_SYMBOL(elan4_display_iproc_trap); -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan4/user.c linux-2.6.9/drivers/net/qsnet/elan4/user.c ---- clean/drivers/net/qsnet/elan4/user.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan4/user.c 2005-07-19 09:45:36.000000000 -0400 -@@ -0,0 +1,3443 @@ -+/* -+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: user.c,v 1.89.2.2 2005/07/19 13:45:36 daniel Exp $" -+/* $Source: /cvs/master/quadrics/elan4mod/user.c,v $*/ -+ -+#include -+#include -+ -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include -+ -+/* allow this code to compile against an Eagle elanmod */ -+#ifdef __ELANMOD_DEVICE_H -+#define elan_attach_cap(cap,rnum,args,func) elanmod_attach_cap(cap,args,func) -+#define elan_detach_cap(cap,rnum) elanmod_detach_cap(cap) -+#endif -+ -+#define NETERR_MSGS 16 -+ -+int user_p2p_route_options = FIRST_TIMEOUT(3); -+int user_bcast_route_options = FIRST_TIMEOUT(3); -+int user_dproc_retry_count = 15; -+int user_cproc_retry_count = 2; -+int user_ioproc_enabled = 1; -+int user_pagefault_enabled = 1; -+ -+int num_fault_save = 30; -+int min_fault_pages = 1; -+int max_fault_pages = 128; -+ -+static int -+user_validate_cap (USER_CTXT *uctx, ELAN_CAPABILITY *cap, unsigned use) -+{ -+ /* Don't allow a user process to attach to system context */ -+ if (ELAN4_SYSTEM_CONTEXT (cap->cap_lowcontext) || ELAN4_SYSTEM_CONTEXT (cap->cap_highcontext)) -+ { -+ PRINTF3 (DBG_DEVICE, DBG_VP,"user_validate_cap: lctx %x hctx %x high %x\n", cap->cap_lowcontext, cap->cap_highcontext, ELAN4_KCOMM_BASE_CONTEXT_NUM); -+ PRINTF0 (DBG_DEVICE, DBG_VP,"user_validate_cap: user process cant attach to system cap\n"); -+ return (EINVAL); -+ } -+ -+ return elanmod_classify_cap(&uctx->uctx_position, cap, use); -+} -+ -+static __inline__ void -+__user_signal_trap (USER_CTXT *uctx) -+{ -+ switch (uctx->uctx_trap_state) -+ { -+ case UCTX_TRAP_IDLE: -+ PRINTF (uctx, DBG_TRAP, "user_signal_trap: deliver signal %d to pid %d\n", uctx->uctx_trap_signo, uctx->uctx_trap_pid); -+ -+ if (uctx->uctx_trap_signo) -+ kill_proc (uctx->uctx_trap_pid, uctx->uctx_trap_signo, 1); -+ break; -+ -+ case UCTX_TRAP_SLEEPING: -+ PRINTF (uctx, DBG_TRAP, "user_signal_trap: wakeup sleeping trap handler\n"); -+ -+ kcondvar_wakeupone (&uctx->uctx_wait, &uctx->uctx_spinlock); -+ break; -+ } -+ uctx->uctx_trap_state = UCTX_TRAP_SIGNALLED; -+} -+ -+static void -+user_signal_timer (unsigned long arg) -+{ -+ USER_CTXT *uctx = (USER_CTXT *) arg; -+ unsigned long flags; -+ -+ PRINTF (uctx, DBG_TRAP, "user_signal_timer: state=%d pid=%d signal=%d (now %d start %d)\n", -+ uctx->uctx_trap_state, uctx->uctx_trap_pid, uctx->uctx_trap_signo, jiffies, -+ uctx->uctx_int_start); -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ __user_signal_trap (uctx); -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+} -+ -+#define MAX_INTS_PER_TICK 50 -+#define MIN_INTS_PER_TICK 20 -+ -+static void -+user_shuffle_signal_trap (USER_CTXT *uctx) -+{ -+ ASSERT (SPINLOCK_HELD (&uctx->uctx_spinlock)); -+ -+ PRINTF (uctx, DBG_TRAP, "user_shuffle_signal_trap: signal=%d%s\n", -+ uctx->uctx_trap_signo, timer_pending(&uctx->uctx_shuffle_timer) ? " (timer-pending)" : ""); -+ -+ if (timer_pending (&uctx->uctx_shuffle_timer)) -+ return; -+ -+ uctx->uctx_shuffle_timer.expires = jiffies + (HZ*2); -+ -+ add_timer (&uctx->uctx_shuffle_timer); -+} -+ -+static void -+user_signal_trap (USER_CTXT *uctx) -+{ -+ ASSERT (SPINLOCK_HELD (&uctx->uctx_spinlock)); -+ -+ PRINTF (uctx, DBG_TRAP, "user_signal_trap: state=%d pid=%d signal=%d%s\n", uctx->uctx_trap_state, -+ uctx->uctx_trap_pid, uctx->uctx_trap_signo, timer_pending(&uctx->uctx_int_timer) ? 
" (timer-pending)" : ""); -+ -+ uctx->uctx_int_count++; -+ -+ if (timer_pending (&uctx->uctx_int_timer)) -+ return; -+ -+ if (uctx->uctx_int_count > ((int)(jiffies - uctx->uctx_int_start) * MAX_INTS_PER_TICK)) -+ { -+ PRINTF (uctx, DBG_TRAP, "user_signal_trap: deferring signal for %d ticks (count %d ticks %d -> %d)\n", -+ uctx->uctx_int_delay + 1, uctx->uctx_int_count, (int) (jiffies - uctx->uctx_int_start), -+ ((int)(jiffies - uctx->uctx_int_start) * MAX_INTS_PER_TICK)); -+ -+ /* We're interrupting too fast, so defer this signal */ -+ uctx->uctx_int_timer.expires = jiffies + (++uctx->uctx_int_delay); -+ -+ add_timer (&uctx->uctx_int_timer); -+ } -+ else -+ { -+ __user_signal_trap (uctx); -+ -+ PRINTF (uctx, DBG_TRAP, "user_signal_trap: check signal for %d ticks (count %d ticks %d -> %d)\n", -+ uctx->uctx_int_delay + 1, uctx->uctx_int_count, (int) (jiffies - uctx->uctx_int_start), -+ (int)(jiffies - uctx->uctx_int_start) * MIN_INTS_PER_TICK); -+ -+ if (uctx->uctx_int_count < ((int) (jiffies - uctx->uctx_int_start)) * MIN_INTS_PER_TICK) -+ { -+ PRINTF (uctx, DBG_TRAP, "user_signal_trap: reset interrupt throttle (count %d ticks %d)\n", -+ uctx->uctx_int_count, (int) (jiffies - uctx->uctx_int_start)); -+ -+ uctx->uctx_int_start = jiffies; -+ uctx->uctx_int_count = 0; -+ uctx->uctx_int_delay = 0; -+ } -+ } -+} -+ -+static void -+user_neterr_timer (unsigned long arg) -+{ -+ USER_CTXT *uctx = (USER_CTXT *) arg; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ -+ uctx->uctx_status |= UCTX_NETERR_TIMER; -+ -+ user_signal_trap (uctx); -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+} -+ -+static void -+user_flush_dma_runqueue (ELAN4_DEV *dev, USER_CTXT *uctx, int qfull) -+{ -+ E4_uint64 qptrs = read_reg64 (dev, DProcLowPriPtrs); -+ E4_uint32 qsize = E4_QueueSize (E4_QueueSizeValue (qptrs)); -+ E4_uint32 qfptr = E4_QueueFrontPointer (qptrs); -+ E4_uint32 qbptr = E4_QueueBackPointer (qptrs); -+ E4_DProcQueueEntry qentry; -+ -+ while ((qfptr != qbptr) || qfull) -+ { -+ E4_uint64 typeSize = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_typeSize)); -+ -+ if (DMA_Context (typeSize) == uctx->uctx_ctxt.ctxt_num) -+ { -+ elan4_sdram_copyq_from_sdram (dev, qfptr, &qentry, sizeof (E4_DProcQueueEntry)); -+ -+ PRINTF4 (uctx, DBG_SWAP, "user_flush_dma_runqueue: %016llx %016llx %016llx %016llx\n", qentry.Desc.dma_typeSize, -+ qentry.Desc.dma_cookie, qentry.Desc.dma_vproc, qentry.Desc.dma_srcAddr); -+ PRINTF3 (uctx, DBG_SWAP, " %016llx %016llx %016llx\n", qentry.Desc.dma_dstAddr, -+ qentry.Desc.dma_srcEvent, qentry.Desc.dma_dstEvent); -+ -+ if (RING_QUEUE_REALLY_FULL (uctx->uctx_dmaQ)) -+ { -+ PRINTF (uctx, DBG_SWAP, "user_flush_dma_runqueue: queue overflow\n"); -+ uctx->uctx_status |= UCTX_DPROC_QUEUE_OVERFLOW; -+ } -+ else -+ { -+ *RING_QUEUE_BACK (uctx->uctx_dmaQ, uctx->uctx_dmas) = qentry.Desc; -+ (void) RING_QUEUE_ADD (uctx->uctx_dmaQ); -+ } -+ -+ qentry.Desc.dma_typeSize = DMA_ShMemWrite | dev->dev_ctxt.ctxt_num; -+ qentry.Desc.dma_cookie = 0; -+ qentry.Desc.dma_vproc = 0; -+ qentry.Desc.dma_srcAddr = 0; -+ qentry.Desc.dma_dstAddr = 0; -+ qentry.Desc.dma_srcEvent = 0; -+ qentry.Desc.dma_dstEvent = 0; -+ -+ elan4_sdram_copyq_to_sdram (dev, &qentry, qfptr, sizeof (E4_DProcQueueEntry)); -+ } -+ -+ qfptr = (qfptr & ~(qsize-1)) | ((qfptr + sizeof (E4_DProcQueueEntry)) & (qsize-1)); -+ qfull = 0; -+ } -+} -+ -+static void -+user_flush_thread_runqueue (ELAN4_DEV *dev, USER_CTXT *uctx, int qfull) -+{ -+ E4_uint64 qptrs = read_reg64 (dev, 
TProcLowPriPtrs);
-+ E4_uint32 qsize = E4_QueueSize (E4_QueueSizeValue (qptrs));
-+ E4_uint32 qfptr = E4_QueueFrontPointer (qptrs);
-+ E4_uint32 qbptr = E4_QueueBackPointer (qptrs);
-+ E4_TProcQueueEntry qentry;
-+
-+ while ((qfptr != qbptr) || qfull)
-+ {
-+ E4_uint64 context = elan4_sdram_readq (dev, qfptr + offsetof (E4_TProcQueueEntry, Context));
-+
-+ if (TPROC_Context (context) == uctx->uctx_ctxt.ctxt_num)
-+ {
-+ elan4_sdram_copyq_from_sdram (dev, qfptr, &qentry, sizeof (E4_TProcQueueEntry));
-+
-+ PRINTF (uctx, DBG_SWAP, "user_flush_thread_runqueue: %016llx %016llx %016llx %016llx\n", qentry.Regs.Registers[0],
-+ qentry.Regs.Registers[1], qentry.Regs.Registers[2], qentry.Regs.Registers[3]);
-+ PRINTF (uctx, DBG_SWAP, " %016llx %016llx %016llx\n",
-+ qentry.Regs.Registers[4], qentry.Regs.Registers[5], qentry.Regs.Registers[6]);
-+
-+ if (RING_QUEUE_REALLY_FULL (uctx->uctx_threadQ))
-+ uctx->uctx_status |= UCTX_TPROC_QUEUE_OVERFLOW;
-+ else
-+ {
-+ *RING_QUEUE_BACK (uctx->uctx_threadQ, uctx->uctx_threads) = qentry.Regs;
-+ (void) RING_QUEUE_ADD (uctx->uctx_threadQ);
-+ }
-+
-+ /* change the thread to execute the suspend sequence */
-+ qentry.Regs.Registers[0] = dev->dev_tproc_suspend;
-+ qentry.Regs.Registers[1] = dev->dev_tproc_space;
-+ qentry.Context = dev->dev_ctxt.ctxt_num;
-+
-+ elan4_sdram_copyq_to_sdram (dev, &qentry, qfptr, sizeof (E4_TProcQueueEntry));
-+ }
-+
-+ qfptr = (qfptr & ~(qsize-1)) | ((qfptr + sizeof (E4_TProcQueueEntry)) & (qsize-1));
-+ qfull = 0;
-+ }
-+}
-+
-+static void
-+user_flush_dmas (ELAN4_DEV *dev, void *arg, int qfull)
-+{
-+ USER_CTXT *uctx = (USER_CTXT *) arg;
-+ unsigned long flags;
-+
-+ ASSERT ((read_reg32 (dev, InterruptReg) & INT_DProcHalted) != 0);
-+
-+ spin_lock_irqsave (&uctx->uctx_spinlock, flags);
-+
-+ if ((uctx->uctx_status & (UCTX_SWAPPED_REASONS|UCTX_STOPPED_REASONS)) == 0)
-+ {
-+ PRINTF1 (uctx, DBG_SWAP, "user_flush_dmas: status %x - no more reasons\n", uctx->uctx_status);
-+
-+ uctx->uctx_status &= ~UCTX_STOPPING;
-+
-+ user_signal_trap (uctx);
-+ }
-+ else
-+ {
-+ user_flush_dma_runqueue (dev, uctx, qfull);
-+
-+ uctx->uctx_status = (uctx->uctx_status | UCTX_STOPPED) & ~UCTX_STOPPING;
-+
-+ PRINTF1 (uctx, DBG_SWAP, "user_flush_dmas: status %x - stopped\n", uctx->uctx_status);
-+
-+ kcondvar_wakeupall (&uctx->uctx_wait, &uctx->uctx_spinlock);
-+ }
-+
-+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
-+}
-+
-+static void
-+user_flush (ELAN4_DEV *dev, void *arg)
-+{
-+ USER_CTXT *uctx = (USER_CTXT *) arg;
-+ struct list_head *entry;
-+ unsigned long flags;
-+
-+ ASSERT ((read_reg32 (dev, InterruptReg) & (INT_Halted|INT_Discarding)) == (INT_Halted|INT_Discarding));
-+
-+ spin_lock_irqsave (&uctx->uctx_spinlock, flags);
-+
-+ if ((uctx->uctx_status & (UCTX_SWAPPED_REASONS|UCTX_STOPPED_REASONS)) == 0)
-+ {
-+ PRINTF1 (uctx, DBG_SWAP, "user_flush: status %x - no more reasons\n", uctx->uctx_status);
-+
-+ uctx->uctx_status &= ~UCTX_STOPPING;
-+
-+ user_signal_trap (uctx);
-+ }
-+ else
-+ {
-+ PRINTF1 (uctx, DBG_SWAP, "user_flush: status %x - flushing context\n", uctx->uctx_status);
-+
-+ list_for_each (entry, &uctx->uctx_cqlist) {
-+ USER_CQ *ucq = list_entry (entry, USER_CQ, ucq_link);
-+
-+ if (ucq->ucq_state == UCQ_RUNNING)
-+ {
-+ /* NOTE: since the inserter can still be running we modify the permissions
-+ * to zero; when the extractor starts up again it will trap */
-+ PRINTF1 (uctx, DBG_SWAP, "user_flush: stopping cq indx=%d\n", elan4_cq2idx(ucq->ucq_cq));
-+
-+ elan4_updatecq (dev, ucq->ucq_cq, 0, 0);
-+ }
-+ }
-+
-+ 
user_flush_thread_runqueue (dev, uctx, TPROC_LowRunQueueFull(read_reg64 (dev, TProcStatus))); -+ -+ /* since we can't determine whether the dma run queue is full or empty, we use a dma -+ * halt operation to do the flushing - as the reason for halting the dma processor -+ * will be released when we return, we keep it halted until the flush has completed */ -+ elan4_queue_dma_flushop (dev, &uctx->uctx_dma_flushop, 0); -+ -+ if (uctx->uctx_status & UCTX_EXITING) -+ elan4_flush_icache_halted (&uctx->uctx_ctxt); -+ } -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+} -+ -+static void -+user_set_filter (USER_CTXT *uctx, E4_uint32 state) -+{ -+ struct list_head *entry; -+ -+ ASSERT (SPINLOCK_HELD (&uctx->uctx_spinlock)); -+ -+ list_for_each (entry, &uctx->uctx_cent_list) { -+ USER_CTXT_ENTRY *cent = list_entry (entry, USER_CTXT_ENTRY, cent_link); -+ -+ elan4_set_filter (&uctx->uctx_ctxt, cent->cent_cap->cap_mycontext, state); -+ } -+} -+ -+static void -+user_start_nacking (USER_CTXT *uctx, unsigned reason) -+{ -+ PRINTF2 (uctx, DBG_SWAP, "user_start_nacking: status %x reason %x\n", uctx->uctx_status, reason); -+ -+ ASSERT (SPINLOCK_HELD (&uctx->uctx_spinlock)); -+ -+ if (UCTX_NACKING(uctx)) -+ uctx->uctx_status |= reason; -+ else -+ { -+ uctx->uctx_status |= reason; -+ -+ user_set_filter (uctx, E4_FILTER_STATS | E4_FILTER_DISCARD_ALL); -+ } -+} -+ -+static void -+user_stop_nacking (USER_CTXT *uctx, unsigned reason) -+{ -+ PRINTF2 (uctx, DBG_SWAP, "user_stop_nacking: status %x reason %x\n", uctx->uctx_status, reason); -+ -+ ASSERT (SPINLOCK_HELD (&uctx->uctx_spinlock)); -+ -+ uctx->uctx_status &= ~reason; -+ -+ if (! UCTX_NACKING (uctx)) -+ user_set_filter (uctx, E4_FILTER_STATS); -+} -+ -+static void -+user_start_stopping (USER_CTXT *uctx, unsigned reason) -+{ -+ ELAN4_DEV *dev =uctx->uctx_ctxt.ctxt_dev; -+ -+ PRINTF2 (uctx, DBG_SWAP, "user_start_stopping: status %x reason %x\n", uctx->uctx_status, reason); -+ -+ ASSERT (! 
(uctx->uctx_status & UCTX_STOPPED)); -+ -+ user_start_nacking (uctx, reason); -+ -+ if ((uctx->uctx_status & UCTX_STOPPING) != 0) -+ return; -+ -+ uctx->uctx_status |= UCTX_STOPPING; -+ -+ /* queue the halt operation to remove all threads/dmas/cqs from the run queues */ -+ /* and also flush through the context filter change */ -+ elan4_queue_haltop (dev, &uctx->uctx_haltop); -+} -+ -+static void -+user_stop_stopping (USER_CTXT *uctx, unsigned reason) -+{ -+ PRINTF2 (uctx, DBG_SWAP, "user_stop_stopping: status %x reason %x\n", uctx->uctx_status, reason); -+ -+ user_stop_nacking (uctx, reason); -+ -+ if (UCTX_RUNNABLE (uctx)) -+ { -+ uctx->uctx_status &= ~UCTX_STOPPED; -+ -+ PRINTF1 (uctx, DBG_SWAP, "user_stop_stopping: no more reasons => %x\n", uctx->uctx_status); -+ -+ user_signal_trap (uctx); -+ } -+} -+ -+void -+user_swapout (USER_CTXT *uctx, unsigned reason) -+{ -+ ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ -+ PRINTF2 (uctx, DBG_SWAP, "user_swapout: status %x reason %x\n", uctx->uctx_status, reason); -+ -+ user_start_nacking (uctx, reason); -+ -+ while (uctx->uctx_status & (UCTX_SWAPPING|UCTX_STOPPING) && /* wait for someone else to finish */ -+ uctx->uctx_trap_count > 0) /* and for trap handlers to notice */ -+ { /* and exit */ -+ PRINTF1 (uctx, DBG_SWAP, "user_swapout: waiting for %d trap handlers to exit/previous swapout\n", uctx->uctx_trap_count); -+ -+ kcondvar_wakeupall (&uctx->uctx_wait, &uctx->uctx_spinlock); -+ kcondvar_wait (&uctx->uctx_wait, &uctx->uctx_spinlock, &flags); -+ } -+ -+ if (uctx->uctx_status & UCTX_SWAPPED) /* already swapped out */ -+ { -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ return; -+ } -+ -+ uctx->uctx_status |= (UCTX_SWAPPING|UCTX_STOPPING); /* mark the context as swapping & stopping */ -+ -+ /* queue the halt operation to remove all threads/dmas/cqs from the run queues */ -+ /* and also flush through the context filter change */ -+ elan4_queue_haltop (dev, &uctx->uctx_haltop); -+ -+ while (! (uctx->uctx_status & UCTX_STOPPED)) -+ kcondvar_wait (&uctx->uctx_wait, &uctx->uctx_spinlock, &flags); -+ -+ /* all state has been removed from the elan - we can now "tidy" it up */ -+ -+ PRINTF0 (uctx, DBG_SWAP, "user_swapout: swapped out\n"); -+ -+ uctx->uctx_status = (uctx->uctx_status & ~UCTX_SWAPPING) | UCTX_SWAPPED; -+ -+ kcondvar_wakeupall (&uctx->uctx_wait, &uctx->uctx_spinlock); -+ -+ PRINTF1 (uctx, DBG_SWAP, "user_swapout: all done - status %x\n", uctx->uctx_status); -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+} -+ -+void -+user_swapin (USER_CTXT *uctx, unsigned reason) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ -+ ASSERT (uctx->uctx_status & UCTX_SWAPPED_REASONS); -+ -+ PRINTF2 (uctx, DBG_SWAP, "user_swapin: status %x reason %x\n", uctx->uctx_status, reason); -+ -+ while (uctx->uctx_status & (UCTX_SWAPPING|UCTX_STOPPING)) /* wait until other threads have */ -+ kcondvar_wait (&uctx->uctx_wait, &uctx->uctx_spinlock, &flags); /* completed their swap operation */ -+ -+ ASSERT (uctx->uctx_status & (UCTX_SWAPPED | UCTX_STOPPED)); -+ -+ user_stop_nacking (uctx, reason); -+ -+ if (! (uctx->uctx_status & UCTX_SWAPPED_REASONS)) -+ { -+ uctx->uctx_status &= ~UCTX_SWAPPED; -+ -+ /* no longer swapped out - wakeup anyone sleeping waiting for swapin */ -+ kcondvar_wakeupall (&uctx->uctx_wait, &uctx->uctx_spinlock); -+ -+ if (! 
(uctx->uctx_status & UCTX_STOPPED_REASONS)) -+ { -+ uctx->uctx_status &= ~UCTX_STOPPED; -+ user_signal_trap (uctx); -+ } -+ } -+ -+ PRINTF1 (uctx, DBG_SWAP, "user_swapin: all done - status %x\n", uctx->uctx_status); -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+} -+ -+void -+user_destroy_callback (void *arg, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map) -+{ -+ USER_CTXT *uctx = (USER_CTXT *) arg; -+ -+ PRINTF (uctx, DBG_VP, "user_destroy_callback: %s\n", map == NULL ? "cap destroyed" : "map destroyed"); -+} -+ -+int -+user_attach (USER_CTXT *uctx, ELAN_CAPABILITY *cap) -+{ -+ ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev; -+ USER_CTXT_ENTRY *cent; -+ unsigned long flags; -+ int ctype, res; -+ -+ if ((ctype = user_validate_cap (uctx, cap, ELAN_USER_ATTACH)) < 0) -+ return ctype; -+ -+ if ((ctype == ELAN_CAP_RMS) && (res = elan_attach_cap (cap, dev->dev_devinfo.dev_rail, uctx, user_destroy_callback)) != 0) -+ { -+ /* NOTE: elan_attach_cap returns +ve errnos */ -+ return -res; -+ } -+ -+ KMEM_ALLOC (cent, USER_CTXT_ENTRY *, sizeof (USER_CTXT_ENTRY), 1); -+ if (cent == NULL) -+ { -+ if (ctype == ELAN_CAP_RMS) -+ elan_detach_cap (cap, dev->dev_devinfo.dev_rail); -+ -+ return -ENOMEM; -+ } -+ -+ KMEM_ALLOC (cent->cent_cap, ELAN_CAPABILITY *, ELAN_CAP_SIZE(cap), 1); -+ if (cent->cent_cap == NULL) -+ { -+ if (ctype == ELAN_CAP_RMS) -+ elan_detach_cap (cap, dev->dev_devinfo.dev_rail); -+ -+ KMEM_FREE (cent, sizeof (USER_CTXT_ENTRY)); -+ return -ENOMEM; -+ } -+ -+ memcpy (cent->cent_cap, cap, ELAN_CAP_SIZE(cap)); -+ -+ if ((res = elan4_attach_filter (&uctx->uctx_ctxt, cap->cap_mycontext)) != 0) -+ { -+ if (ctype == ELAN_CAP_RMS) -+ elan_detach_cap (cap, dev->dev_devinfo.dev_rail); -+ -+ KMEM_FREE (cent->cent_cap, ELAN_CAP_SIZE (cap)); -+ KMEM_FREE (cent, sizeof (USER_CTXT_ENTRY)); -+ -+ return res; -+ } -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ -+ list_add_tail (&cent->cent_link, &uctx->uctx_cent_list); -+ -+ if (! UCTX_NACKING (uctx)) -+ user_set_filter (uctx, E4_FILTER_STATS); -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ -+ return (0); -+ -+} -+ -+void -+user_detach (USER_CTXT *uctx, ELAN_CAPABILITY *cap) -+{ -+ ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev; -+ struct list_head *entry; -+ struct list_head *next; -+ struct list_head list; -+ unsigned long flags; -+ -+ INIT_LIST_HEAD (&list); -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ -+ PRINTF (uctx, DBG_NETWORK_CTX, cap ? "user_detach: network context %d\n" : "user_detach: all network contexts\n", cap ? cap->cap_mycontext : 0); -+ -+ list_for_each_safe (entry, next, &uctx->uctx_cent_list) { -+ USER_CTXT_ENTRY *cent = list_entry (entry, USER_CTXT_ENTRY, cent_link); -+ -+ if (cap == NULL || ELAN_CAP_MATCH (cap, cent->cent_cap)) -+ { -+ PRINTF1 (uctx, DBG_NETWORK_CTX, "user_detach: detach from network context %d\n", cent->cent_cap->cap_mycontext); -+ -+ elan4_detach_filter (&uctx->uctx_ctxt, cent->cent_cap->cap_mycontext); -+ -+ list_del (&cent->cent_link); -+ list_add_tail (&cent->cent_link, &list); -+ } -+ } -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ -+ while (! 
list_empty (&list)) -+ { -+ USER_CTXT_ENTRY *cent = list_entry (list.next, USER_CTXT_ENTRY, cent_link); -+ -+ list_del (&cent->cent_link); -+ -+ if (user_validate_cap (uctx, cent->cent_cap, ELAN_USER_DETACH) == ELAN_CAP_RMS) -+ elan_detach_cap (cent->cent_cap, dev->dev_devinfo.dev_rail); -+ -+ KMEM_FREE (cent->cent_cap, ELAN_CAP_SIZE (cent->cent_cap)); -+ KMEM_FREE (cent, sizeof (USER_CTXT_ENTRY)); -+ } -+} -+ -+void -+user_block_inputter (USER_CTXT *uctx, unsigned blocked) -+{ -+ unsigned long flags; -+ int isblocked; -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ -+ isblocked = (uctx->uctx_status & UCTX_USER_FILTERING); -+ -+ if (blocked && !isblocked) -+ user_start_nacking (uctx, UCTX_USER_FILTERING); -+ -+ if (!blocked && isblocked) -+ user_stop_nacking (uctx, UCTX_USER_FILTERING); -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+} -+ -+static USER_VPSEG * -+user_install_vpseg (USER_CTXT *uctx, unsigned process, unsigned entries) -+{ -+ struct list_head *entry; -+ USER_VPSEG *seg; -+ -+ if ((process + entries) > (E4_VPT_MIN_ENTRIES << uctx->uctx_routetable->tbl_size)) -+ return (NULL); -+ -+ ASSERT (kmutex_is_locked (&uctx->uctx_vpseg_lock)); -+ -+ list_for_each (entry, &uctx->uctx_vpseg_list) { -+ seg = list_entry (entry, USER_VPSEG, vps_link); -+ -+ if (process <= (seg->vps_process + seg->vps_entries-1) && -+ (process + entries - 1) >= seg->vps_process) -+ return ((USER_VPSEG *) NULL); -+ } -+ -+ KMEM_ZALLOC (seg, USER_VPSEG *, sizeof (USER_VPSEG), 1); -+ -+ if (seg == (USER_VPSEG *) NULL) -+ return ((USER_VPSEG *) NULL); -+ -+ seg->vps_process = process; -+ seg->vps_entries = entries; -+ -+ list_add_tail (&seg->vps_link, &uctx->uctx_vpseg_list); -+ -+ return (seg); -+} -+ -+static void -+user_remove_vpseg (USER_CTXT *uctx, USER_VPSEG *seg) -+{ -+ ASSERT (kmutex_is_locked (&uctx->uctx_vpseg_lock)); -+ -+ list_del (&seg->vps_link); -+ -+ switch (seg->vps_type) -+ { -+ case USER_VPSEG_P2P: -+ /* These pointers (union) are only valid for P2P segs */ -+ if (seg->vps_p2p_routes) -+ KMEM_FREE (seg->vps_p2p_routes, sizeof (E4_VirtualProcessEntry) * seg->vps_entries); -+ -+ if (seg->vps_p2p_cap) -+ KMEM_FREE (seg->vps_p2p_cap, ELAN_CAP_SIZE(seg->vps_p2p_cap)); -+ -+ break; -+ -+ case USER_VPSEG_BCAST: -+ ; -+ } -+ -+ KMEM_FREE (seg, sizeof (USER_VPSEG)); -+} -+ -+static USER_VPSEG * -+user_find_vpseg (USER_CTXT *uctx, unsigned low, unsigned high) -+{ -+ struct list_head *entry; -+ -+ ASSERT (kmutex_is_locked (&uctx->uctx_vpseg_lock)); -+ -+ list_for_each (entry, &uctx->uctx_vpseg_list) { -+ USER_VPSEG *seg = list_entry (entry, USER_VPSEG, vps_link); -+ -+ if (seg->vps_process <= low && (seg->vps_process + seg->vps_entries) > high) -+ return (seg); -+ } -+ -+ return ((USER_VPSEG *) NULL); -+} -+ -+static ELAN_LOCATION -+user_process2location (USER_CTXT *uctx, USER_VPSEG *seg, unsigned process) -+{ -+ ELAN_LOCATION location; -+ int nnodes, nctxs; -+ int nodeOff, ctxOff, vpOff; -+ -+ location.loc_node = ELAN_INVALID_NODE; -+ location.loc_context = -1; -+ -+ if (seg == NULL) -+ seg = user_find_vpseg (uctx, process, process); -+ -+ if (seg == NULL || (seg->vps_type != USER_VPSEG_P2P)) -+ return (location); -+ -+ nnodes = ELAN_CAP_NUM_NODES (seg->vps_p2p_cap); -+ nctxs = ELAN_CAP_NUM_CONTEXTS (seg->vps_p2p_cap); -+ -+ switch (seg->vps_p2p_cap->cap_type & ELAN_CAP_TYPE_MASK) -+ { -+ case ELAN_CAP_TYPE_BLOCK: -+ for (nodeOff = 0, vpOff = 0; nodeOff < nnodes; nodeOff++) -+ { -+ for (ctxOff = 0; ctxOff < nctxs; ctxOff++) -+ { -+ if ((seg->vps_p2p_cap->cap_type & 
ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->vps_p2p_cap->cap_bitmap, ctxOff + (nodeOff * nctxs))) -+ { -+ if (vpOff++ == (process - seg->vps_process)) -+ { -+ location.loc_node = seg->vps_p2p_cap->cap_lownode + nodeOff; -+ location.loc_context = seg->vps_p2p_cap->cap_lowcontext + ctxOff; -+ goto found; -+ } -+ } -+ } -+ } -+ break; -+ -+ case ELAN_CAP_TYPE_CYCLIC: -+ for (ctxOff = 0, vpOff = 0; ctxOff < nctxs; ctxOff++) -+ { -+ for (nodeOff = 0; nodeOff < nnodes; nodeOff++) -+ { -+ if ((seg->vps_p2p_cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->vps_p2p_cap->cap_bitmap, nodeOff + (ctxOff * nnodes))) -+ { -+ if (vpOff++ == (process - seg->vps_process)) -+ { -+ location.loc_node = seg->vps_p2p_cap->cap_lownode + nodeOff; -+ location.loc_context = seg->vps_p2p_cap->cap_lowcontext + ctxOff; -+ goto found; -+ } -+ } -+ } -+ } -+ break; -+ } -+ -+ found: -+ return (location); -+} -+ -+static unsigned -+user_location2process (USER_CTXT *uctx, ELAN_LOCATION location) -+{ -+ unsigned int process = ELAN_INVALID_PROCESS; -+ struct list_head *entry; -+ int nnodes, nctxs; -+ int nodeOff, ctxOff, vpOff; -+ -+ kmutex_lock (&uctx->uctx_vpseg_lock); -+ list_for_each (entry, &uctx->uctx_vpseg_list) { -+ USER_VPSEG *seg = list_entry (entry, USER_VPSEG, vps_link); -+ -+ if (seg->vps_type != USER_VPSEG_P2P) -+ continue; -+ -+ if (location.loc_node >= seg->vps_p2p_cap->cap_lownode && location.loc_node <= seg->vps_p2p_cap->cap_highnode && -+ location.loc_context >= seg->vps_p2p_cap->cap_lowcontext && location.loc_context <= seg->vps_p2p_cap->cap_highcontext) -+ { -+ nnodes = ELAN_CAP_NUM_NODES (seg->vps_p2p_cap); -+ nctxs = ELAN_CAP_NUM_CONTEXTS (seg->vps_p2p_cap); -+ -+ switch (seg->vps_p2p_cap->cap_type & ELAN_CAP_TYPE_MASK) -+ { -+ case ELAN_CAP_TYPE_BLOCK: -+ for (nodeOff = 0, vpOff = 0; nodeOff < nnodes; nodeOff++) -+ { -+ for (ctxOff = 0; ctxOff < nctxs; ctxOff++) -+ { -+ if ((seg->vps_p2p_cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->vps_p2p_cap->cap_bitmap, ctxOff + (nodeOff * nctxs))) -+ { -+ if (location.loc_node == seg->vps_p2p_cap->cap_lownode + nodeOff && -+ location.loc_context == seg->vps_p2p_cap->cap_lowcontext + ctxOff) -+ { -+ process = seg->vps_process + vpOff; -+ goto found; -+ } -+ vpOff++; -+ } -+ } -+ } -+ break; -+ -+ case ELAN_CAP_TYPE_CYCLIC: -+ for (ctxOff = 0, vpOff = 0; ctxOff < nctxs; ctxOff++) -+ { -+ for (nodeOff = 0; nodeOff < nnodes; nodeOff++) -+ { -+ if ((seg->vps_p2p_cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->vps_p2p_cap->cap_bitmap, nodeOff + (ctxOff * nnodes))) -+ { -+ if (location.loc_node == seg->vps_p2p_cap->cap_lownode + nodeOff && -+ location.loc_context == seg->vps_p2p_cap->cap_lowcontext + ctxOff) -+ { -+ process = seg->vps_process + vpOff; -+ goto found; -+ } -+ vpOff++; -+ } -+ } -+ } -+ break; -+ } -+ } -+ } -+ found: -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ -+ return (process); -+} -+ -+static void -+user_loadroute_vpseg (USER_CTXT *uctx, USER_VPSEG *seg, ELAN_POSITION *pos) -+{ -+ ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev; -+ ELAN_CAPABILITY *cap = seg->vps_p2p_cap; -+ unsigned nnodes = ELAN_CAP_NUM_NODES (cap); -+ unsigned nctxs = ELAN_CAP_NUM_CONTEXTS (cap); -+ E4_VirtualProcessEntry route; -+ unsigned nodeOff; -+ unsigned ctxOff; -+ unsigned vpOff; -+ -+ switch (cap->cap_type & ELAN_CAP_TYPE_MASK) -+ { -+ case ELAN_CAP_TYPE_BLOCK: -+ for (nodeOff = 0, vpOff = 0; nodeOff < nnodes; nodeOff++) -+ { -+ for (ctxOff = 0; ctxOff < nctxs; ctxOff++) -+ { -+ if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST 
(cap->cap_bitmap, ctxOff + (nodeOff * nctxs))) -+ { -+ if (seg->vps_p2p_routes != NULL) -+ route = seg->vps_p2p_routes[vpOff]; -+ else if (elan4_generate_route (&uctx->uctx_position, &route, cap->cap_lowcontext + ctxOff, -+ cap->cap_lownode + nodeOff, cap->cap_lownode + nodeOff, user_p2p_route_options) < 0) -+ { -+ vpOff++; -+ continue; -+ } -+ -+ PRINTF5 (uctx, DBG_VP, "user_loadroute_vpseg: virtual process %d -> node %d context %d [%016llx.%016llx]\n", -+ seg->vps_process + vpOff, cap->cap_lownode + nodeOff, cap->cap_lowcontext + ctxOff, -+ route.Values[0], route.Values[1]); -+ -+ elan4_write_route (dev, uctx->uctx_routetable, seg->vps_process + vpOff, &route); -+ -+ vpOff++; -+ } -+ } -+ } -+ break; -+ -+ case ELAN_CAP_TYPE_CYCLIC: -+ for (ctxOff = 0, vpOff = 0; ctxOff < nctxs; ctxOff++) -+ { -+ for (nodeOff = 0; nodeOff < nnodes; nodeOff++) -+ { -+ if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, nodeOff + (ctxOff * nnodes))) -+ { -+ if (seg->vps_p2p_routes != NULL) -+ route = seg->vps_p2p_routes[vpOff]; -+ else if (elan4_generate_route (&uctx->uctx_position, &route, cap->cap_lowcontext + ctxOff, -+ cap->cap_lownode + nodeOff, cap->cap_lownode + nodeOff, user_p2p_route_options) < 0) -+ { -+ vpOff++; -+ continue; -+ } -+ -+ PRINTF5 (uctx, DBG_VP, "user_loadroute_vpseg: virtual process %d -> node %d context %d [%016llx.%016llx]\n", -+ seg->vps_process + vpOff, cap->cap_lownode + nodeOff, cap->cap_lowcontext + ctxOff, -+ route.Values[0], route.Values[1]); -+ -+ elan4_write_route (dev, uctx->uctx_routetable, seg->vps_process + vpOff, &route); -+ -+ vpOff++; -+ } -+ } -+ } -+ break; -+ } -+} -+ -+static int -+user_loadroute_bcast (USER_CTXT *uctx, USER_VPSEG *seg) -+{ -+ ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev; -+ ELAN_POSITION *pos = &uctx->uctx_position; -+ E4_VirtualProcessEntry route; -+ USER_VPSEG *aseg; -+ int res; -+ ELAN_LOCATION low; -+ ELAN_LOCATION high; -+ -+ if ((aseg = user_find_vpseg (uctx, seg->vps_bcast_lowvp, seg->vps_bcast_highvp)) == NULL || aseg->vps_type != USER_VPSEG_P2P) -+ return (-EINVAL); -+ -+#ifdef use_elanmod -+ if ((res = user_validate_cap (dev, aseg->vps_p2p_cap, ELAN_USER_BROADCAST)) < 0) -+ return (res); -+#endif -+ -+ low = user_process2location (uctx, aseg, seg->vps_bcast_lowvp); -+ high = user_process2location (uctx, aseg, seg->vps_bcast_highvp); -+ -+ if (low.loc_context != high.loc_context) -+ return (-EINVAL); -+ -+ /* NOTE: in loopback mode we can only broadcast to ourself - -+ * in back-to-back mode we can only broadcast to the other node */ -+ if ((pos->pos_mode == ELAN_POS_MODE_LOOPBACK && low.loc_node != high.loc_node && low.loc_node != pos->pos_nodeid) || -+ (pos->pos_mode == ELAN_POS_MODE_BACKTOBACK && low.loc_node != high.loc_node && low.loc_node == pos->pos_nodeid)) -+ { -+ return (-EINVAL); -+ } -+ -+ if ((res = elan4_generate_route (pos, &route, low.loc_context, low.loc_node, high.loc_node, user_bcast_route_options)) < 0) -+ return (res); -+ -+ PRINTF (uctx, DBG_VP, "user_loadroute_bcast: virtual process %d -> nodes %d.%d context %d [%016llx.%016llx]\n", -+ seg->vps_process, low.loc_node, high.loc_node, low.loc_context, route.Values[0], route.Values[1]); -+ -+ elan4_write_route (dev, uctx->uctx_routetable, seg->vps_process, &route); -+ return (0); -+} -+ -+int -+user_add_p2pvp (USER_CTXT *uctx, unsigned process, ELAN_CAPABILITY *cap) -+{ -+ USER_VPSEG *seg; -+ ELAN_CAPABILITY *ncap; -+ unsigned entries; -+ -+ if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) == 0) -+ entries = bt_nbits (cap->cap_bitmap, ELAN_CAP_BITMAPSIZE(cap)); -+ 
else -+ entries = ELAN_CAP_BITMAPSIZE(cap); -+ -+ if ((process + entries) > (E4_VPT_MIN_ENTRIES << uctx->uctx_routetable->tbl_size)) -+ return (-EINVAL); -+ -+ KMEM_ALLOC (ncap, ELAN_CAPABILITY *, ELAN_CAP_SIZE (cap), 1); -+ -+ if (ncap == NULL) -+ return (-ENOMEM); -+ -+ memcpy (ncap, cap, ELAN_CAP_SIZE (cap)); -+ -+ kmutex_lock (&uctx->uctx_vpseg_lock); -+ -+ if ((seg = user_install_vpseg (uctx, process, entries)) == NULL) -+ { -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ return (-EINVAL); -+ } -+ -+ seg->vps_type = USER_VPSEG_P2P; -+ seg->vps_p2p_cap = ncap; -+ seg->vps_p2p_routes = NULL; -+ -+ user_loadroute_vpseg (uctx, seg, &uctx->uctx_position); -+ -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ -+ return (0); -+} -+ -+int -+user_add_bcastvp (USER_CTXT *uctx, unsigned process, unsigned lowvp, unsigned highvp) -+{ -+ USER_VPSEG *seg; -+ int res; -+ -+ if (lowvp > highvp || process >= (E4_VPT_MIN_ENTRIES << uctx->uctx_routetable->tbl_size)) -+ return (-EINVAL); -+ -+ kmutex_lock (&uctx->uctx_vpseg_lock); -+ -+ if ((seg = user_install_vpseg (uctx, process, 1)) == NULL) -+ { -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ return (-EINVAL); -+ } -+ -+ seg->vps_type = USER_VPSEG_BCAST; -+ seg->vps_bcast_lowvp = lowvp; -+ seg->vps_bcast_highvp = highvp; -+ -+ if ((res = user_loadroute_bcast (uctx, seg)) < 0) -+ user_remove_vpseg (uctx, seg); -+ -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ return (res); -+} -+ -+int -+user_removevp (USER_CTXT *uctx, unsigned process) -+{ -+ USER_VPSEG *seg; -+ -+ kmutex_lock (&uctx->uctx_vpseg_lock); -+ -+ if (process == ELAN_INVALID_PROCESS) -+ seg = list_entry (uctx->uctx_vpseg_list.next, USER_VPSEG, vps_link); -+ else -+ seg = user_find_vpseg (uctx, process, process); -+ -+ if (seg == NULL) -+ { -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ return (-EINVAL); -+ } -+ -+ do { -+ ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev; -+ int i; -+ -+ for (i = 0; i < seg->vps_entries; i++) -+ elan4_invalidate_route (dev, uctx->uctx_routetable, seg->vps_process + i); -+ -+ user_remove_vpseg (uctx, seg); -+ -+ } while (process == ELAN_INVALID_PROCESS && (seg = list_entry (uctx->uctx_vpseg_list.next, USER_VPSEG, vps_link)) != NULL); -+ -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ -+ return (0); -+} -+ -+int -+user_set_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route) -+{ -+ ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev; -+ USER_VPSEG *seg; -+ ELAN_LOCATION location; -+ -+ kmutex_lock (&uctx->uctx_vpseg_lock); -+ -+ if ((seg = user_find_vpseg (uctx, process, process)) == NULL || seg->vps_type != USER_VPSEG_P2P) -+ { -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ return (-EINVAL); -+ } -+ -+ /* check that the route supplied is valid and goes to the correct place */ -+ location = user_process2location (uctx, seg, process); -+ -+ if (elan4_check_route (&uctx->uctx_position, location, route, 0) != 0) -+ { -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ return (-EINVAL); -+ } -+ -+ if (seg->vps_p2p_routes == NULL) -+ KMEM_ZALLOC (seg->vps_p2p_routes, E4_VirtualProcessEntry *, sizeof (E4_VirtualProcessEntry) * seg->vps_entries, 1); -+ -+ if (seg->vps_p2p_routes == NULL) -+ { -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ return (-ENOMEM); -+ } -+ -+ seg->vps_p2p_routes[process - seg->vps_process].Values[0] = route->Values[0]; -+ seg->vps_p2p_routes[process - seg->vps_process].Values[1] = ROUTE_CTXT_VALUE(location.loc_context) | (route->Values[1] & ~ROUTE_CTXT_MASK); -+ -+ PRINTF (uctx, DBG_ROUTE, "user_set_route: vp=%d -> %016llx%016llx\n", process, -+ 
seg->vps_p2p_routes[process - seg->vps_process].Values[1], seg->vps_p2p_routes[process - seg->vps_process].Values[0]); -+ -+ elan4_write_route (dev, uctx->uctx_routetable, process, &seg->vps_p2p_routes[process - seg->vps_process]); -+ -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ -+ return (0); -+} -+ -+int -+user_reset_route (USER_CTXT *uctx, unsigned process) -+{ -+ ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev; -+ E4_VirtualProcessEntry route; -+ ELAN_LOCATION location; -+ USER_VPSEG *seg; -+ -+ kmutex_lock (&uctx->uctx_vpseg_lock); -+ -+ if ((seg = user_find_vpseg (uctx, process, process)) == NULL || seg->vps_type != USER_VPSEG_P2P) -+ { -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ return (-EINVAL); -+ } -+ -+ if (seg->vps_p2p_routes != NULL) -+ { -+ seg->vps_p2p_routes[process - seg->vps_process].Values[0] = 0; -+ seg->vps_p2p_routes[process - seg->vps_process].Values[1] = 0; -+ } -+ -+ /* generate the default route to this location */ -+ location = user_process2location (uctx, seg, process); -+ -+ PRINTF (uctx, DBG_ROUTE, "user_reset_route: vp=%d\n", process); -+ -+ if (elan4_generate_route (&uctx->uctx_position, &route, location.loc_context, location.loc_node, location.loc_node, 0) < 0) -+ elan4_invalidate_route (dev, uctx->uctx_routetable, process); -+ else -+ elan4_write_route (dev, uctx->uctx_routetable, process, &route); -+ -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ -+ return (0); -+} -+ -+int -+user_get_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route) -+{ -+ ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev; -+ USER_VPSEG *seg; -+ -+ kmutex_lock (&uctx->uctx_vpseg_lock); -+ -+ if ((seg = user_find_vpseg (uctx, process, process)) == NULL || seg->vps_type != USER_VPSEG_P2P) -+ { -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ return (-EINVAL); -+ } -+ -+ elan4_read_route (dev, uctx->uctx_routetable, process, route); -+ -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ return (0); -+} -+ -+int -+user_check_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route, unsigned *error) -+{ -+ ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev; -+ USER_VPSEG *seg; -+ -+ kmutex_lock (&uctx->uctx_vpseg_lock); -+ -+ if ((seg = user_find_vpseg (uctx, process, process)) == NULL || seg->vps_type != USER_VPSEG_P2P) -+ { -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ return (-EINVAL); -+ } -+ -+ elan4_read_route (dev, uctx->uctx_routetable, process, route); -+ -+ *error = elan4_check_route (&uctx->uctx_position, user_process2location (uctx, seg, process), route, 0); -+ -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ return (0); -+} -+ -+int -+user_send_neterr_msg (USER_CTXT *uctx, unsigned int vp, unsigned int nctx, unsigned int retries, ELAN4_NETERR_MSG *msg) -+{ -+ USER_VPSEG *seg; -+ ELAN_LOCATION location; -+ unsigned long flags; -+ int res, found = 0; -+ struct list_head *el; -+ -+ kmutex_lock (&uctx->uctx_vpseg_lock); -+ /* determine the location of the virtual process */ -+ if ((seg = user_find_vpseg (uctx, vp, vp)) == NULL) -+ { -+ PRINTF (uctx, DBG_NETERR, "user_send_neterr_msg: vp=%d has no vpseg\n", vp); -+ -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ return -EINVAL; -+ } -+ -+ switch (seg->vps_type) -+ { -+ case USER_VPSEG_P2P: -+ location = user_process2location (uctx, seg, vp); -+ break; -+ -+ case USER_VPSEG_BCAST: -+ PRINTF (uctx, DBG_NETERR, "user_send_neterr_msg: vp=%d is a bcast vp\n", vp); -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ return -EINVAL; -+ } -+ -+ /* check that we're attached to the network context */ -+ spin_lock_irqsave (&uctx->uctx_spinlock, 
flags); -+ list_for_each (el, &uctx->uctx_cent_list) { -+ USER_CTXT_ENTRY *cent = list_entry (el, USER_CTXT_ENTRY, cent_link); -+ -+ if (cent->cent_cap->cap_mycontext == nctx) -+ found++; -+ } -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ -+ if (! found) -+ { -+ PRINTF (uctx, DBG_NETERR, "user_send_neterr_msg: nctx=%d not attached\n", nctx); -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ -+ return -EINVAL; -+ } -+ -+ /* Update the fields which the user might have "faked" */ -+ msg->msg_context = location.loc_context; -+ msg->msg_sender.loc_node = uctx->uctx_position.pos_nodeid; -+ msg->msg_sender.loc_context = nctx; -+ -+ res = elan4_neterr_sendmsg (uctx->uctx_ctxt.ctxt_dev, location.loc_node, retries, msg); -+ -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ -+ return (res); -+} -+ -+ -+static int -+user_resolvevp (USER_CTXT *uctx, unsigned process) -+{ -+ int res = 0; -+ USER_VPSEG *seg; -+ ELAN_LOCATION location; -+ E4_VirtualProcessEntry route; -+ -+ PRINTF1 (uctx, DBG_VP, "user_resolvevp: process=%d\n", process); -+ -+ kmutex_lock (&uctx->uctx_vpseg_lock); -+ -+ if ((seg = user_find_vpseg (uctx, process, process)) == NULL) -+ { -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ return (-EINVAL); -+ } -+ -+ switch (seg->vps_type) -+ { -+ case USER_VPSEG_P2P: -+#ifdef use_elanmod -+ if ((res = user_validate_cap (uctx, seg->vps_p2p_cap, ELAN_USER_P2P)) != 0) -+ break; -+#endif -+ -+ location = user_process2location (uctx, seg, process); -+ -+ PRINTF (uctx, DBG_VP, "user_resolvevp: vp=%d -> node=%d ctx=%d\n", process, location.loc_node, location.loc_context); -+ -+ if (seg->vps_p2p_routes != NULL && seg->vps_p2p_routes[process - seg->vps_process].Values[0] != 0) -+ route = seg->vps_p2p_routes[process - seg->vps_process]; -+ else if ((res = elan4_generate_route (&uctx->uctx_position, &route, location.loc_context, location.loc_node, location.loc_node, user_p2p_route_options)) < 0) -+ break; -+ -+ elan4_write_route (uctx->uctx_ctxt.ctxt_dev, uctx->uctx_routetable, process, &route); -+ break; -+ -+ case USER_VPSEG_BCAST: -+ res = user_loadroute_bcast (uctx, seg); -+ break; -+ -+ default: -+ res = -EINVAL; -+ break; -+ } -+ -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ return (res); -+} -+ -+static void -+user_eproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status) -+{ -+ USER_CTXT *uctx = (USER_CTXT *) ctxt; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ -+ if (RING_QUEUE_REALLY_FULL (uctx->uctx_eprocTrapQ) || (uctx->uctx_status & UCTX_STOPPED)) -+ { -+ PRINTF (uctx, DBG_EPROC, "user_eproc_trap: %s\n", (uctx->uctx_status & UCTX_STOPPED) ? 
"context stopped" : "trap queue overflow"); -+ -+ uctx->uctx_status |= UCTX_EPROC_QUEUE_ERROR; -+ } -+ else -+ { -+ elan4_extract_eproc_trap (ctxt->ctxt_dev, status, RING_QUEUE_BACK (uctx->uctx_eprocTrapQ, uctx->uctx_eprocTraps), 0); -+ -+ DBGCMD (ctxt, DBG_EPROC, elan4_display_eproc_trap (ctxt, DBG_EPROC, "user_eproc_trap", RING_QUEUE_BACK(uctx->uctx_eprocTrapQ, uctx->uctx_eprocTraps))); -+ -+ if (RING_QUEUE_ADD (uctx->uctx_eprocTrapQ)) -+ user_start_stopping (uctx, UCTX_EPROC_QUEUE_FULL); -+ } -+ -+ user_signal_trap (uctx); -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+} -+ -+static void -+user_cproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned cqnum) -+{ -+ USER_CTXT *uctx = (USER_CTXT *) ctxt; -+ USER_CQ *ucq = NULL; -+ struct list_head *entry; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ -+ list_for_each (entry, &uctx->uctx_cqlist) { -+ ucq = list_entry (entry, USER_CQ, ucq_link); -+ -+ if (elan4_cq2num(ucq->ucq_cq) == cqnum) -+ break; -+ } -+ -+ ASSERT (ucq != NULL); -+ -+ if (ucq->ucq_state != UCQ_RUNNING && CPROC_TrapType (status) == CommandProcInserterError) -+ { -+ PRINTF (ctxt, DBG_TRAP, "user_cproc_trap CommandProcInserterError\n"); -+ ucq->ucq_errored++; -+ } -+ else -+ { -+ ASSERT (ucq->ucq_state == UCQ_RUNNING); -+ -+ elan4_extract_cproc_trap (ctxt->ctxt_dev, status, &ucq->ucq_trap, cqnum); -+ -+ DBGCMD (ctxt, DBG_CPROC, elan4_display_cproc_trap (ctxt, DBG_CPROC, "user_cproc_trap", &ucq->ucq_trap)); -+ -+ ucq->ucq_state = UCQ_TRAPPED; -+ -+ } -+ -+ user_signal_trap (uctx); -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+} -+ -+static void -+user_dproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit) -+{ -+ USER_CTXT *uctx = (USER_CTXT *) ctxt; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ -+ if (RING_QUEUE_REALLY_FULL (uctx->uctx_dprocTrapQ) || (uctx->uctx_status & UCTX_STOPPED)) -+ { -+ PRINTF (uctx, DBG_DPROC, "user_dproc_trap: %s\n", (uctx->uctx_status & UCTX_STOPPED) ? "context stopped" : "trap queue overflow"); -+ -+ uctx->uctx_status |= UCTX_DPROC_QUEUE_ERROR; -+ } -+ else -+ { -+ ELAN4_DPROC_TRAP *trap = RING_QUEUE_BACK (uctx->uctx_dprocTrapQ, uctx->uctx_dprocTraps); -+ -+ elan4_extract_dproc_trap (ctxt->ctxt_dev, status, trap, unit); -+ -+ DBGCMD (ctxt, DBG_DPROC, elan4_display_dproc_trap (ctxt, DBG_DPROC, "user_dproc_trap", trap)); -+ -+ if (!DPROC_PrefetcherFault (status) && DPROC_TrapType(status) == DmaProcFailCountError && !RING_QUEUE_FULL (uctx->uctx_dmaQ)) -+ { -+ trap->tr_desc.dma_typeSize |= DMA_FailCount (user_dproc_retry_count); -+ -+ *RING_QUEUE_BACK (uctx->uctx_dmaQ, uctx->uctx_dmas) = trap->tr_desc; -+ -+ (void) RING_QUEUE_ADD (uctx->uctx_dmaQ); -+ } -+ else -+ { -+ if (RING_QUEUE_ADD (uctx->uctx_dprocTrapQ)) -+ user_start_stopping (uctx, UCTX_DPROC_QUEUE_FULL); -+ } -+ } -+ -+ user_signal_trap (uctx); -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+} -+ -+static void -+user_tproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status) -+{ -+ USER_CTXT *uctx = (USER_CTXT *) ctxt; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ -+ if (RING_QUEUE_REALLY_FULL (uctx->uctx_tprocTrapQ) || (uctx->uctx_status & UCTX_STOPPED)) -+ { -+ PRINTF (uctx, DBG_TPROC, "user_tproc_trap: %s\n", (uctx->uctx_status & UCTX_STOPPED) ? 
"context stopped" : "trap queue overflow"); -+ -+ uctx->uctx_status |= UCTX_TPROC_QUEUE_ERROR; -+ } -+ else -+ { -+ elan4_extract_tproc_trap (ctxt->ctxt_dev, status, RING_QUEUE_BACK (uctx->uctx_tprocTrapQ, uctx->uctx_tprocTraps)); -+ -+ DBGCMD (ctxt, DBG_TPROC, elan4_display_tproc_trap (ctxt, DBG_TPROC, "user_tproc_trap", RING_QUEUE_BACK (uctx->uctx_tprocTrapQ, uctx->uctx_tprocTraps))); -+ -+ if (RING_QUEUE_ADD (uctx->uctx_tprocTrapQ)) -+ user_start_stopping (uctx, UCTX_TPROC_QUEUE_FULL); -+ } -+ user_signal_trap (uctx); -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+} -+ -+static void -+user_iproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit) -+{ -+ USER_CTXT *uctx = (USER_CTXT *) ctxt; -+ USER_IPROC_TRAP *utrap = &uctx->uctx_iprocTrap[unit & 1]; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ -+ ASSERT (utrap->ut_state == UTS_IPROC_RUNNING); -+ -+ elan4_extract_iproc_trap (ctxt->ctxt_dev, status, &utrap->ut_trap, unit); -+ DBGCMD (ctxt, DBG_IPROC, elan4_display_iproc_trap (ctxt, DBG_IPROC, "user_iproc_trap", &utrap->ut_trap)); -+ -+ utrap->ut_state = UTS_IPROC_TRAPPED; -+ -+ user_start_nacking (uctx, unit ? UCTX_IPROC_CH0_TRAPPED : UCTX_IPROC_CH1_TRAPPED); -+ -+ user_signal_trap (uctx); -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+} -+ -+static void -+user_interrupt (ELAN4_CTXT *ctxt, E4_uint64 cookie) -+{ -+ USER_CTXT *uctx = (USER_CTXT *) ctxt; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ -+ PRINTF1 (uctx, DBG_TRAP, "user_interrupt: cookie=%llx\n", cookie); -+ -+ switch (cookie) -+ { -+ case ELAN4_INT_COOKIE_DDCQ: -+ uctx->uctx_ddcq_intr--; -+ -+ user_signal_trap (uctx); -+ break; -+ -+ default: -+ if (uctx->uctx_intcookie_table == NULL || intcookie_fire (uctx->uctx_intcookie_table, cookie) != 0) -+ { -+ PRINTF2 (uctx, DBG_TRAP, "user_interrupt: cookie=%llx %s\n", cookie, uctx->uctx_intcookie_table ? "not found" : "no table"); -+ uctx->uctx_status |= UCTX_EPROC_QUEUE_ERROR; -+ user_signal_trap (uctx); -+ } -+ break; -+ } -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+} -+static void -+user_needs_shuffle (ELAN4_CTXT *ctxt, int tbl, int hashidx) -+{ -+ USER_CTXT *uctx = (USER_CTXT *) ctxt; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ -+ elan4mmu_set_shuffle(ctxt, tbl, hashidx); -+ -+ if (ctxt->shuffle_needed[tbl]) -+ user_shuffle_signal_trap (uctx); -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ -+} -+static void -+user_neterrmsg (ELAN4_CTXT *ctxt, ELAN4_NETERR_MSG *msg) -+{ -+ USER_CTXT *uctx = (USER_CTXT *) ctxt; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ -+ if (! RING_QUEUE_FULL (uctx->uctx_msgQ)) -+ { -+ memcpy (RING_QUEUE_BACK (uctx->uctx_msgQ, uctx->uctx_msgs), msg, sizeof (ELAN4_NETERR_MSG)); -+ -+ (void) RING_QUEUE_ADD (uctx->uctx_msgQ); -+ -+ user_signal_trap (uctx); -+ } -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+} -+ -+ELAN4_TRAP_OPS user_trap_ops = -+{ -+ user_eproc_trap, -+ user_cproc_trap, -+ user_dproc_trap, -+ user_tproc_trap, -+ user_iproc_trap, -+ user_interrupt, -+ user_neterrmsg, -+ user_needs_shuffle, -+}; -+ -+static int -+deliver_trap (ELAN4_USER_TRAP *utrapp, int type, unsigned proc, void *trap, ...) 
-+{ -+ register int i, len; -+ va_list ap; -+ -+ PRINTF (NULL, DBG_TRAP, "deliver_trap: type=%d proc=%d\n", type, proc); -+ -+ switch (proc) -+ { -+ case UTS_CPROC: len = sizeof (ELAN4_CPROC_TRAP); break; -+ case UTS_DPROC: len = sizeof (ELAN4_DPROC_TRAP); break; -+ case UTS_EPROC: len = sizeof (ELAN4_EPROC_TRAP); break; -+ case UTS_IPROC: len = sizeof (ELAN4_IPROC_TRAP); break; -+ case UTS_TPROC: len = sizeof (ELAN4_TPROC_TRAP); break; -+ case UTS_NETERR_MSG: len = sizeof (ELAN4_NETERR_MSG); break; -+ default: len = 0; break; -+ } -+ -+ if (put_user (type, &utrapp->ut_type) || put_user (proc, &utrapp->ut_proc) || copy_to_user (&utrapp->ut_trap, trap, len)) -+ return (UTS_EFAULT); -+ -+ va_start (ap, trap); -+ for (i = 0; i < sizeof (utrapp->ut_args)/sizeof (utrapp->ut_args[0]); i++) -+ if (put_user (va_arg (ap, unsigned long), &utrapp->ut_args[i])) -+ return (UTS_EFAULT); -+ va_end (ap); -+ -+ return (type); -+} -+ -+static int -+user_pagefault (USER_CTXT *uctx, E4_FaultSave *farea) -+{ -+ E4_Addr addr = farea->FaultAddress; -+ E4_uint32 fsr = FaultSaveFSR(farea->FSRAndFaultContext); -+ FAULT_SAVE *entry; -+ FAULT_SAVE **predp; -+ int count; -+ -+ PRINTF2 (uctx, DBG_FAULT, "user_pagefault: addr=%llx fsr %x\n", (unsigned long long) addr, fsr); -+ -+ if ((fsr & FSR_FaultForBadData) != 0) /* Memory ECC error during walk */ -+ { -+ PRINTF0 (uctx, DBG_FAULT, "user_pagefault: ECC error during walk\n"); -+ return (-EFAULT); -+ } -+ -+ if ((fsr & FSR_FaultForMaxChainCount) != 0) /* Have walked a chain of 1024 items */ -+ { -+ PRINTF0 (uctx, DBG_FAULT, "user_pagefault: pte chain too long\n"); -+ return (-EFAULT); -+ } -+ -+ if (! user_pagefault_enabled) -+ return (-EFAULT); -+ -+ if (uctx->uctx_num_fault_save) -+ { -+ spin_lock (&uctx->uctx_fault_lock); -+ for( predp = &uctx->uctx_fault_list; (entry = *predp)->next != NULL; predp = &entry->next) -+ { -+ if (entry->addr == (addr & ~((E4_Addr) PAGE_SIZE-1))) -+ break; -+ } -+ -+ *predp = entry->next; -+ entry->next = uctx->uctx_fault_list; -+ uctx->uctx_fault_list = entry; -+ -+ if (entry->addr == (addr & ~((E4_Addr) PAGE_SIZE-1))) -+ { -+ if ((entry->count <<= 1) > max_fault_pages) -+ entry->count = max_fault_pages; -+ } -+ else -+ entry->count = min_fault_pages; -+ -+ entry->addr = (addr & ~((E4_Addr) PAGE_SIZE-1))+(entry->count * PAGE_SIZE); -+ count = entry->count; -+ spin_unlock (&uctx->uctx_fault_lock); -+ -+ if (user_load_range (uctx, addr & ~((E4_Addr) PAGE_SIZE-1), count * PAGESIZE, fsr) == 0) -+ return 0; -+ -+ /* else pre-faulting has failed, try just this page */ -+ } -+ -+ return (user_load_range (uctx, addr & ~((E4_Addr) PAGE_SIZE-1), PAGE_SIZE, fsr)); -+ -+} -+ -+static int -+queue_dma_for_retry (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, E4_DMA *dma) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ -+ if (RING_QUEUE_FULL (uctx->uctx_dmaQ)) -+ { -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ -+ PRINTF (uctx, DBG_DPROC, "queue_dma_for_retry: overflow\n"); -+ -+ return (deliver_trap (utrapp, UTS_QUEUE_OVERFLOW, UTS_NOPROC, NULL, UCTX_DPROC_QUEUE_OVERFLOW)); -+ } -+ -+ *RING_QUEUE_BACK (uctx->uctx_dmaQ, uctx->uctx_dmas) = *dma; -+ -+ (void) RING_QUEUE_ADD (uctx->uctx_dmaQ); -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ -+ return (UTS_FINISHED); -+} -+ -+static int -+queue_thread_for_retry (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, E4_ThreadRegs *regs) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ -+ if (RING_QUEUE_FULL 
(uctx->uctx_threadQ)) -+ { -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ -+ PRINTF (uctx, DBG_TPROC, "queue_thread_for_retry: overflow\n"); -+ -+ return (deliver_trap (utrapp, UTS_QUEUE_OVERFLOW, UTS_NOPROC, NULL, UCTX_TPROC_QUEUE_OVERFLOW)); -+ } -+ -+ *RING_QUEUE_BACK (uctx->uctx_threadQ, uctx->uctx_threads) = *regs; -+ (void) RING_QUEUE_ADD (uctx->uctx_threadQ); -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ -+ return (UTS_FINISHED); -+} -+ -+static int -+fixup_eproc_trap (USER_CTXT *uctx, ELAN4_EPROC_TRAP *trap, int waitevent) -+{ -+ E4_FaultSave *farea = &trap->tr_faultarea; -+ E4_uint32 fsr = FaultSaveFSR(farea->FSRAndFaultContext); -+ E4_uint64 CountAndType; -+ E4_uint64 CopySource; -+ E4_uint64 CopyDest; -+ -+ /* -+ * Event processor can trap as follows : -+ * 1) Event location read (faddr == event location & Event Permission) -+ * 2) Event location write (faddr == event location & Event Permission) -+ * 3) Copy Source read Read Access -+ * 4) Copy/Write dest write other -+ * -+ * NOTE - it is possible to see both 3) and 4) together - but only with physical errors. -+ */ -+ if (AT_Perm(fsr) == AT_PermLocalDataRead || AT_Perm(fsr) == AT_PermLocalDataWrite) -+ { -+ /* -+ * We complete the copy/write by issuing a waitevent 0 of the appropriate type. -+ * - NB mask off bottom bits of EventAddr in case of partial setevent -+ */ -+ E4_uint64 EventAddr = trap->tr_eventaddr & ~((E4_uint64) E4_EVENT_ALIGN-1); -+ -+ if (! user_ddcq_check (uctx, 4)) -+ return (0); -+ -+ if ((trap->tr_event.ev_CountAndType & E4_EVENT_COPY_TYPE_MASK) == E4_EVENT_WRITE) -+ { -+ /* case 4) faulted on write word to destination */ -+ -+ CountAndType = trap->tr_event.ev_CountAndType & E4_EVENT_TYPE_MASK; -+ -+ PRINTF (uctx, DBG_TRAP, "fixup_eproc_trap: write Event=%llx CountAndType=%llx\n", EventAddr, CountAndType); -+ PRINTF (uctx, DBG_TRAP, " WritePtr=%llx WriteValue=%llx\n", -+ trap->tr_event.ev_WritePtr, trap->tr_event.ev_WriteValue); -+ -+ user_ddcq_waitevent (uctx, EventAddr, CountAndType, trap->tr_event.ev_WritePtr, trap->tr_event.ev_WriteValue); -+ } -+ else -+ { -+ /* case 3) or case 4) faulted on read/write of copy */ -+ if (AT_Perm (fsr) == AT_PermLocalDataRead) -+ { -+ CountAndType = (trap->tr_event.ev_CountAndType & E4_EVENT_DATA_TYPE_MASK) | EPROC_CopySize(trap->tr_status); -+ CopySource = trap->tr_event.ev_CopySource - EVENT_COPY_BLOCK_SIZE; -+ CopyDest = trap->tr_event.ev_CopyDest; -+ } -+ else -+ { -+ CountAndType = ((trap->tr_event.ev_CountAndType & E4_EVENT_DATA_TYPE_MASK) | -+ ((EPROC_CopySize(trap->tr_status) + EVENT_COPY_NDWORDS) & E4_EVENT_COPY_SIZE_MASK)); -+ CopySource = trap->tr_event.ev_CopySource - EVENT_COPY_BLOCK_SIZE; -+ CopyDest = trap->tr_event.ev_CopyDest - EVENT_COPY_BLOCK_SIZE; -+ } -+ -+ PRINTF (uctx, DBG_TRAP, "fixup_eproc_trap: copy Event=%llx CountAndType=%llx\n", EventAddr, CountAndType); -+ PRINTF (uctx, DBG_TRAP, " CopySource=%llx CopyDest=%llx\n", CopySource, CopyDest); -+ -+ user_ddcq_waitevent (uctx, EventAddr, CountAndType, CopySource, CopyDest); -+ } -+ } -+ else -+ { -+ E4_uint64 EventAddr = trap->tr_eventaddr & ~((E4_uint64) E4_EVENT_ALIGN-1); -+ E4_uint32 EventCount = trap->tr_eventaddr & (E4_EVENT_ALIGN-1); -+ -+ /* case 1) or 2) - just reissue the event */ -+ if (! 
waitevent) -+ PRINTF (uctx, DBG_TRAP, "fixup_eproc_trap: setevent EventAddr=%llx EventCount=%x\n", EventAddr, EventCount); -+ else -+ { -+ PRINTF (uctx, DBG_TRAP, "fixup_eproc_trap: waitevent Event=%llx CountAndType=%llx\n", EventAddr, trap->tr_event.ev_CountAndType); -+ PRINTF (uctx, DBG_TRAP, " Param[0]=%llx Param[1]=%llx\n", -+ trap->tr_event.ev_Params[0], trap->tr_event.ev_Params[1]); -+ } -+ -+ if (! user_ddcq_check (uctx, waitevent ? 4 : 2)) -+ return (0); -+ -+ if (waitevent) -+ user_ddcq_waitevent (uctx, EventAddr, trap->tr_event.ev_CountAndType, -+ trap->tr_event.ev_Params[0], trap->tr_event.ev_Params[1]); -+ else -+ user_ddcq_seteventn (uctx, EventAddr, EventCount); -+ } -+ -+ return (1); -+} -+ -+ -+static int -+resolve_eproc_trap (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, ELAN4_EPROC_TRAP *trap) -+{ -+ switch (EPROC_TrapType (trap->tr_status)) -+ { -+ case EventProcNoFault: -+ PRINTF (uctx, DBG_TRAP, "resolve_eproc_trap: EventProcNoFault\n"); -+ -+ return (UTS_FINISHED); -+ -+ case EventProcAddressAlignment: -+ return (deliver_trap (utrapp, UTS_ALIGNMENT_ERROR, UTS_EPROC, trap)); -+ -+ case EventProcMemoryFault: -+ PRINTF (uctx, DBG_TRAP, "resolve_eproc_trap: EventProcMemoryFault @ %llx\n", trap->tr_faultarea.FaultAddress); -+ -+ if (user_pagefault (uctx, &trap->tr_faultarea) != 0) -+ return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_EPROC, trap)); -+ -+ return (UTS_FINISHED); -+ -+ case EventProcCountWrapError: -+ return (deliver_trap (utrapp, UTS_BAD_TRAP, UTS_EPROC, trap)); -+ -+ default: -+ printk ("resolve_eproc_trap: bad trap type %d\n", EPROC_TrapType (trap->tr_status)); -+ BUG(); -+ } -+ -+ return (UTS_FINISHED); -+} -+ -+static int -+resolve_cproc_trap (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, USER_CQ *ucq) -+{ -+ ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev; -+ ELAN4_CPROC_TRAP *trap = &ucq->ucq_trap; -+ E4_uint64 command; -+ int res; -+ int chan; -+ -+ ELAN_LOCATION location; -+ int vp, node; -+ -+ PRINTF2 (uctx, DBG_CPROC, "resolve_cproc_trap: cq %p is trapped - Status %lx\n", ucq, trap->tr_status); -+ -+ switch (CPROC_TrapType (trap->tr_status)) -+ { -+ case CommandProcDmaQueueOverflow: -+ PRINTF (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcDmaQueueOverflow\n"); -+ /* -+ * XXXX: should wait for the queue to become emptier if we're -+ * responsible for it being very full -+ */ -+ ucq->ucq_state = UCQ_NEEDS_RESTART; -+ break; -+ -+ case CommandProcInterruptQueueOverflow: -+ PRINTF (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcInterruptQueueOverflow\n"); -+ /* -+ * XXXX: should wait for the queue to become emptier if we're -+ * responsible for it being very full -+ */ -+ ucq->ucq_state = UCQ_NEEDS_RESTART; -+ break; -+ -+ case CommandProcWaitTrap: -+ PRINTF0 (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcWaitTrap\n"); -+ -+ if ((res = resolve_eproc_trap (uctx, utrapp, &trap->tr_eventtrap)) != UTS_FINISHED) -+ { -+ ucq->ucq_state = UCQ_STOPPED; -+ -+ return (res); -+ } -+ -+ if (fixup_eproc_trap (uctx, &trap->tr_eventtrap, 1) == 0) -+ return UTS_RESCHEDULE; -+ -+ ucq->ucq_state = UCQ_NEEDS_RESTART; -+ break; -+ -+ case CommandProcMemoryFault: -+ PRINTF1 (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcMemoryFault at %llx\n", trap->tr_faultarea.FaultAddress); -+ if (user_pagefault (uctx, &trap->tr_faultarea) != 0) -+ { -+ ucq->ucq_state = UCQ_STOPPED; -+ -+ return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq))); -+ } -+ -+ ucq->ucq_state = UCQ_NEEDS_RESTART; -+ break; -+ -+ case CommandProcRouteFetchFault: -+ command = 
elan4_trapped_open_command (dev, ucq->ucq_cq); -+ -+ PRINTF1 (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcRouteFetchFault to vp %d\n", (int) (command >> 32)); -+ -+ if (user_resolvevp (uctx, (unsigned) (command >> 32)) != 0) -+ { -+ ucq->ucq_state = UCQ_STOPPED; -+ -+ return (deliver_trap (utrapp, UTS_INVALID_VPROC, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq), (long) (command >> 32))); -+ } -+ -+ ucq->ucq_state = UCQ_NEEDS_RESTART; -+ break; -+ -+ case CommandProcFailCountZero: -+ PRINTF0 (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcFailCountZero - reset failcount\n"); -+ -+ /* Update CPROC timeout route statistics */ -+ for (chan = 0; chan <= 1; chan++) -+ { -+ /* Was there a timeout on this channel ? */ -+ if (PackValue(trap->tr_qdesc.CQ_AckBuffers, chan) == PackTimeout) -+ { -+ /* Find the last open command for that channel to extract the relevant vp */ -+ if ((vp = cproc_open_extract_vp(uctx->uctx_ctxt.ctxt_dev, ucq->ucq_cq, chan)) != -1) -+ { -+ E4_VirtualProcessEntry route; -+ -+ kmutex_lock (&uctx->uctx_vpseg_lock); -+ location = user_process2location(uctx, NULL, vp); -+ elan4_read_route (uctx->uctx_ctxt.ctxt_dev, uctx->uctx_routetable, vp, &route); -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ node = location.loc_node; -+ -+ kmutex_lock(&uctx->uctx_ctxt.ctxt_dev->dev_lock); -+ -+ if ((node >= 0) && (node < uctx->uctx_ctxt.ctxt_dev->dev_position.pos_nodes)) -+ { -+ uctx->uctx_ctxt.ctxt_dev->dev_cproc_timeout[node]++; -+ -+ elan4_ringbuf_store(&uctx->uctx_ctxt.ctxt_dev->dev_cproc_timeout_routes, -+ &route, uctx->uctx_ctxt.ctxt_dev); -+ } -+ -+ kmutex_unlock(&uctx->uctx_ctxt.ctxt_dev->dev_lock); -+ } -+ } -+ } -+ -+ /* NOTE - we must not modify the ChannelNotCompleted bits - so modify */ -+ /* the restart count with a part-word store */ -+ elan4_updatecq (dev, ucq->ucq_cq, ucq->ucq_cq->cq_perm, user_cproc_retry_count); -+ -+ ucq->ucq_state = UCQ_NEEDS_RESTART; -+ break; -+ -+ case CommandProcAddressAlignment: -+ ucq->ucq_state = UCQ_STOPPED; -+ -+ return (deliver_trap (utrapp, UTS_ALIGNMENT_ERROR, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq))); -+ -+ case CommandProcPermissionTrap: -+ { -+ sdramaddr_t cqdesc = dev->dev_cqaddr + (elan4_cq2num(ucq->ucq_cq) * sizeof (E4_CommandQueueDesc)); -+ E4_uint64 control = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control)); -+ -+ PRINTF (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcPermissionTrap - %s\n", -+ (control & CQ_PermissionMask) != ucq->ucq_cq->cq_perm ? 
"resume from stop" : "permission denied"); -+ -+ if ((control & CQ_PermissionMask) == ucq->ucq_cq->cq_perm) -+ return (deliver_trap (utrapp, UTS_PERMISSION_DENIED, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq))); -+ -+ elan4_updatecq (dev, ucq->ucq_cq, ucq->ucq_cq->cq_perm, 0); -+ -+ ucq->ucq_state = UCQ_NEEDS_RESTART; -+ break; -+ } -+ -+ case CommandProcBadData: -+ ucq->ucq_state = UCQ_STOPPED; -+ -+ return (deliver_trap (utrapp, UTS_INVALID_COMMAND, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq))); -+ -+ default: -+ ucq->ucq_state = UCQ_STOPPED; -+ -+ return (deliver_trap (utrapp, UTS_BAD_TRAP, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq))); -+ } -+ -+ return (UTS_FINISHED); -+} -+ -+static int -+resolve_dproc_trap (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, ELAN4_DPROC_TRAP *trap) -+{ -+ ELAN_LOCATION location; -+ int node; -+ E4_VirtualProcessEntry route; -+ -+ if (DPROC_PrefetcherFault (trap->tr_status)) -+ { -+ PRINTF (uctx, DBG_DPROC, "resolve_dproc_trap: PrefetcherFault at %llx\n", trap->tr_prefetchFault.FaultAddress); -+ -+ if (user_pagefault (uctx, &trap->tr_prefetchFault) != 0) -+ return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_DPROC, trap)); -+ -+ return (queue_dma_for_retry (uctx, utrapp, &trap->tr_desc)); -+ } -+ -+ switch (DPROC_TrapType (trap->tr_status)) -+ { -+ case DmaProcRouteFetchFault: -+ PRINTF (uctx, DBG_DPROC, "resolve_dproc_trap: DmaProcRouteFetchFault vp %d\n", trap->tr_desc.dma_vproc); -+ -+ if (user_resolvevp (uctx, trap->tr_desc.dma_vproc) != 0) -+ return (deliver_trap (utrapp, UTS_INVALID_VPROC, UTS_DPROC, trap, trap->tr_desc.dma_vproc)); -+ -+ return (queue_dma_for_retry (uctx, utrapp, &trap->tr_desc /* immediate */)); -+ -+ case DmaProcFailCountError: -+ PRINTF (uctx, DBG_DPROC, "resolve_dproc_trap: DmaProcFailCountError - vp %d cookie %llx\n", -+ trap->tr_desc.dma_vproc, trap->tr_desc.dma_cookie); -+ -+ trap->tr_desc.dma_typeSize |= DMA_FailCount (user_dproc_retry_count); -+ -+ return (queue_dma_for_retry (uctx, utrapp, &trap->tr_desc /* XXXX - backoff for some time later */)); -+ -+ case DmaProcPacketAckError: -+ PRINTF (uctx, DBG_DPROC, "resolve_dproc_trap: DmaProcPacketAckError - %d%s\n", DPROC_PacketAckValue (trap->tr_status), -+ DPROC_PacketTimeout (trap->tr_status) ? 
" timeout" : ""); -+ -+ kmutex_lock (&uctx->uctx_vpseg_lock); -+ location = user_process2location(uctx, NULL, trap->tr_desc.dma_vproc); -+ elan4_read_route(uctx->uctx_ctxt.ctxt_dev, uctx->uctx_routetable, trap->tr_desc.dma_vproc, &route); -+ kmutex_unlock (&uctx->uctx_vpseg_lock); -+ node = location.loc_node; -+ -+ /* Update dproc route timeout statistics */ -+ if ((node >= 0) && (node < uctx->uctx_ctxt.ctxt_dev->dev_position.pos_nodes)) -+ { -+ kmutex_lock(&uctx->uctx_ctxt.ctxt_dev->dev_lock); -+ -+ if ((route.Values[0] != 0) || (route.Values[1] != 0)) -+ { -+ if (DPROC_PacketTimeout (trap->tr_status)) -+ { -+ uctx->uctx_ctxt.ctxt_dev->dev_dproc_timeout[node]++; -+ elan4_ringbuf_store(&uctx->uctx_ctxt.ctxt_dev->dev_dproc_timeout_routes, -+ &route, uctx->uctx_ctxt.ctxt_dev); -+ } -+ else -+ { -+ uctx->uctx_ctxt.ctxt_dev->dev_ack_errors[node]++; -+ elan4_ringbuf_store(&uctx->uctx_ctxt.ctxt_dev->dev_ack_error_routes, -+ &route, uctx->uctx_ctxt.ctxt_dev); -+ } -+ } -+ -+ kmutex_unlock(&uctx->uctx_ctxt.ctxt_dev->dev_lock); -+ } -+ -+ return (queue_dma_for_retry (uctx, utrapp, &trap->tr_desc /* XXXX - backoff for some time later */)); -+ -+ case DmaProcQueueOverflow: -+ PRINTF (uctx, DBG_DPROC, "resolve_dproc_trap: DmaProcQueueOverflow\n"); -+ return (queue_dma_for_retry (uctx, utrapp, &trap->tr_desc /* XXXX - backoff for some time later */)); -+ -+ case DmaProcRunQueueReadFault: -+ return (deliver_trap (utrapp, UTS_BAD_TRAP, UTS_DPROC, trap)); -+ -+ default: -+ printk ("resolve_dproc_trap: unknown trap type : %d\n", DPROC_TrapType(trap->tr_status)); -+ BUG(); -+ } -+ return UTS_FINISHED; -+} -+ -+int -+resolve_tproc_trap (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, ELAN4_TPROC_TRAP *trap) -+{ -+ PRINTF (uctx, DBG_TPROC, "resolve_tproc_trap: trap state = %lx\n", trap->tr_state); -+ -+ if (trap->tr_state & TS_TrapForTooManyInstructions) -+ return (deliver_trap (utrapp, UTS_BAD_TRAP, UTS_TPROC, trap)); -+ -+ if (trap->tr_state & TS_Unimplemented) -+ return (deliver_trap (utrapp, UTS_UNIMP_INSTR, UTS_TPROC, trap)); -+ -+ if (trap->tr_state & TS_DataAlignmentError) -+ return (deliver_trap (utrapp, UTS_ALIGNMENT_ERROR, UTS_TPROC, trap)); -+ -+ if ((trap->tr_state & TS_InstAccessException) && user_pagefault (uctx, &trap->tr_instFault) != 0) -+ return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_TPROC, trap)); -+ -+ if ((trap->tr_state & TS_DataAccessException) && user_pagefault (uctx, &trap->tr_dataFault) != 0) -+ return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_TPROC, trap)); -+ -+ /* If we're restarting from trap - then just need to re-issue it */ -+ if (trap->tr_pc == uctx->uctx_trestart_addr || (trap->tr_state & TS_TrappedFlag)) -+ { -+ PRINTF (uctx, DBG_TPROC, "resolve_tproc_trap: trapped in trap code PC=%llx SP=%llx\n", trap->tr_pc, trap->tr_regs[1]); -+ -+ trap->tr_regs[0] = uctx->uctx_trestart_addr; -+ } -+ else -+ { -+ E4_uint64 *sp = (E4_uint64 *) user_elan2main (uctx, trap->tr_regs[1]); -+ int i, reload; -+ -+ /* need to store the register on the stack see */ -+ /* lib_tproc_trampoline_elan4_thread.S for stack layout */ -+#define TS_STACK_OFF(REG) ((((REG)&7)) - (((REG)>>3)*8) - 8) -+ for (reload = 0, i = 0; i < 64; i++) -+ { -+ if (trap->tr_dirty & ((E4_uint64) 1 << i)) -+ { -+ PRINTF (uctx, DBG_TPROC, "resolve_tproc_trap: %%r%d [%016llx] -> %p\n", i, trap->tr_regs[i], &sp[TS_STACK_OFF(i)]); -+ -+ sulonglong ((u64 *) &sp[TS_STACK_OFF(i)], trap->tr_regs[i]); -+ -+ reload |= (1 << (i >> 3)); -+ } -+ } -+#undef TS_STACK_OFF -+ -+ PRINTF (uctx, DBG_TPROC, "resolve_tproc_trap: pc %llx npc 
%llx\n", trap->tr_pc, trap->tr_npc); -+ PRINTF (uctx, DBG_TPROC, "resolve_tproc_trap: CC %x reload %x\n", (int) (trap->tr_state >> TS_XCCshift), reload); -+ -+ trap->tr_regs[0] = uctx->uctx_trestart_addr; -+ trap->tr_regs[2] = trap->tr_pc; -+ trap->tr_regs[3] = trap->tr_npc; -+ trap->tr_regs[4] = (trap->tr_state >> TS_XCCshift) & TS_XCCmask; -+ trap->tr_regs[5] = reload; -+ } -+ -+ return (queue_thread_for_retry (uctx, utrapp, (E4_ThreadRegs *) trap->tr_regs)); -+} -+ -+static int -+resolve_iproc_trap (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, int channel) -+{ -+ USER_IPROC_TRAP *utrap = &uctx->uctx_iprocTrap[channel]; -+ ELAN4_IPROC_TRAP *trap = &utrap->ut_trap; -+ unsigned long flags; -+ -+ elan4_inspect_iproc_trap (trap); -+ -+ if (trap->tr_flags & TR_FLAG_TOOMANY_TRANS) -+ return (deliver_trap (utrapp, UTS_INVALID_TRANS, UTS_IPROC, trap, channel)); -+ -+ ASSERT (trap->tr_trappedTrans >= 0 && trap->tr_trappedTrans < trap->tr_numTransactions); -+ -+ switch (IPROC_TrapValue (trap->tr_transactions[trap->tr_trappedTrans].IProcStatusCntxAndTrType)) -+ { -+ case InputMemoryFault: -+ if (user_pagefault (uctx, &trap->tr_faultarea) != 0) -+ { -+ utrap->ut_state = UTS_IPROC_STOPPED; -+ -+ return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_IPROC, trap, channel)); -+ } -+ break; -+ -+ case InputDmaQueueOverflow: -+ case InputEventEngineTrapped: -+ /* nothing to do for these 2 - restarting will simulate the transactions */ -+ break; -+ -+ case InputEopErrorOnWaitForEop: -+ case InputEopErrorTrap: -+ break; -+ -+ case InputCrcErrorAfterPAckOk: -+ PRINTF (DBG_DEVICE, DBG_IPROC, "InputCrcErrorAfterPAckOk: flags %x\n", trap->tr_flags); -+ -+ ASSERT ((trap->tr_flags & TR_FLAG_ACK_SENT) && ((trap->tr_flags & (TR_FLAG_DMA_PACKET|TR_FLAG_BAD_TRANS)) || -+ ((trap->tr_flags & TR_FLAG_EOP_ERROR) && trap->tr_identifyTrans == TR_TRANS_INVALID))); -+ break; -+ -+ case InputDiscardAfterAckOk: -+ return (deliver_trap (utrapp, UTS_INVALID_TRANS, UTS_IPROC, trap, channel)); -+ -+ case InputAddressAlignment: -+ return (deliver_trap (utrapp, UTS_ALIGNMENT_ERROR, UTS_IPROC, trap, channel)); -+ -+ case InputInvalidTransType: -+ return (deliver_trap (utrapp, UTS_INVALID_TRANS, UTS_IPROC, trap, channel)); -+ -+ default: -+ printk ("resolve_iproc_trap: unknown trap type %d\n", IPROC_TrapValue (trap->tr_transactions[trap->tr_trappedTrans].IProcStatusCntxAndTrType)); -+ BUG(); -+ /* NOTREACHED */ -+ } -+ -+ if (! (trap->tr_flags & TR_FLAG_ACK_SENT) || (trap->tr_flags & TR_FLAG_EOP_BAD)) -+ { -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ -+ utrap->ut_state = UTS_IPROC_RUNNING; -+ -+ user_stop_nacking (uctx, channel ? UCTX_IPROC_CH0_TRAPPED : UCTX_IPROC_CH1_TRAPPED); -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ } -+ else if ((trap->tr_flags & (TR_FLAG_DMA_PACKET | TR_FLAG_BAD_TRANS)) || ((trap->tr_flags & TR_FLAG_EOP_ERROR) && (trap->tr_identifyTrans == TR_TRANS_INVALID))) -+ { -+ /* -+ * TR_FLAG_DMA_PACKET means a DMA packet has faulted. -+ * -+ * TR_FLAG_BAD_TRANS means we have a transaction with a bad CRC after the transaction -+ * which sent the ack - this implies it's an overlapped ack DMA packet -+ * -+ * TR_FLAG_EOP_ERROR means we've received an EOP reset - if we hadn't seen an identify -+ * transaction then it's a DMA packet. -+ * -+ * To ensure that the DMA processor works correctly the next packet must be NACKed to -+ * cause it to resend this one. -+ */ -+ PRINTF (uctx, DBG_IPROC, "resolve_iproc_trap: %s during DMA packet\n", -+ (trap->tr_flags & TR_FLAG_BAD_TRANS) ? 
"BadTransaction" : (trap->tr_flags & TR_FLAG_EOP_ERROR) ? "EopError" : "trap"); -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ -+ if (trap->tr_flags & TR_FLAG_DMA_PACKET) -+ { -+ if (! (trap->tr_flags & TR_FLAG_BAD_TRANS)) -+ utrap->ut_state = UTS_IPROC_EXECUTE_PACKET; -+ else -+ { -+ kcondvar_t waithere; -+ -+ /* We must ensure that the next packet is always nacked, so -+ * we wait here for an output timeout before dropping the -+ * context filter - we just pause here for 4 mS */ -+ kcondvar_init (&waithere); -+ kcondvar_timedwait (&waithere, &uctx->uctx_spinlock, &flags, lbolt + (HZ/250) + 1);; -+ kcondvar_destroy (&waithere); -+ -+ utrap->ut_state = UTS_IPROC_RUNNING; -+ -+ user_stop_nacking (uctx, channel ? UCTX_IPROC_CH0_TRAPPED : UCTX_IPROC_CH1_TRAPPED); -+ } -+ } -+ else -+ { -+ utrap->ut_state = UTS_IPROC_RUNNING; -+ -+ user_stop_nacking (uctx, channel ? UCTX_IPROC_CH0_TRAPPED : UCTX_IPROC_CH1_TRAPPED); -+ } -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ } -+ else if (trap->tr_flags & TR_FLAG_EOP_ERROR) -+ { -+ PRINTF (uctx, DBG_IPROC, "resolve_iproc_trap: EopError with identify\n"); -+ -+ utrap->ut_state = UTS_IPROC_NETWORK_ERROR; -+ } -+ else -+ { -+ PRINTF (uctx, DBG_IPROC, "resolve_iproc_trap: execute packet\n"); -+ -+ utrap->ut_state = UTS_IPROC_EXECUTE_PACKET; -+ } -+ -+ return UTS_FINISHED; -+} -+ -+ -+static int -+resolve_cproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp) -+{ -+ struct list_head *entry; -+ int res = UTS_FINISHED; -+ -+ kmutex_lock (&uctx->uctx_cqlock); -+ list_for_each (entry, &uctx->uctx_cqlist) { -+ USER_CQ *ucq = list_entry (entry, USER_CQ, ucq_link); -+ -+ if (ucq->ucq_state == UCQ_TRAPPED) -+ { -+ res = resolve_cproc_trap (uctx, utrapp, ucq); -+ -+ if (res != UTS_FINISHED) -+ break; -+ } -+ -+ if (ucq->ucq_errored) -+ { -+ ucq->ucq_errored = 0; -+ res = deliver_trap (utrapp, UTS_CPROC_ERROR, UTS_CPROC, &ucq->ucq_trap, elan4_cq2idx(ucq->ucq_cq)); -+ break; -+ } -+ } -+ kmutex_unlock (&uctx->uctx_cqlock); -+ -+ return (res); -+} -+ -+static int -+resolve_eproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp) -+{ -+ unsigned long flags; -+ int res; -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ while (! 
RING_QUEUE_EMPTY (uctx->uctx_eprocTrapQ)) -+ { -+ ELAN4_EPROC_TRAP trap = *RING_QUEUE_FRONT (uctx->uctx_eprocTrapQ, uctx->uctx_eprocTraps); -+ -+ (void) RING_QUEUE_REMOVE (uctx->uctx_eprocTrapQ); -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ -+ if ((res = resolve_eproc_trap (uctx, utrapp, &trap)) != UTS_FINISHED) -+ return (res); -+ -+ if (fixup_eproc_trap (uctx, &trap, 0) == 0) -+ { -+ PRINTF (uctx, DBG_EPROC, "resolve_eproc_trap: could not fixup eproc trap - requeue it\n"); -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ if (RING_QUEUE_REALLY_FULL(uctx->uctx_eprocTrapQ)) -+ { -+ PRINTF (uctx, DBG_EPROC, "resolve_eproc_trap: queue overflow\n"); -+ uctx->uctx_status |= UCTX_EPROC_QUEUE_OVERFLOW; -+ } -+ else -+ { -+ /* Requeue at front to preserve setevent ordering */ -+ /* GNAT 7504: Must move fptr before writing over it */ -+ (void) RING_QUEUE_ADD_FRONT(uctx->uctx_eprocTrapQ); -+ -+ *RING_QUEUE_FRONT(uctx->uctx_eprocTrapQ, uctx->uctx_eprocTraps) = trap; -+ } -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ -+ return UTS_RESCHEDULE; -+ } -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ } -+ -+ if (uctx->uctx_status & UCTX_EPROC_QUEUE_FULL) -+ user_stop_stopping (uctx, UCTX_EPROC_QUEUE_FULL); -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ return (UTS_FINISHED); -+} -+ -+static int -+resolve_dproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp) -+{ -+ unsigned long flags; -+ int res; -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ while (! RING_QUEUE_EMPTY (uctx->uctx_dprocTrapQ)) -+ { -+ ELAN4_DPROC_TRAP trap = *RING_QUEUE_FRONT(uctx->uctx_dprocTrapQ, uctx->uctx_dprocTraps); -+ -+ (void) RING_QUEUE_REMOVE (uctx->uctx_dprocTrapQ); -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ -+ if ((res = resolve_dproc_trap (uctx, utrapp, &trap)) != UTS_FINISHED) -+ return (res); -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ } -+ -+ if (uctx->uctx_status & UCTX_DPROC_QUEUE_FULL) -+ user_stop_stopping (uctx, UCTX_DPROC_QUEUE_FULL); -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ return (UTS_FINISHED); -+} -+ -+static int -+resolve_tproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp) -+{ -+ unsigned long flags; -+ int res; -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ while (! 
RING_QUEUE_EMPTY (uctx->uctx_tprocTrapQ)) -+ { -+ ELAN4_TPROC_TRAP trap = *RING_QUEUE_FRONT(uctx->uctx_tprocTrapQ, uctx->uctx_tprocTraps); -+ -+ (void) RING_QUEUE_REMOVE (uctx->uctx_tprocTrapQ); -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ -+ if ((res = resolve_tproc_trap (uctx, utrapp, &trap)) != UTS_FINISHED) -+ return (res); -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ } -+ -+ if (uctx->uctx_status & UCTX_TPROC_QUEUE_FULL) -+ user_stop_stopping (uctx, UCTX_TPROC_QUEUE_FULL); -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ return (UTS_FINISHED); -+} -+ -+static int -+resolve_iproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp) -+{ -+ unsigned long flags; -+ int i, res; -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ for (i = 0; i < 2; i++) -+ if (uctx->uctx_iprocTrap[i].ut_state == UTS_IPROC_TRAPPED) -+ { -+ uctx->uctx_iprocTrap[i].ut_state = UTS_IPROC_RESOLVING; -+ spin_unlock_irqrestore(&uctx->uctx_spinlock, flags); -+ -+ if ((res = resolve_iproc_trap (uctx, utrapp, i)) != UTS_FINISHED) -+ return (res); -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ } -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ -+ return (UTS_FINISHED); -+} -+ -+static int -+resolve_all_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp) -+{ -+ int res; -+ -+ if ((res = resolve_iproc_traps (uctx, utrapp)) != UTS_FINISHED || -+ (res = resolve_cproc_traps (uctx, utrapp)) != UTS_FINISHED || -+ (res = resolve_eproc_traps (uctx, utrapp)) != UTS_FINISHED || -+ (res = resolve_dproc_traps (uctx, utrapp)) != UTS_FINISHED || -+ (res = resolve_tproc_traps (uctx, utrapp)) != UTS_FINISHED) -+ return (res); -+ -+ if (uctx->uctx_status & UCTX_OVERFLOW_REASONS) -+ { -+ PRINTF (uctx, DBG_TRAP, "resolve_all_traps: overflow reasons %x\n", uctx->uctx_status); -+ -+ return (deliver_trap (utrapp, UTS_QUEUE_OVERFLOW, UTS_NOPROC, NULL, uctx->uctx_status)); -+ } -+ -+ if (uctx->uctx_status & UCTX_ERROR_REASONS) -+ return (deliver_trap (utrapp, UTS_QUEUE_ERROR, UTS_NOPROC, NULL, uctx->uctx_status)); -+ -+ return (UTS_FINISHED); -+} -+ -+static int -+execute_iproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp) -+{ -+ unsigned long flags; -+ int i; -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ for (i = 0; i < 2; i++) -+ switch (uctx->uctx_iprocTrap[i].ut_state) -+ { -+ case UTS_IPROC_EXECUTE_PACKET: -+ uctx->uctx_iprocTrap[i].ut_state = UTS_IPROC_EXECUTING; -+ spin_unlock_irqrestore(&uctx->uctx_spinlock, flags); -+ -+ return (deliver_trap (utrapp, UTS_EXECUTE_PACKET, UTS_IPROC, &uctx->uctx_iprocTrap[i].ut_trap, i)); -+ -+ case UTS_IPROC_NETWORK_ERROR: -+ spin_unlock_irqrestore(&uctx->uctx_spinlock, flags); -+ -+ return (deliver_trap (utrapp, UTS_NETWORK_ERROR_TRAP, UTS_IPROC, &uctx->uctx_iprocTrap[i].ut_trap, i)); -+ } -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ -+ return (UTS_FINISHED); -+} -+ -+static int -+progress_neterr (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ if (! 
RING_QUEUE_EMPTY (uctx->uctx_msgQ)) -+ { -+ ELAN4_NETERR_MSG msg = *RING_QUEUE_FRONT (uctx->uctx_msgQ, uctx->uctx_msgs); -+ -+ (void) RING_QUEUE_REMOVE (uctx->uctx_msgQ); -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ -+ return deliver_trap (utrapp, UTS_NETWORK_ERROR_MSG, UTS_NETERR_MSG, &msg, user_location2process (uctx, msg.msg_sender)); -+ } -+ -+ if (uctx->uctx_status & UCTX_NETERR_TIMER) -+ { -+ uctx->uctx_status &= ~UCTX_NETERR_TIMER; -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ -+ return deliver_trap (utrapp, UTS_NETWORK_ERROR_TIMER, UTS_NOPROC, NULL); -+ } -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ -+ return (UTS_FINISHED); -+} -+ -+static void -+restart_command_queues (USER_CTXT *uctx) -+{ -+ struct list_head *entry; -+ -+ ASSERT (SPINLOCK_HELD (&uctx->uctx_spinlock)); -+ -+ list_for_each (entry, &uctx->uctx_cqlist) { -+ USER_CQ *ucq = list_entry (entry, USER_CQ, ucq_link); -+ -+ if (ucq->ucq_state == UCQ_NEEDS_RESTART) -+ { -+ ucq->ucq_state = UCQ_RUNNING; -+ -+ elan4_restartcq (uctx->uctx_ctxt.ctxt_dev, ucq->ucq_cq); -+ } -+ } -+} -+ -+static int -+restart_dmas (USER_CTXT *uctx) -+{ -+ PRINTF (uctx, DBG_TRAP, "restart_dmas: back=%d front=%d\n", uctx->uctx_dmaQ.q_back, uctx->uctx_dmaQ.q_front); -+ -+ while (! RING_QUEUE_EMPTY (uctx->uctx_dmaQ)) -+ { -+ if (! user_ddcq_check (uctx, 7)) -+ return (0); -+ -+ user_ddcq_run_dma (uctx, RING_QUEUE_FRONT(uctx->uctx_dmaQ, uctx->uctx_dmas)); -+ -+ (void) RING_QUEUE_REMOVE (uctx->uctx_dmaQ); -+ } -+ -+ return (1); -+} -+ -+static int -+restart_threads (USER_CTXT *uctx) -+{ -+ PRINTF (uctx, DBG_TRAP, "restart_threads: back=%d front=%d\n", uctx->uctx_threadQ.q_back, uctx->uctx_threadQ.q_front); -+ -+ while (! RING_QUEUE_EMPTY (uctx->uctx_threadQ)) -+ { -+ if (! user_ddcq_check (uctx, 7)) -+ return (0); -+ -+ user_ddcq_run_thread (uctx, RING_QUEUE_FRONT(uctx->uctx_threadQ, uctx->uctx_threads)); -+ -+ (void) RING_QUEUE_REMOVE (uctx->uctx_threadQ); -+ } -+ -+ return (1); -+} -+ -+int -+user_resume_eproc_trap (USER_CTXT *uctx, E4_Addr addr) -+{ -+ PRINTF2 (uctx, DBG_RESUME, "user_resume_eproc_trap: addr=%llx -> %s\n", addr, user_ddcq_check(uctx, 2) ? "success" : "EAGAIN"); -+ -+ if (! 
user_ddcq_check (uctx, 2)) -+ return (-EAGAIN); -+ -+ user_ddcq_setevent (uctx, addr); -+ -+ return (0); -+} -+ -+int -+user_resume_cproc_trap (USER_CTXT *uctx, unsigned indx) -+{ -+ struct list_head *entry; -+ unsigned long flags; -+ -+ PRINTF1 (uctx, DBG_RESUME, "user_resume_cproc_trap: indx=%d\n", indx); -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ -+ list_for_each (entry, &uctx->uctx_cqlist) { -+ USER_CQ *ucq = list_entry (entry, USER_CQ, ucq_link); -+ -+ if (elan4_cq2idx(ucq->ucq_cq) == indx && ucq->ucq_state == UCQ_STOPPED && !(ucq->ucq_flags & UCQ_SYSTEM)) -+ { -+ ucq->ucq_state = UCQ_NEEDS_RESTART; -+ -+ user_signal_trap (uctx); -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ return (0); -+ } -+ } -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ -+ return (-EINVAL); -+} -+ -+int -+user_resume_dproc_trap (USER_CTXT *uctx, E4_DMA *dma) -+{ -+ unsigned long flags; -+ int res = 0; -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ if (RING_QUEUE_FULL (uctx->uctx_dmaQ)) -+ res = -ENOMEM; -+ else -+ { -+ *RING_QUEUE_BACK (uctx->uctx_dmaQ, uctx->uctx_dmas) = *dma; -+ (void) RING_QUEUE_ADD (uctx->uctx_dmaQ); -+ -+ user_signal_trap (uctx); -+ } -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ return (res); -+} -+ -+int -+user_resume_tproc_trap (USER_CTXT *uctx, E4_ThreadRegs *regs) -+{ -+ unsigned long flags; -+ int res = 0; -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ if (RING_QUEUE_FULL (uctx->uctx_threadQ)) -+ res = -ENOMEM; -+ else -+ { -+ *RING_QUEUE_BACK (uctx->uctx_threadQ, uctx->uctx_threads) = *regs; -+ (void) RING_QUEUE_ADD (uctx->uctx_threadQ); -+ -+ user_signal_trap (uctx); -+ } -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ return (res); -+} -+ -+int -+user_resume_iproc_trap (USER_CTXT *uctx, unsigned channel, unsigned trans, -+ E4_IprocTrapHeader *hdrp, E4_IprocTrapData *datap) -+{ -+ unsigned long flags; -+ int res = 0; -+ -+ if (channel >= 2) -+ return (-EINVAL); -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ if (uctx->uctx_iprocTrap[channel].ut_state != UTS_IPROC_STOPPED && -+ uctx->uctx_iprocTrap[channel].ut_state != UTS_IPROC_EXECUTING && -+ uctx->uctx_iprocTrap[channel].ut_state != UTS_IPROC_NETWORK_ERROR) -+ res = -EINVAL; -+ else -+ { -+ ELAN4_IPROC_TRAP *trap = &uctx->uctx_iprocTrap[channel].ut_trap; -+ -+ if (trans < trap->tr_numTransactions) -+ { -+ PRINTF1 (uctx, DBG_RESUME, "user_resume_iproc_trap: trans=%d -> execute\n", trans); -+ -+ uctx->uctx_iprocTrap[channel].ut_state = UTS_IPROC_EXECUTE_PACKET; -+ trap->tr_trappedTrans = trans; -+ trap->tr_transactions[trans] = *hdrp; -+ trap->tr_dataBuffers[trans] = *datap; -+ } -+ else -+ { -+ PRINTF1 (uctx, DBG_RESUME, "user_resume_iproc_trap: trans=%d -> running\n", trans); -+ -+ uctx->uctx_iprocTrap[channel].ut_state = UTS_IPROC_RUNNING; -+ -+ user_stop_nacking (uctx, channel ? 
UCTX_IPROC_CH0_TRAPPED : UCTX_IPROC_CH1_TRAPPED); -+ } -+ } -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ -+ return (res); -+} -+ -+int -+__categorise_command (E4_uint64 command, int *cmdSize) -+{ -+ switch (command & 0x3) -+ { -+ case RUN_THREAD_CMD: *cmdSize = 7; break; -+ -+ default: -+ switch (command & 0x7) -+ { -+ case WRITE_DWORD_CMD: *cmdSize = 2; break; -+ case ADD_DWORD_CMD: *cmdSize = 2; break; -+ -+ default: -+ switch (command & 0xF) -+ { -+ case OPEN_STEN_PKT_CMD: -+ *cmdSize = 1; -+ return 1; -+ -+ case COPY64_CMD: *cmdSize = 2; break; -+ case GUARD_CMD: *cmdSize = 1; break; -+ case INTERRUPT_CMD: *cmdSize = 1; break; -+ case RUN_DMA_CMD: *cmdSize = 7; break; -+ -+ default: -+ switch (command & 0x1f) -+ { -+ case SEND_TRANS_CMD: -+ *cmdSize = 2 + (((command >> 16) & TR_SIZE_MASK) >> TR_SIZE_SHIFT); -+ return 2; -+ -+ case SET_EVENT_CMD: *cmdSize = 1; break; -+ case SET_EVENTN_CMD: *cmdSize = 2; break; -+ case WAIT_EVENT_CMD: *cmdSize = 4; break; -+ -+ default: -+ switch (command & 0x3f) -+ { -+ case NOP_CMD: *cmdSize = 1; break; -+ case MAKE_EXT_CLEAN_CMD: *cmdSize = 1; break; -+ default: -+ return 3; -+ } -+ break; -+ } -+ } -+ } -+ } -+ -+ return 0; -+} -+ -+int -+__whole_command (sdramaddr_t *commandPtr, sdramaddr_t insertPtr, unsigned int cqSize, unsigned int cmdSize) -+{ -+ /* Move onto next command */ -+ while (cmdSize-- && (*commandPtr) != insertPtr) -+ *commandPtr = ((*commandPtr) & ~(cqSize-1)) | (((*commandPtr) + sizeof (E4_uint64)) & (cqSize-1)); -+ -+ return cmdSize == -1; -+} -+ -+int -+user_neterr_sten (USER_CTXT *uctx, unsigned int vp, E4_uint64 cookie, int waitforeop) -+{ -+ ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev; -+ int found = 0; -+ struct list_head *el; -+ -+ user_swapout (uctx, UCTX_NETERR_FIXUP); -+ -+ kmutex_lock (&uctx->uctx_cqlock); -+ list_for_each (el, &uctx->uctx_cqlist) { -+ USER_CQ *ucq = list_entry (el, USER_CQ, ucq_link); -+ -+ if ((ucq->ucq_cq->cq_perm & CQ_STENEnableBit) != 0) -+ { -+ sdramaddr_t cqdesc = dev->dev_cqaddr + (elan4_cq2num(ucq->ucq_cq) * sizeof (E4_CommandQueueDesc)); -+ E4_uint64 queuePtrs = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs)); -+ sdramaddr_t insertPtr = (queuePtrs & CQ_PtrMask); -+ sdramaddr_t commandPtr = CQ_CompletedPtr (queuePtrs); -+ unsigned int cqSize = CQ_Size ((queuePtrs >> CQ_SizeShift) & CQ_SizeMask); -+ E4_uint64 openCommand = 0; -+ -+ if (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA && (queuePtrs & CQ_RevB_ReorderingQueue)) -+ { -+ E4_uint32 oooMask = elan4_sdram_readl (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_HoldingValue)); -+ -+ for (; (oooMask & 1) != 0; oooMask >>= 1) -+ insertPtr = (insertPtr & ~(cqSize-1)) | ((insertPtr + sizeof (E4_uint64)) & (cqSize-1)); -+ } -+ -+ while (commandPtr != insertPtr) -+ { -+ E4_uint64 command = elan4_sdram_readq (dev, commandPtr); -+ sdramaddr_t identifyPtr; -+ unsigned int cmdSize; -+ -+ switch (__categorise_command (command, &cmdSize)) -+ { -+ case 0: -+ (void) __whole_command (&commandPtr, insertPtr, cqSize, cmdSize); -+ break; -+ -+ case 1: -+ PRINTF (uctx, DBG_NETERR, "user_neterr_sten: cq=%d OPEN %llx\n", elan4_cq2num (ucq->ucq_cq), command); -+ -+ if ((command >> 32) == vp) -+ openCommand = command; -+ -+ (void) __whole_command (&commandPtr, insertPtr, cqSize, cmdSize); -+ break; -+ -+ case 2: -+ PRINTF (uctx, DBG_NETERR, "user_neterr_sten: cq=%d SENDTRANS %llx\n", elan4_cq2num (ucq->ucq_cq), command); -+ -+ if (openCommand == 0) -+ (void) __whole_command (&commandPtr, insertPtr, cqSize, 
cmdSize); -+ else -+ { -+ switch ((command >> 16) & (TR_OPCODE_MASK | TR_SIZE_MASK)) -+ { -+ case TR_IDENTIFY & (TR_OPCODE_MASK | TR_SIZE_MASK): -+ case TR_REMOTEDMA & (TR_OPCODE_MASK | TR_SIZE_MASK): -+ PRINTF (uctx, DBG_NETERR, "user_neterr_sten: TR_IDENTIFY/TR_REMOTEDMA\n"); -+ identifyPtr = (commandPtr & ~(cqSize-1)) | ((commandPtr + sizeof (E4_uint64)) & (cqSize-1)); -+ break; -+ -+ case TR_SETEVENT_IDENTIFY & (TR_OPCODE_MASK | TR_SIZE_MASK): -+ case TR_INPUT_Q_COMMIT & (TR_OPCODE_MASK | TR_SIZE_MASK): -+ PRINTF (uctx, DBG_NETERR, "user_neterr_sten: TR_SETEVENT_IDENTIFY/TR_INPUT_Q_COMMIT\n"); -+ identifyPtr = (commandPtr & ~(cqSize-1)) | ((commandPtr + 2*sizeof (E4_uint64)) & (cqSize-1)); -+ break; -+ -+ case TR_ADDWORD & (TR_OPCODE_MASK | TR_SIZE_MASK): -+ PRINTF (uctx, DBG_NETERR, "user_neterr_sten: TR_ADDWORD\n"); -+ identifyPtr = (commandPtr & ~(cqSize-1)) | ((commandPtr + 3*sizeof (E4_uint64)) & (cqSize-1)); -+ break; -+ -+ case TR_TESTANDWRITE & (TR_OPCODE_MASK | TR_SIZE_MASK): -+ PRINTF (uctx, DBG_NETERR, "user_neterr_sten: TR_TESTANDWRITE\n"); -+ identifyPtr = (commandPtr & ~(cqSize-1)) | ((commandPtr + 4*sizeof (E4_uint64)) & (cqSize-1)); -+ break; -+ -+ default: -+ identifyPtr = 0; -+ } -+ -+ if (! __whole_command (&commandPtr, insertPtr, cqSize, cmdSize)) -+ { -+ PRINTF (uctx, DBG_NETERR, "user_neterr_sten: not whole command\n"); -+ openCommand = 0; -+ } -+ -+ else if (identifyPtr) -+ { -+ E4_uint64 tcookie = elan4_sdram_readq (dev, identifyPtr); -+ -+ PRINTF (uctx, DBG_NETERR, "user_neterr_sten: cookie=%llx [%llx]\n", tcookie, cookie); -+ -+ if (tcookie == cookie) -+ { -+ unsigned int vchan = (openCommand >> 4) & 0x1f; -+ -+ PRINTF (uctx, DBG_NETERR, "user_neterr_sten: cookie matches - vchan=%d\n", vchan); -+ -+ if (! waitforeop) -+ { -+ /* Alter the CQ_AckBuffer for this channel to indicate an -+ * ack was received */ -+ E4_uint64 value = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_AckBuffers)); -+ E4_uint64 nvalue = ((value & ~((E4_uint64)0xf << ((vchan & 0xf) << 2))) | -+ ((E4_uint64) PackOk << ((vchan & 0xf) << 2))); -+ -+ PRINTF (uctx, DBG_NETERR, "user_neterr_sten: CQ_AckBuffers %llx -> %llx\n", value, nvalue); -+ -+ elan4_sdram_writeq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_AckBuffers), nvalue); -+ pioflush_sdram (dev); -+ } -+ -+ found++; -+ } -+ openCommand = 0; -+ } -+ -+ if ((command >> 16) & TR_LAST_AND_SEND_ACK) -+ openCommand = 0; -+ } -+ break; -+ -+ case 3: -+ PRINTF (uctx, DBG_NETERR, "user_neterr_sten: invalid command %llx\n", command); -+ kmutex_unlock (&uctx->uctx_cqlock); -+ return -EINVAL; -+ } -+ -+ } -+ } -+ } -+ kmutex_unlock (&uctx->uctx_cqlock); -+ -+ user_swapin (uctx, UCTX_NETERR_FIXUP); -+ -+ return found; -+} -+ -+int -+user_neterr_dma (USER_CTXT *uctx, unsigned int vp, E4_uint64 cookie, int waitforeop) -+{ -+ unsigned long flags; -+ int found = 0; -+ int idx; -+ -+ user_swapout (uctx, UCTX_NETERR_FIXUP); -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ RING_QUEUE_ITERATE (uctx->uctx_dmaQ, idx) { -+ E4_DMA *dma = &uctx->uctx_dmas[idx]; -+ -+ if (dma->dma_vproc == vp && dma->dma_cookie == cookie) -+ { -+ PRINTF (uctx, DBG_NETERR, "user_neterr_dma: dmaQ matches %s\n", waitforeop ? "waitforeop" : "remove remoteness"); -+ -+ if (! 
waitforeop)
-+ {
-+ dma->dma_dstEvent = 0;
-+ dma->dma_typeSize = DMA_ShMemWrite | DMA_Context (dma->dma_typeSize);
-+ }
-+ found++;
-+ }
-+ }
-+
-+ RING_QUEUE_ITERATE (uctx->uctx_dprocTrapQ, idx) {
-+ ELAN4_DPROC_TRAP *trap = &uctx->uctx_dprocTraps[idx];
-+
-+ if (trap->tr_desc.dma_vproc == vp && trap->tr_desc.dma_cookie == cookie)
-+ {
-+ PRINTF (uctx, DBG_NETERR, "user_neterr_dma: dmaTrapQ matches %s\n", waitforeop ? "waitforeop" : "remove remoteness");
-+
-+ if (! waitforeop)
-+ {
-+ trap->tr_desc.dma_dstEvent = 0;
-+ trap->tr_desc.dma_typeSize = DMA_ShMemWrite | DMA_Context (trap->tr_desc.dma_typeSize);
-+ }
-+ found++;
-+ }
-+ }
-+
-+ /* The device driver command queue should be empty at this point ! */
-+ if (user_ddcq_flush (uctx) == 0)
-+ found = -EAGAIN;
-+
-+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
-+
-+ user_swapin (uctx, UCTX_NETERR_FIXUP);
-+
-+ return found;
-+}
-+
-+int
-+user_trap_handler (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, int nticks)
-+{
-+ unsigned long entered = jiffies;
-+ unsigned int need_reenter = 0;
-+ unsigned long flags;
-+ int res;
-+ int tbl;
-+
-+ spin_lock_irqsave (&uctx->uctx_spinlock, flags);
-+
-+ PRINTF1 (uctx, DBG_TRAP, "user_trap_handler: entered state=%d\n", uctx->uctx_trap_state);
-+
-+ uctx->uctx_trap_count++;
-+
-+ for (;;)
-+ {
-+ if (uctx->uctx_status & UCTX_SWAPPED_REASONS)
-+ {
-+ PRINTF0 (uctx, DBG_TRAP, "user_trap_handler: exiting on swapped reasons\n");
-+
-+ res = UTS_FINISHED;
-+ goto no_more_to_do;
-+ }
-+
-+ if ((long) (jiffies - entered) > HZ)
-+ {
-+ PRINTF0 (uctx, DBG_TRAP, "user_trap_handler: exiting for reschedule\n");
-+ res = UTS_RESCHEDULE;
-+ goto no_more_to_do;
-+ }
-+
-+ switch (uctx->uctx_trap_state)
-+ {
-+ case UCTX_TRAP_ACTIVE:
-+ uctx->uctx_trap_state = UCTX_TRAP_SLEEPING;
-+
-+ if (nticks == 0 || need_reenter || kcondvar_timedwaitsig (&uctx->uctx_wait, &uctx->uctx_spinlock, &flags, lbolt + nticks) != CV_RET_NORMAL)
-+ {
-+ PRINTF0 (uctx, DBG_TRAP, "user_trap_handler: exiting by kcondvar_timedwaitsig\n");
-+
-+ res = UTS_FINISHED;
-+ goto no_more_to_do;
-+ }
-+
-+ /* Have slept above, so resample entered */
-+ entered = jiffies;
-+
-+ uctx->uctx_trap_state = UCTX_TRAP_SIGNALLED;
-+ continue;
-+
-+ case UCTX_TRAP_IDLE:
-+ case UCTX_TRAP_SIGNALLED:
-+ uctx->uctx_trap_state = UCTX_TRAP_ACTIVE;
-+ break;
-+ }
-+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
-+
-+ PRINTF2 (uctx, DBG_TRAP, "user_trap_handler: resolve traps - state=%d status=%x\n", uctx->uctx_trap_state, uctx->uctx_status);
-+
-+ switch ((res = resolve_all_traps (uctx, utrapp)))
-+ {
-+ case UTS_FINISHED:
-+ break;
-+
-+ case UTS_RESCHEDULE:
-+ need_reenter++;
-+ break;
-+
-+ default:
-+ spin_lock_irqsave (&uctx->uctx_spinlock, flags);
-+ goto no_more_to_do;
-+ }
-+
-+ spin_lock_irqsave (&uctx->uctx_spinlock, flags);
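-+ /* The restart phase below may only proceed once the device driver
-+ * command queue has drained; otherwise re-enter later. */
-+ if (! user_ddcq_flush (uctx))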
-+ {
-+ PRINTF0 (uctx, DBG_TRAP, "user_trap_handler: ddcq not flushed - re-enter\n");
-+ need_reenter++;
-+ continue;
-+ }
-+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
-+
-+ for(tbl=0; tbl < NUM_HASH_TABLES; tbl++)
-+ if (uctx->uctx_ctxt.shuffle_needed[tbl])
-+ elan4mmu_do_shuffle (&uctx->uctx_ctxt, tbl);
-+
-+ if ((res = progress_neterr (uctx, utrapp)) != UTS_FINISHED)
-+ {
-+ spin_lock_irqsave (&uctx->uctx_spinlock, flags);
-+ goto no_more_to_do;
-+ }
-+
-+ if ((res = execute_iproc_traps (uctx, utrapp)) != UTS_FINISHED)
-+ {
-+ spin_lock_irqsave (&uctx->uctx_spinlock, flags);
-+ goto no_more_to_do;
-+ }
-+
-+ PRINTF2 (uctx, DBG_TRAP, "user_trap_handler: restart items - state=%d status=%x\n", uctx->uctx_trap_state, uctx->uctx_status);
-+
-+ spin_lock_irqsave (&uctx->uctx_spinlock, flags);
-+ if (UCTX_RUNNABLE (uctx))
-+ {
-+ restart_command_queues (uctx);
-+
-+ if (! restart_threads (uctx) || ! restart_dmas (uctx))
-+ {
-+ PRINTF0 (uctx, DBG_TRAP, "user_trap_handler: ddcq full - re-enter\n");
-+ need_reenter++;
-+ }
-+ }
-+ }
-+ no_more_to_do:
-+ uctx->uctx_trap_state = UCTX_TRAP_IDLE;
-+
-+ /*
-+ * Always ensure that the command queue is flushed with a flow control
-+ * write, so that on the next trap we (hopefully) find it empty and so
-+ * can immediately restart the context. Also if we need to re-enter
-+ * the trap handler and don't have an interrupt outstanding, then issue
-+ * one now.
-+ */
-+ user_ddcq_flush (uctx);
-+ if (need_reenter && uctx->uctx_ddcq_intr == 0)
-+ {
-+ uctx->uctx_ddcq_intr++;
-+ user_ddcq_intr (uctx);
-+ }
-+
-+ if (--uctx->uctx_trap_count == 0 && (uctx->uctx_status & UCTX_SWAPPING))
-+ kcondvar_wakeupall (&uctx->uctx_wait, &uctx->uctx_spinlock);
-+
-+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
-+
-+ /* Should update the user trap area in this case as deliver_trap()
-+ * has not been called
-+ */
-+ if (res == UTS_RESCHEDULE)
-+ put_user (res, &utrapp->ut_type);
-+
-+ PRINTF2 (uctx, DBG_TRAP, "user_trap_handler: finished state=%d res=%d\n", uctx->uctx_trap_state, res);
-+
-+ return (res == UTS_EFAULT ? -EFAULT : 0);
-+}
-+
-+USER_CQ *
-+user_alloccq (USER_CTXT *uctx, unsigned cqsize, unsigned perm, unsigned uflags)
-+{
-+ USER_CQ *ucq;
-+ unsigned long flags;
-+
-+ KMEM_ZALLOC (ucq, USER_CQ *, sizeof (USER_CQ), 1);
-+
-+ if (ucq == (USER_CQ *) NULL)
-+ return ERR_PTR(-ENOMEM);
-+
-+ /* NOTE - do not allow the user to create high-priority queues as we only flush through the low-priority run queues */
-+ if ((ucq->ucq_cq = elan4_alloccq (&uctx->uctx_ctxt, cqsize, perm, (uflags & UCQ_REORDER) ? 
CQ_Reorder : 0)) == NULL) -+ { -+ KMEM_FREE (ucq, sizeof (USER_CQ)); -+ -+ PRINTF2 (uctx, DBG_CQ, "user_alloccq: failed elan4_allocq cqsize %d uflags %x\n", cqsize, uflags); -+ -+ return ERR_PTR(-ENOMEM); -+ } -+ -+ atomic_set (&ucq->ucq_ref, 1); -+ -+ ucq->ucq_state = UCQ_RUNNING; -+ ucq->ucq_flags = uflags; -+ -+ PRINTF3 (uctx, DBG_CQ, "user_alloccq: ucq=%p idx=%d cqnum=%d\n", ucq, elan4_cq2idx (ucq->ucq_cq), elan4_cq2num(ucq->ucq_cq)); -+ -+ /* chain it onto the context */ -+ kmutex_lock (&uctx->uctx_cqlock); -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ list_add (&ucq->ucq_link, &uctx->uctx_cqlist); -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ kmutex_unlock (&uctx->uctx_cqlock); -+ -+ return (ucq); -+} -+ -+USER_CQ * -+user_findcq (USER_CTXT *uctx, unsigned idx) -+{ -+ struct list_head *entry; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ list_for_each (entry, &uctx->uctx_cqlist) { -+ USER_CQ *ucq = list_entry (entry, USER_CQ, ucq_link); -+ -+ if (elan4_cq2idx(ucq->ucq_cq) == idx) -+ { -+ atomic_inc (&ucq->ucq_ref); -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ return (ucq); -+ } -+ } -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ -+ return (NULL); -+} -+ -+void -+user_dropcq (USER_CTXT *uctx, USER_CQ *ucq) -+{ -+ unsigned long flags; -+ -+ PRINTF2 (uctx, DBG_CQ, "user_dropcq: ucq=%p ref=%d\n", ucq, atomic_read (&ucq->ucq_ref)); -+ -+ kmutex_lock (&uctx->uctx_cqlock); -+ if (! atomic_dec_and_test (&ucq->ucq_ref)) -+ { -+ kmutex_unlock (&uctx->uctx_cqlock); -+ return; -+ } -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ list_del (&ucq->ucq_link); -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ -+ kmutex_unlock (&uctx->uctx_cqlock); -+ -+ elan4_freecq (&uctx->uctx_ctxt, ucq->ucq_cq); -+ -+ KMEM_FREE (ucq, sizeof (USER_CQ)); -+} -+ -+int -+user_alloc_trap_queues (USER_CTXT *uctx, unsigned ndproc_traps, unsigned neproc_traps, -+ unsigned ntproc_traps, unsigned nthreads, unsigned ndmas) -+{ -+ ELAN4_DPROC_TRAP *dprocs; -+ ELAN4_EPROC_TRAP *eprocs; -+ ELAN4_TPROC_TRAP *tprocs; -+ E4_DMA *dmas; -+ E4_ThreadRegs *threads; -+ ELAN4_NETERR_MSG *msgs; -+ unsigned long flags; -+ -+ int nmsgs = NETERR_MSGS; -+ -+ /* bounds check the values that have been passed in */ -+ if (ndproc_traps < 2 || ndproc_traps > 10000 || -+ ntproc_traps < 1 || ntproc_traps > 100 || -+ neproc_traps < 6 || neproc_traps > 10000 || -+ nthreads < 2 || nthreads > 10000 || -+ ndmas < 2 || ndmas > 10000) -+ return -EINVAL; -+ -+ if (uctx->uctx_dmas != NULL) -+ return -EBUSY; -+ -+ KMEM_ZALLOC (dprocs, ELAN4_DPROC_TRAP *, ndproc_traps * sizeof (ELAN4_DPROC_TRAP), 1); -+ KMEM_ZALLOC (eprocs, ELAN4_EPROC_TRAP *, neproc_traps * sizeof (ELAN4_EPROC_TRAP), 1); -+ KMEM_ZALLOC (tprocs, ELAN4_TPROC_TRAP *, ntproc_traps * sizeof (ELAN4_TPROC_TRAP), 1); -+ KMEM_ZALLOC (threads, E4_ThreadRegs *, nthreads * sizeof (E4_ThreadRegs), 1); -+ KMEM_ZALLOC (dmas, E4_DMA *, ndmas * sizeof (E4_DMA), 1); -+ KMEM_ZALLOC (msgs, ELAN4_NETERR_MSG *, nmsgs * sizeof (ELAN4_NETERR_MSG), 1); -+ -+ if (dprocs == NULL || eprocs == NULL || tprocs == NULL || dmas == NULL || threads == NULL || msgs == NULL) -+ { -+ if (dprocs != NULL) KMEM_FREE (dprocs, ndproc_traps * sizeof (ELAN4_DPROC_TRAP)); -+ if (eprocs != NULL) KMEM_FREE (eprocs, neproc_traps * sizeof (ELAN4_EPROC_TRAP)); -+ if (tprocs != NULL) KMEM_FREE (tprocs, ntproc_traps * sizeof (ELAN4_TPROC_TRAP)); -+ if (threads != NULL) KMEM_FREE (threads, nthreads * sizeof (E4_ThreadRegs)); -+ if (dmas != 
NULL) KMEM_FREE (dmas, ndmas * sizeof (E4_DMA)); -+ if (msgs != NULL) KMEM_FREE (msgs, nmsgs * sizeof (ELAN4_NETERR_MSG)); -+ -+ return -ENOMEM; -+ } -+ -+ spin_lock_irqsave (&uctx->uctx_spinlock, flags); -+ -+ uctx->uctx_dprocTraps = dprocs; -+ uctx->uctx_eprocTraps = eprocs; -+ uctx->uctx_tprocTraps = tprocs; -+ uctx->uctx_threads = threads; -+ uctx->uctx_dmas = dmas; -+ uctx->uctx_msgs = msgs; -+ -+ RING_QUEUE_INIT (uctx->uctx_dprocTrapQ, ndproc_traps, 1 /* 1 for 2nd dma */); -+ RING_QUEUE_INIT (uctx->uctx_tprocTrapQ, ntproc_traps, 0); -+ RING_QUEUE_INIT (uctx->uctx_eprocTrapQ, neproc_traps, 5 /* 1 for command, 2 for dma, 2 for inputter */); -+ RING_QUEUE_INIT (uctx->uctx_threadQ, nthreads, 1); -+ RING_QUEUE_INIT (uctx->uctx_dmaQ, ndmas, 1); -+ RING_QUEUE_INIT (uctx->uctx_msgQ, nmsgs, 0); -+ -+ spin_unlock_irqrestore (&uctx->uctx_spinlock, flags); -+ -+ return 0; -+} -+ -+USER_CTXT * -+user_alloc (ELAN4_DEV *dev) -+{ -+ USER_CTXT *uctx; -+ int res; -+ int i; -+ -+ /* Allocate and initialise the context private data */ -+ KMEM_ZALLOC (uctx, USER_CTXT *, sizeof (USER_CTXT), 1); -+ -+ if (uctx == NULL) -+ return ERR_PTR(-ENOMEM); -+ -+ if (elan4_get_position (dev, &uctx->uctx_position) == ELAN_POS_UNKNOWN) -+ { -+ KMEM_FREE (uctx, sizeof (USER_CTXT)); -+ return ERR_PTR(-EAGAIN); -+ } -+ -+ if ((res = elan4_insertctxt (dev, &uctx->uctx_ctxt, &user_trap_ops)) != 0) -+ { -+ KMEM_FREE (uctx, sizeof (USER_CTXT)); -+ return ERR_PTR(res); -+ } -+ -+ if (! user_ioproc_enabled) -+ uctx->uctx_ctxt.ctxt_features |= ELAN4_FEATURE_NO_IOPROC | ELAN4_FEATURE_PIN_DOWN; -+ if (! user_pagefault_enabled) -+ uctx->uctx_ctxt.ctxt_features |= ELAN4_FEATURE_NO_PAGEFAULT; -+ -+ KMEM_GETPAGES (uctx->uctx_upage, ELAN4_USER_PAGE *, btopr (sizeof (ELAN4_USER_PAGE)), 1); -+ if (uctx->uctx_upage == NULL) -+ { -+ elan4_removectxt (dev, &uctx->uctx_ctxt); -+ KMEM_FREE (uctx, sizeof (USER_CTXT)); -+ return ERR_PTR(-ENOMEM); -+ } -+ -+ if ((uctx->uctx_trampoline = elan4_sdram_alloc (dev, SDRAM_PAGE_SIZE)) == 0) -+ { -+ KMEM_FREEPAGES (uctx->uctx_upage, btopr (sizeof (ELAN4_USER_PAGE))); -+ elan4_removectxt (dev, &uctx->uctx_ctxt); -+ -+ KMEM_FREE (uctx, sizeof (USER_CTXT)); -+ return ERR_PTR(-ENOMEM); -+ } -+ -+ if ((uctx->uctx_routetable = elan4_alloc_routetable (dev, 4 /* 512 << 4 == 8192 entries */)) == NULL) -+ { -+ elan4_sdram_free (dev, uctx->uctx_trampoline, SDRAM_PAGE_SIZE); -+ KMEM_FREEPAGES (uctx->uctx_upage, btopr (sizeof (ELAN4_USER_PAGE))); -+ elan4_removectxt (dev, &uctx->uctx_ctxt); -+ -+ KMEM_FREE (uctx, sizeof (USER_CTXT)); -+ return ERR_PTR(-ENOMEM); -+ } -+ -+ elan4_set_routetable (&uctx->uctx_ctxt, uctx->uctx_routetable); -+ -+ /* initialise the trap and swap queues to be really full */ -+ RING_QUEUE_INIT (uctx->uctx_dprocTrapQ, 0, 1); -+ RING_QUEUE_INIT (uctx->uctx_tprocTrapQ, 0, 1); -+ RING_QUEUE_INIT (uctx->uctx_eprocTrapQ, 0, 1); -+ RING_QUEUE_INIT (uctx->uctx_threadQ, 0, 1); -+ RING_QUEUE_INIT (uctx->uctx_dmaQ, 0, 1); -+ -+ INIT_LIST_HEAD (&uctx->uctx_cent_list); -+ INIT_LIST_HEAD (&uctx->uctx_vpseg_list); -+ INIT_LIST_HEAD (&uctx->uctx_cqlist); -+ -+ uctx->uctx_haltop.op_function = user_flush; -+ uctx->uctx_haltop.op_arg = uctx; -+ uctx->uctx_haltop.op_mask = INT_Halted|INT_Discarding; -+ -+ uctx->uctx_dma_flushop.op_function = user_flush_dmas; -+ uctx->uctx_dma_flushop.op_arg = uctx; -+ -+ kmutex_init (&uctx->uctx_vpseg_lock); -+ kmutex_init (&uctx->uctx_cqlock); -+ kmutex_init (&uctx->uctx_rgnmutex); -+ -+ spin_lock_init (&uctx->uctx_spinlock); -+ spin_lock_init (&uctx->uctx_rgnlock); -+ 
spin_lock_init (&uctx->uctx_fault_lock); -+ -+ kcondvar_init (&uctx->uctx_wait); -+ -+ if ((uctx->uctx_ddcq = user_alloccq (uctx, CQ_Size1K, CQ_EnableAllBits, UCQ_SYSTEM)) == NULL) -+ { -+ user_free (uctx); -+ return ERR_PTR(-ENOMEM); -+ } -+ -+ uctx->uctx_trap_count = 0; -+ uctx->uctx_trap_state = UCTX_TRAP_IDLE; -+ uctx->uctx_status = 0 /* UCTX_DETACHED | UCTX_SWAPPED | UCTX_STOPPED */; -+ -+ init_timer (&uctx->uctx_int_timer); -+ -+ uctx->uctx_int_timer.function = user_signal_timer; -+ uctx->uctx_int_timer.data = (unsigned long) uctx; -+ uctx->uctx_int_start = jiffies; -+ uctx->uctx_int_count = 0; -+ uctx->uctx_int_delay = 0; -+ -+ init_timer (&uctx->uctx_shuffle_timer); -+ -+ uctx->uctx_shuffle_timer.function = user_signal_timer; -+ uctx->uctx_shuffle_timer.data = (unsigned long) uctx; -+ -+ -+ init_timer (&uctx->uctx_neterr_timer); -+ uctx->uctx_neterr_timer.function = user_neterr_timer; -+ uctx->uctx_neterr_timer.data = (unsigned long) uctx; -+ -+ uctx->uctx_upage->upage_ddcq_completed = 0; -+ uctx->uctx_ddcq_completed = 0; -+ uctx->uctx_ddcq_insertcnt = 0; -+ -+ uctx->uctx_num_fault_save = num_fault_save; -+ if (uctx->uctx_num_fault_save) -+ { -+ KMEM_ZALLOC (uctx->uctx_faults, FAULT_SAVE *, (sizeof(FAULT_SAVE) * uctx->uctx_num_fault_save), 1); -+ if ( uctx->uctx_faults == NULL) -+ { -+ user_free (uctx); -+ return ERR_PTR(-ENOMEM); -+ } -+ -+ for (i = 0; i < uctx->uctx_num_fault_save; i++) -+ uctx->uctx_faults[i].next = (i == (uctx->uctx_num_fault_save-1) ? NULL : &uctx->uctx_faults[i+1]); -+ -+ } -+ uctx->uctx_fault_list = uctx->uctx_faults; -+ -+ return (uctx); -+} -+ -+void -+user_free (USER_CTXT *uctx) -+{ -+ ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev; -+ -+ user_swapout (uctx, UCTX_EXITING); -+ -+ /* Detach from all input contexts */ -+ user_detach (uctx, NULL); -+ -+ /* since we're single threaded here - (called from close()) */ -+ /* we don't need to hold the lock to drop the command queues */ -+ /* since they cannot be mapped into user space */ -+ while (! list_empty (&uctx->uctx_cqlist)) -+ user_dropcq (uctx, list_entry (uctx->uctx_cqlist.next, USER_CQ, ucq_link)); -+ -+ /* Free off all of vpseg_list */ -+ kmutex_lock (&uctx->uctx_vpseg_lock); -+ while (! 
list_empty (&uctx->uctx_vpseg_list))
-+ user_remove_vpseg(uctx, list_entry (uctx->uctx_vpseg_list.next, USER_VPSEG, vps_link));
-+ kmutex_unlock (&uctx->uctx_vpseg_lock);
-+
-+ if (timer_pending (&uctx->uctx_int_timer))
-+ del_timer_sync (&uctx->uctx_int_timer);
-+
-+ if (timer_pending (&uctx->uctx_shuffle_timer))
-+ del_timer_sync (&uctx->uctx_shuffle_timer);
-+
-+ if (timer_pending (&uctx->uctx_neterr_timer))
-+ del_timer_sync (&uctx->uctx_neterr_timer);
-+
-+ if (uctx->uctx_dprocTraps)
-+ KMEM_FREE (uctx->uctx_dprocTraps, uctx->uctx_dprocTrapQ.q_size * sizeof (ELAN4_DPROC_TRAP));
-+ if (uctx->uctx_tprocTraps)
-+ KMEM_FREE (uctx->uctx_tprocTraps, uctx->uctx_tprocTrapQ.q_size * sizeof (ELAN4_TPROC_TRAP));
-+ if (uctx->uctx_eprocTraps)
-+ KMEM_FREE (uctx->uctx_eprocTraps, uctx->uctx_eprocTrapQ.q_size * sizeof (ELAN4_EPROC_TRAP));
-+ if (uctx->uctx_dmas)
-+ KMEM_FREE (uctx->uctx_dmas, uctx->uctx_dmaQ.q_size * sizeof (E4_DMA));
-+ if (uctx->uctx_msgs)
-+ KMEM_FREE (uctx->uctx_msgs, NETERR_MSGS * sizeof (ELAN4_NETERR_MSG));
-+ if (uctx->uctx_threads)
-+ KMEM_FREE (uctx->uctx_threads, uctx->uctx_threadQ.q_size * sizeof (E4_ThreadRegs));
-+ if (uctx->uctx_faults)
-+ KMEM_FREE (uctx->uctx_faults, (sizeof(FAULT_SAVE) * uctx->uctx_num_fault_save));
-+
-+ if (uctx->uctx_intcookie_table)
-+ intcookie_free_table (uctx->uctx_intcookie_table);
-+
-+ elan4_set_routetable (&uctx->uctx_ctxt, NULL);
-+ elan4_free_routetable (dev, uctx->uctx_routetable);
-+
-+ /* Free off all USER_RGNs */
-+ user_freergns(uctx);
-+
-+ elan4_sdram_free (dev, uctx->uctx_trampoline, SDRAM_PAGE_SIZE);
-+
-+ /* Clear the PG_Reserved bit before free to avoid a memory leak */
-+ ClearPageReserved(pte_page(*find_pte_kernel((unsigned long) uctx->uctx_upage)));
-+ KMEM_FREEPAGES (uctx->uctx_upage, btopr (sizeof (ELAN4_USER_PAGE)));
-+
-+ elan4_removectxt (dev, &uctx->uctx_ctxt);
-+
-+ kcondvar_destroy (&uctx->uctx_wait);
-+
-+ spin_lock_destroy (&uctx->uctx_rgnlock);
-+ spin_lock_destroy (&uctx->uctx_spinlock);
-+
-+ kmutex_destroy (&uctx->uctx_rgnmutex);
-+ kmutex_destroy (&uctx->uctx_cqlock);
-+ kmutex_destroy (&uctx->uctx_vpseg_lock);
-+
-+ KMEM_FREE (uctx, sizeof (USER_CTXT));
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "stroustrup"
-+ * End:
-+ */
-diff -urN clean/drivers/net/qsnet/elan4/user_ddcq.c linux-2.6.9/drivers/net/qsnet/elan4/user_ddcq.c
---- clean/drivers/net/qsnet/elan4/user_ddcq.c 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/elan4/user_ddcq.c 2005-07-20 07:35:36.000000000 -0400
-@@ -0,0 +1,230 @@
-+/*
-+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
-+ * Copyright (c) 2002-2003 by Quadrics Ltd.
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ident "@(#)$Id: user_ddcq.c,v 1.16.2.1 2005/07/20 11:35:36 mike Exp $"
-+/* $Source: /cvs/master/quadrics/elan4mod/user_ddcq.c,v $*/
-+
-+#include 
-+
-+#include 
-+#include 
-+#include 
-+#include 
-+
-+#if PAGE_SIZE < CQ_CommandMappingSize
-+# define ELAN4_COMMAND_QUEUE_MAPPING PAGE_SIZE
-+#else
-+# define ELAN4_COMMAND_QUEUE_MAPPING CQ_CommandMappingSize
-+#endif
-+
-+/* The user device driver command queue is used for re-issuing
-+ * trapped items. It is allocated as a 1K command queue, and
-+ * we insert command flow writes every 256 bytes (32 dwords).
-+ */
-+#define USER_CTRLFLOW_COUNT 32
-+
-+/* Flow control of the device driver command queue is handled by periodically
-+ * inserting dword writes into the command stream. When you need to know
-+ * that the queue has been flushed, then you insert an extra control flow
-+ * write into the command queue. Should the queue not be flushed, but the
-+ * trap handler be returning to user space, then it will also insert an
-+ * extra interrupt command to ensure that it is re-entered after the queue
-+ * has been flushed.
-+ *
-+ * Note - we account the space for the interrupt command on each control
-+ * flow write so that we do not overflow the queue even if we end up
-+ * inserting an interrupt for every command flow write. In general only
-+ * a single interrupt should get inserted....
-+ */
-+
-+#define user_ddcq_command_write(value,off) do { \
-+ PRINTF(uctx, DBG_DDCQ, "user_ddcq_command_write: cmdptr=%x off=%d value=%llx\n", cmdptr, off, value);\
-+ writeq(value, (void *)(cmdptr + (off << 3))); \
-+} while (0)
-+
-+#define user_ddcq_command_space(uctx) \
-+ ((CQ_Size (uctx->uctx_ddcq->ucq_cq->cq_size)>>3) - ((uctx)->uctx_ddcq_insertcnt - (uctx)->uctx_upage->upage_ddcq_completed))
-+
-+#define user_ddcq_command_flow_write(uctx) do { \
-+ E4_uint64 iptr = (uctx)->uctx_ddcq_insertcnt; \
-+ ioaddr_t cmdptr = (uctx)->uctx_ddcq->ucq_cq->cq_mapping + ((iptr<<3) & ((ELAN4_COMMAND_QUEUE_MAPPING >> 1)-1));\
-+\
-+ (uctx)->uctx_ddcq_completed = ((uctx)->uctx_ddcq_insertcnt += 3);\
-+\
-+ PRINTF (uctx, DBG_DDCQ, "user_ddcq_command_flow_write: completed=%llx [%llx] addr=%llx\n", (uctx)->uctx_ddcq_completed, \
-+ (uctx)->uctx_upage->upage_ddcq_completed, (uctx)->uctx_upage_addr); \
-+ user_ddcq_command_write (GUARD_CMD | GUARD_ALL_CHANNELS, 0);\
-+ user_ddcq_command_write (WRITE_DWORD_CMD | (uctx)->uctx_upage_addr, 1);\
-+ user_ddcq_command_write ((uctx)->uctx_ddcq_completed, 2);\
-+} while (0)
-+
-+#define user_ddcq_command_flow_intr(uctx) do { \
-+ E4_uint64 iptr = (uctx)->uctx_ddcq_insertcnt; \
-+ ioaddr_t cmdptr = (uctx)->uctx_ddcq->ucq_cq->cq_mapping + ((iptr<<3) & ((ELAN4_COMMAND_QUEUE_MAPPING >> 1)-1));\
-+\
-+ PRINTF (uctx, DBG_DDCQ, "user_ddcq_command_flow_intr: completed=%llx [%llx] addr=%llx\n", (uctx)->uctx_ddcq_completed, \
-+ (uctx)->uctx_upage->upage_ddcq_completed, (uctx)->uctx_upage_addr); \
-+ user_ddcq_command_write (INTERRUPT_CMD | ELAN4_INT_COOKIE_DDCQ, 3);\
-+} while (0)
-+
-+#define user_ddcq_command_prologue(uctx, count) do { \
-+ E4_uint64 iptr = (uctx)->uctx_ddcq_insertcnt; \
-+ ioaddr_t cmdptr = (uctx)->uctx_ddcq->ucq_cq->cq_mapping + ((iptr<<3) & ((ELAN4_COMMAND_QUEUE_MAPPING >> 1)-1));\
-+ PRINTF(uctx, DBG_DDCQ, "user_ddcq_command_prologue: iptr=%llx cmdptr=%x\n", iptr, cmdptr);
-+
-+#define user_ddcq_command_epilogue(uctx, count, extra) \
-+ (uctx)->uctx_ddcq_insertcnt = iptr + (count);\
-+\
-+ PRINTF(uctx, DBG_DDCQ, "user_ddcq_command_epilogue: iptr=%llx + %x + %x - completed %llx\n", iptr, count, extra, (uctx)->uctx_ddcq_completed);\
-+ if (((iptr) + (count) + (extra)) > ((uctx)->uctx_ddcq_completed + USER_CTRLFLOW_COUNT))\
-+ user_ddcq_command_flow_write(uctx); \
-+} while (0)
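-+
-+/* A minimal sketch of the flow-control arithmetic the macros above
-+ * implement (illustrative only, not functional code):
-+ *
-+ * dword slots in the queue = CQ_Size(cq_size) >> 3
-+ * dwords outstanding = uctx_ddcq_insertcnt - upage_ddcq_completed
-+ * free dword slots = slots - outstanding
-+ *
-+ * user_ddcq_check() below requires free > num + 4 before issuing a
-+ * command of num dwords: 3 dwords are reserved for the guard/write-dword
-+ * flow control flush and 1 for the reschedule interrupt command.
-+ */
-+
-+int
-+user_ddcq_check (USER_CTXT *uctx, unsigned num)
-+{
-+ PRINTF (uctx, DBG_DDCQ, "user_ddcq_check: insert=%llx completed=%llx [%llx] space=%d num=%d\n",
-+ uctx->uctx_ddcq_insertcnt, uctx->uctx_ddcq_completed,
-+ uctx->uctx_upage->upage_ddcq_completed,
-+ user_ddcq_command_space (uctx),
-+ num
-+ );
-+
-+ /* Ensure that there is enough space for the command we want to issue,
-+ * PLUS the guard/writeword for the control flow flush. 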
-+ * PLUS the interrupt command for rescheduling */ -+ if (user_ddcq_command_space (uctx) > (num + 4)) -+ { -+ PRINTF (uctx, DBG_DDCQ, "user_ddcq_check: loads of space\n"); -+ -+ return (1); -+ } -+ -+ PRINTF (uctx, DBG_DDCQ, "user_ddcq_check: not enough space - reschedule\n"); -+ -+ uctx->uctx_trap_state = UCTX_TRAP_SIGNALLED; -+ return (0); -+} -+ -+int -+user_ddcq_flush (USER_CTXT *uctx) -+{ -+ ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev; -+ USER_CQ *ucq = uctx->uctx_ddcq; -+ -+ switch (ucq->ucq_state) -+ { -+ case UCQ_TRAPPED: -+ PRINTF (uctx, DBG_DDCQ, "user_ddcq_flush: command queue is trapped\n"); -+ return (0); -+ -+ case UCQ_NEEDS_RESTART: -+ PRINTF (uctx, DBG_DDCQ, "user_ddcq_flush: restarting command queue\n"); -+ -+ if (UCTX_RUNNABLE (uctx)) -+ { -+ ucq->ucq_state = UCQ_RUNNING; -+ elan4_restartcq (dev, ucq->ucq_cq); -+ } -+ break; -+ } -+ -+ PRINTF (uctx, DBG_DDCQ, "user_ddcq_flush: insertcnt=%llx completed=%llx [%llx]\n", -+ uctx->uctx_ddcq_insertcnt, uctx->uctx_ddcq_completed, uctx->uctx_upage->upage_ddcq_completed); -+ -+ if (uctx->uctx_ddcq_completed != uctx->uctx_ddcq_insertcnt) -+ user_ddcq_command_flow_write (uctx); -+ -+ return (uctx->uctx_ddcq_completed == uctx->uctx_upage->upage_ddcq_completed); -+} -+ -+void -+user_ddcq_intr (USER_CTXT *uctx) -+{ -+ user_ddcq_command_flow_intr (uctx); -+} -+ -+void -+user_ddcq_run_dma (USER_CTXT *uctx, E4_DMA *dma) -+{ -+ PRINTF (uctx, DBG_DDCQ, "user_ddcq_run_dma: cookie=%llx vproc=%llx\n", dma->dma_cookie, dma->dma_vproc); -+ -+ user_ddcq_command_prologue(uctx, 7) { -+ -+ user_ddcq_command_write ((dma->dma_typeSize & ~DMA_ContextMask) | RUN_DMA_CMD, 0); -+ user_ddcq_command_write (dma->dma_cookie, 1); -+ user_ddcq_command_write (dma->dma_vproc, 2); -+ user_ddcq_command_write (dma->dma_srcAddr, 3); -+ user_ddcq_command_write (dma->dma_dstAddr, 4); -+ user_ddcq_command_write (dma->dma_srcEvent, 5); -+ user_ddcq_command_write (dma->dma_dstEvent, 6); -+ -+ } user_ddcq_command_epilogue (uctx, 7, 0); -+} -+ -+void -+user_ddcq_run_thread (USER_CTXT *uctx, E4_ThreadRegs *regs) -+{ -+ PRINTF (uctx, DBG_DDCQ, "user_ddcq_run_thread: PC=%llx SP=%llx\n", regs->Registers[0], regs->Registers[1]); -+ -+ user_ddcq_command_prologue(uctx, 7) { -+ -+ user_ddcq_command_write (regs->Registers[0] | RUN_THREAD_CMD, 0); -+ user_ddcq_command_write (regs->Registers[1], 1); -+ user_ddcq_command_write (regs->Registers[2], 2); -+ user_ddcq_command_write (regs->Registers[3], 3); -+ user_ddcq_command_write (regs->Registers[4], 4); -+ user_ddcq_command_write (regs->Registers[5], 5); -+ user_ddcq_command_write (regs->Registers[6], 6); -+ -+ } user_ddcq_command_epilogue (uctx, 7, 0); -+} -+ -+void -+user_ddcq_setevent (USER_CTXT *uctx, E4_Addr addr) -+{ -+ user_ddcq_command_prologue (uctx, 1) { -+ -+ user_ddcq_command_write (SET_EVENT_CMD | addr, 0); -+ -+ } user_ddcq_command_epilogue (uctx, 1, 0); -+} -+ -+void -+user_ddcq_seteventn (USER_CTXT *uctx, E4_Addr addr, E4_uint32 count) -+{ -+ PRINTF (uctx, DBG_DDCQ, "user_ddcq_seteventn: addr=%llx count=%lx\n", addr, count); -+ -+ user_ddcq_command_prologue (uctx, 2) { -+ -+ user_ddcq_command_write (SET_EVENTN_CMD, 0); -+ user_ddcq_command_write (addr | count, 1); -+ -+ } user_ddcq_command_epilogue (uctx, 2, 0); -+} -+ -+void -+user_ddcq_waitevent (USER_CTXT *uctx, E4_Addr addr, E4_uint64 CountAndType, E4_uint64 Param0, E4_uint64 Param1) -+{ -+ PRINTF (uctx, DBG_DDCQ, "user_ddcq_waitevent: addr=%llx CountAndType=%llx Param=%llx,%llx\n", addr, CountAndType, Param0, Param1); -+ -+ user_ddcq_command_prologue (uctx, 4) { -+ 
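-+ /* WAIT_EVENT_CMD carries the event address in its low bits; the three
-+ * dwords which follow supply the event count/type and the two event
-+ * parameters. */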
-+ user_ddcq_command_write (WAIT_EVENT_CMD | addr, 0); -+ user_ddcq_command_write (CountAndType, 1); -+ user_ddcq_command_write (Param0, 2); -+ user_ddcq_command_write (Param1, 3); -+ -+ } user_ddcq_command_epilogue (uctx, 4, 0); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/elan4/user_Linux.c linux-2.6.9/drivers/net/qsnet/elan4/user_Linux.c ---- clean/drivers/net/qsnet/elan4/user_Linux.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/elan4/user_Linux.c 2005-06-09 10:46:55.000000000 -0400 -@@ -0,0 +1,349 @@ -+/* -+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: user_Linux.c,v 1.35.2.1 2005/06/09 14:46:55 addy Exp $" -+/* $Source: /cvs/master/quadrics/elan4mod/user_Linux.c,v $*/ -+ -+#include -+#include -+ -+#include -+#include -+ -+#ifdef CONFIG_HUGETLB_PAGE -+#include -+#endif -+ -+#include -+#include -+#include -+ -+extern struct vm_operations_struct mem_vm_ops; -+extern struct vm_operations_struct user_vm_ops; -+extern int mem_pteload (struct vm_area_struct *vma, unsigned long pgoff, ELAN4_CTXT *ctxt, E4_Addr eaddr, int perm); -+extern int user_pteload (struct vm_area_struct *vma, unsigned long pgoff, ELAN4_CTXT *ctxt, E4_Addr eaddr, int perm); -+ -+static inline int -+user_load_page (USER_CTXT *uctx, struct vm_area_struct *vma, unsigned long maddr, E4_Addr eaddr, int perm, int writeable) -+{ -+ ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev; -+ struct page *page = NULL; -+ int i, res = 0; -+ -+ if (get_user_pages (current, current->mm, maddr, 1, writeable, 0, &page, NULL) == 1) -+ { -+ /* NOTE - the page can't be paged out since we've pinned it down. -+ * it also can't be munmap'd since we hold the mmap_sem */ -+ -+ PRINTF (uctx, DBG_FAULT, "user_load_page: %lx %s page %p\n", maddr, writeable ? "writeable" : "readonly", page); -+ -+ for (i = 0; i < PAGE_SIZE; i += (1 << dev->dev_pageshift[0])) -+ if ((res = elan4mmu_pteload_page (&uctx->uctx_ctxt, 0, eaddr + i, page, perm)) < 0) -+ break; -+ -+ page_cache_release (page); -+ } -+ else -+ { -+ if (vma && vma->vm_ops == &mem_vm_ops) -+ res = mem_pteload (vma, maddr, &uctx->uctx_ctxt, eaddr, perm); -+ else if (vma && vma->vm_ops == &user_vm_ops) -+ res = user_pteload (vma, maddr, &uctx->uctx_ctxt, eaddr, perm); -+ else -+ res = -EINVAL; -+ } -+ -+ return res; -+} -+ -+int -+user_load_range (USER_CTXT *uctx, E4_Addr eaddr, unsigned long nbytes, E4_uint32 fsr) -+{ -+ struct mm_struct *mm = current->mm; -+ int writeable = (AT_Perm(fsr) == AT_PermLocalDataWrite || -+ AT_Perm(fsr) == AT_PermRemoteWrite || -+ AT_Perm(fsr) == AT_PermLocalEvent || -+ AT_Perm(fsr) == AT_PermRemoteEvent); -+ struct vm_area_struct *vma; -+ int perm; -+ unsigned long len; -+ unsigned long maddr; -+ int res = 0; -+ -+ kmutex_lock (&uctx->uctx_rgnmutex); -+ -+ while (nbytes > 0) -+ { -+ USER_RGN *rgn = user_rgnat_elan (uctx, eaddr); -+ -+ if (rgn == NULL || ELAN4_INCOMPAT_ACCESS (rgn->rgn_perm, AT_Perm (fsr))) -+ { -+ PRINTF (uctx, DBG_FAULT, "user_load_range: eaddr=%llx -> %s\n", eaddr, rgn == NULL ? "no mapping" : "no permission"); -+ -+ kmutex_unlock (&uctx->uctx_rgnmutex); -+ return (rgn == NULL ? 
-EFAULT : -EPERM);
-+ }
-+
-+ if (writeable)
-+ perm = rgn->rgn_perm;
-+ else if (AT_Perm(fsr) == AT_PermExecute)
-+ perm = PERM_LocRead | (rgn->rgn_perm & ~PERM_Mask);
-+ else
-+ perm = ELAN4_PERM_READONLY (rgn->rgn_perm & PERM_Mask) | (rgn->rgn_perm & ~PERM_Mask);
-+
-+ PRINTF (uctx, DBG_FAULT, "user_load_range: rgn=%p [%llx.%lx.%x]\n", rgn, rgn->rgn_ebase, rgn->rgn_mbase, rgn->rgn_len);
-+
-+ len = ((rgn->rgn_ebase + rgn->rgn_len) - eaddr);
-+ if (len > nbytes)
-+ len = nbytes;
-+ nbytes -= len;
-+
-+ maddr = rgn->rgn_mbase + (eaddr - rgn->rgn_ebase);
-+
-+ PRINTF (uctx, DBG_FAULT, "user_load_range: eaddr=%llx->%llx -> %lx->%lx len=%x perm=%x\n", eaddr,
-+ eaddr + len, maddr, maddr + len, len, perm);
-+
-+ down_read (&mm->mmap_sem);
-+ while (len > 0)
-+ {
-+#if defined(conditional_schedule)
-+ conditional_schedule();
-+#endif
-+ if ((vma = find_vma_intersection (mm, maddr, maddr + PAGE_SIZE)) == NULL ||
-+ (writeable && !(vma->vm_flags & VM_WRITE)))
-+ {
-+ PRINTF (DBG_USER, DBG_FAULT, "user_load_range: %s %lx\n", vma ? "not writeable at" : "no vma for", maddr);
-+ up_read (&mm->mmap_sem);
-+ kmutex_unlock (&uctx->uctx_rgnmutex);
-+ return (-EFAULT);
-+ }
-+
-+ if ((res = user_load_page (uctx, vma, maddr, eaddr, perm, writeable)) < 0)
-+ {
-+ PRINTF (DBG_USER, DBG_FAULT, "user_load_range: failed to load page res=%d\n", res);
-+ up_read (&mm->mmap_sem);
-+ kmutex_unlock (&uctx->uctx_rgnmutex);
-+ return res;
-+ }
-+
-+ eaddr += PAGE_SIZE;
-+ maddr += PAGE_SIZE;
-+ len -= PAGE_SIZE;
-+ }
-+ up_read (&mm->mmap_sem);
-+ }
-+ kmutex_unlock (&uctx->uctx_rgnmutex);
-+
-+ PRINTF (uctx, DBG_FAULT, "user_load_range: alldone\n");
-+
-+ return (0);
-+}
-+
-+void
-+user_preload_main (USER_CTXT *uctx, virtaddr_t addr, unsigned long len)
-+{
-+ virtaddr_t lim = addr + len - 1;
-+ struct vm_area_struct *vma;
-+
-+ down_read (&current->mm->mmap_sem);
-+
-+ if ((vma = find_vma (current->mm, addr)) != NULL)
-+ {
-+ do {
-+ unsigned long start = vma->vm_start;
-+ unsigned long end = vma->vm_end;
-+
-+ if ((start-1) >= lim)
-+ break;
-+
-+ if (start < addr) start = addr;
-+ if ((end-1) > lim) end = lim+1;
-+
-+ if (vma->vm_flags & VM_IO)
-+ continue;
-+
-+ user_unload_main (uctx, start, end - start);
-+
-+ if (get_user_pages (current, current->mm, start, (end - start)/PAGE_SIZE,
-+ (vma->vm_flags & VM_WRITE) != 0, 0, NULL, NULL) > 0)
-+ user_update_main (uctx, vma->vm_mm, vma, start, end - start);
-+
-+ else if (vma->vm_ops == &mem_vm_ops)
-+ user_update_main (uctx, vma->vm_mm, vma, start, end - start);
-+ else if (vma->vm_ops == &user_vm_ops)
-+ user_update_main (uctx, vma->vm_mm, vma, start, end - start);
-+
-+ } while ((vma = find_vma (current->mm, vma->vm_end)) != NULL);
-+ }
-+ up_read (&current->mm->mmap_sem);
-+}
-+
-+static void
-+user_update_range (USER_CTXT *uctx, int tbl, struct mm_struct *mm, struct vm_area_struct *vma, virtaddr_t maddr, E4_Addr eaddr, unsigned long len, int perm)
-+{
-+ ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev;
-+ int roperm = ELAN4_PERM_READONLY(perm & PERM_Mask) | (perm & ~PERM_Mask);
-+ int i, write;
-+ pte_t *ptep;
-+ struct page *page;
-+
-+ if (vma && vma->vm_ops == &mem_vm_ops)
-+ {
-+ mem_pteload (vma, maddr, &uctx->uctx_ctxt, eaddr, perm);
-+ return;
-+ }
-+
-+ if (vma && vma->vm_ops == &user_vm_ops)
-+ {
-+ user_pteload (vma, maddr, &uctx->uctx_ctxt, eaddr, perm);
-+ return;
-+ }
-+
-+#ifdef CONFIG_HUGETLB_PAGE
-+ /* If the kernel has hugetlb support compiled in, then
-+ * we can't walk the pte's unless we know for sure that
-+ * they're normal ptes. 
*/ -+ -+ if (vma == NULL || is_vm_hugetlb_page (vma)) -+ return; -+#endif -+ -+ while (len > 0) -+ { -+ if ((ptep = find_pte_map (mm, maddr)) != NULL) -+ { -+ write = (pte_write(*ptep) && pte_dirty(*ptep)); -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION (2, 5, 0) -+ page = pte_page (*ptep); -+ if (! VALID_PAGE (page)) -+ page = NULL; -+#else -+ { -+ unsigned long pfn; -+ -+ pfn = pte_pfn (*ptep); -+ page = pfn_valid (pfn) ? pfn_to_page (pfn) : NULL; -+ } -+#endif -+ pte_unmap (ptep); -+ -+ PRINTF (uctx, DBG_IOPROC, "user_update_range: maddr=%lx eaddr=%llx -> page %p %lx %s\n", maddr, eaddr, page, page_to_pfn (page), write ? "writeable" : "read-only"); -+ -+ if (page != NULL) -+ for (i = 0; i < PAGE_SIZE; i += (1 << dev->dev_pageshift[tbl])) -+ elan4mmu_pteload_page (&uctx->uctx_ctxt, tbl, eaddr + i, page, write ? perm : roperm); -+ } -+ -+ eaddr += PAGE_SIZE; -+ maddr += PAGE_SIZE; -+ len -= PAGE_SIZE; -+ } -+} -+ -+void -+user_update_main (USER_CTXT *uctx, struct mm_struct *mm, struct vm_area_struct *vma, virtaddr_t start, unsigned long len) -+{ -+ USER_RGN *rgn; -+ unsigned long ssize; -+ virtaddr_t end = start + len - 1; -+ -+ spin_lock (&uctx->uctx_rgnlock); -+ -+ PRINTF (uctx, DBG_IOPROC, "user_update_main: start=%lx end=%lx\n", start, end); -+ -+ for (rgn = user_findrgn_main (uctx, start, 0); rgn != NULL; rgn = rgn->rgn_mnext) -+ { -+ if (end < rgn->rgn_mbase) -+ break; -+ -+ if (start <= rgn->rgn_mbase && end >= (rgn->rgn_mbase + rgn->rgn_len - 1)) -+ { -+ PRINTF (uctx, DBG_IOPROC, "user_update_main: whole %lx -> %lx\n", rgn->rgn_mbase, rgn->rgn_mbase + rgn->rgn_len - 1); -+ -+ user_update_range (uctx, 0 /* tbl */, mm, vma, rgn->rgn_mbase, rgn->rgn_ebase, rgn->rgn_len, rgn->rgn_perm); -+ } -+ else if (start <= rgn->rgn_mbase) -+ { -+ ssize = end - rgn->rgn_mbase + 1; -+ -+ PRINTF (uctx, DBG_IOPROC, "user_update_main: start %lx -> %lx\n", rgn->rgn_mbase, rgn->rgn_mbase + ssize); -+ -+ user_update_range (uctx, 0 /* tbl */, mm, vma, rgn->rgn_mbase, rgn->rgn_ebase, ssize, rgn->rgn_perm); -+ } -+ else if (end >= (rgn->rgn_mbase + rgn->rgn_len - 1)) -+ { -+ ssize = (rgn->rgn_mbase + rgn->rgn_len) - start; -+ -+ PRINTF (uctx, DBG_IOPROC, "user_update_main: end %lx -> %lx\n", start, start + ssize); -+ -+ user_update_range (uctx, 0 /* tbl */, mm, vma, start, rgn->rgn_ebase + (start - rgn->rgn_mbase), ssize, rgn->rgn_perm); -+ } -+ else -+ { -+ PRINTF (uctx, DBG_IOPROC, "user_update_main: middle %lx -> %lx\n", start, end); -+ -+ user_update_range (uctx, 0 /* tbl */, mm, vma, start, rgn->rgn_ebase + (start - rgn->rgn_mbase), len, rgn->rgn_perm); -+ } -+ } -+ spin_unlock (&uctx->uctx_rgnlock); -+} -+ -+void -+user_unload_main (USER_CTXT *uctx, virtaddr_t start, unsigned long len) -+{ -+ USER_RGN *rgn; -+ unsigned long ssize; -+ virtaddr_t end = start + len - 1; -+ -+ spin_lock (&uctx->uctx_rgnlock); -+ -+ PRINTF (uctx, DBG_IOPROC, "user_unload_main: start=%lx end=%lx\n", start, end); -+ -+ for (rgn = user_findrgn_main (uctx, start, 0); rgn != NULL; rgn = rgn->rgn_mnext) -+ { -+ if (end < rgn->rgn_mbase) -+ break; -+ -+ if (start <= rgn->rgn_mbase && end >= (rgn->rgn_mbase + rgn->rgn_len - 1)) -+ { -+ PRINTF (uctx, DBG_IOPROC, "user_unload_main: whole %lx -> %lx\n", rgn->rgn_mbase, rgn->rgn_mbase + rgn->rgn_len - 1); -+ -+ elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* tbl */, rgn->rgn_ebase, rgn->rgn_len); -+ } -+ else if (start <= rgn->rgn_mbase) -+ { -+ ssize = end - rgn->rgn_mbase + 1; -+ -+ PRINTF (uctx, DBG_IOPROC, "user_unload_main: start %lx -> %lx\n", rgn->rgn_mbase, rgn->rgn_mbase + 
ssize); -+ -+ elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* tbl */, rgn->rgn_ebase, ssize); -+ } -+ else if (end >= (rgn->rgn_mbase + rgn->rgn_len - 1)) -+ { -+ ssize = (rgn->rgn_mbase + rgn->rgn_len) - start; -+ -+ PRINTF (uctx, DBG_IOPROC, "user_unload_main: end %lx -> %lx\n", start, start + ssize); -+ -+ elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* tbl */, rgn->rgn_ebase + (start - rgn->rgn_mbase), ssize); -+ } -+ else -+ { -+ -+ PRINTF (uctx, DBG_IOPROC, "user_unload_main: middle %lx -> %lx\n", start, end); -+ -+ elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* tbl */, rgn->rgn_ebase + (start - rgn->rgn_mbase), len); -+ } -+ } -+ spin_unlock (&uctx->uctx_rgnlock); -+} -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/asm_elan4_thread.S linux-2.6.9/drivers/net/qsnet/ep/asm_elan4_thread.S ---- clean/drivers/net/qsnet/ep/asm_elan4_thread.S 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/asm_elan4_thread.S 2003-09-23 09:55:11.000000000 -0400 -@@ -0,0 +1,78 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: asm_elan4_thread.S,v 1.1 2003/09/23 13:55:11 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $" -+/* $Source: /cvs/master/quadrics/epmod/asm_elan4_thread.S,v $*/ -+ -+#include -+#include -+ -+/* -+ * c_reschedule (E4_uint64 *commandport) -+ */ -+ .global c_reschedule -+c_reschedule: -+ add %sp, -128, %sp -+ st64 %r16, [%sp] // preserve call preserved registers -+ st64 %r24, [%sp + 64] // - see CALL_USED_REGISTERS. -+ mov %r16,%r16 // BUG FIX: E4 RevA -+ mov %r24,%r24 // BUG FIX: E4 RevA -+ nop // BUG FIX: E4 RevA -+ nop // BUG FIX: E4 RevA -+ -+ mov %r7, %r18 // (%r2) return pc -+1: call 2f -+ mov %sp, %r17 // (%r1) SP -+2: add %r7, (3f-1b), %r16 // (%r0) PC -+ mov NOP_CMD, %r23 // "nop" command -+ st64suspend %r16, [%r8] -+3: ld64 [%sp], %r16 -+ ld64 [%sp + 64], %r24 // restore call preserved register -+ jmpl %r2+8, %r0 // and return -+ add %sp, 128, %sp -+ -+ -+/* -+ * c_waitevent (E4_uint64 *commandport, E4_Event *event, E4_uint64 count) -+ */ -+ .global c_waitevent -+c_waitevent: -+ add %sp, -192, %sp -+ st64 %r16, [%sp + 64] // preserve call preserved registers -+ st64 %r24, [%sp + 128] // - see CALL_USED_REGISTERS. 
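-+ // E4 RevA bug fix: touch the registers just stored and pad with
-+ // nops before they are reused (same sequence as in c_reschedule).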
-+ mov %r16,%r16 // BUG FIX: E4 RevA -+ mov %r24,%r24 // BUG FIX: E4 RevA -+ nop // BUG FIX: E4 RevA -+ nop // BUG FIX: E4 RevA -+ -+ mov %r7, %r18 // (%r2) return pc -+1: call 2f -+ mov %sp, %r17 // (%r1) SP -+2: add %r7, (3f-1b), %r16 // (%r0) PC -+ st32 %r16, [%sp] // event source block -+ mov MAKE_EXT_CLEAN_CMD, %r23 // "flush command queue desc" command -+ st8 %r23, [%sp+56] // event source block -+ mov %r16,%r16 // BUG FIX: E4 RevA -+ mov %r23,%r23 // BUG FIX: E4 RevA -+ nop // BUG FIX: E4 RevA -+ nop // BUG FIX: E4 RevA -+ -+ -+ or %r9, WAIT_EVENT_CMD, %r16 -+ sll8 %r10, 32, %r17 -+ or %r17, E4_EVENT_TYPE_VALUE(E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, 8), %r17 -+ mov %sp, %r18 -+ mov %r8, %r19 -+ -+ st32suspend %r16, [%r8] -+ -+3: ld64 [%sp + 64], %r16 // restore call preserved register -+ ld64 [%sp + 128], %r24 -+ jmpl %r2+8, %r0 // and return -+ add %sp, 192, %sp -+ -diff -urN clean/drivers/net/qsnet/ep/assym_elan4.h linux-2.6.9/drivers/net/qsnet/ep/assym_elan4.h ---- clean/drivers/net/qsnet/ep/assym_elan4.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/assym_elan4.h 2005-09-07 10:39:44.000000000 -0400 -@@ -0,0 +1,20 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: genassym_elan4.c,v 1.3 2004/04/25 11:26:07 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $" -+/* $Source: /cvs/master/quadrics/epmod/genassym_elan4.c,v $*/ -+ -+/* Generated by genassym_elan4 - do not modify */ -+ -+#define EP4_RCVR_THREAD_STALL 0 -+#define EP4_RCVR_PENDING_TAILP 128 -+#define EP4_RCVR_PENDING_HEAD 136 -+#define EP4_RCVR_DEBUG 176 -+#define EP4_RXD_NEXT 664 -+#define EP4_RXD_QUEUED 728 -+#define EP4_RXD_DEBUG 944 -diff -urN clean/drivers/net/qsnet/ep/cm.c linux-2.6.9/drivers/net/qsnet/ep/cm.c ---- clean/drivers/net/qsnet/ep/cm.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/cm.c 2005-05-24 05:19:12.000000000 -0400 -@@ -0,0 +1,2835 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: cm.c,v 1.90 2005/05/24 09:19:12 david Exp $" -+/* $Source: /cvs/master/quadrics/epmod/cm.c,v $ */ -+ -+#include -+ -+#include -+ -+#include "kcomm_vp.h" -+#include "debug.h" -+#include "cm.h" -+#include -+ -+#include -+ -+#if defined(LINUX) -+#include "conf_linux.h" -+#endif -+ -+int BranchingRatios[CM_MAX_LEVELS]; -+ -+int MachineId = -1; -+int BrokenLevel = -1; /* Simulates Broken Network */ -+int RejoinCheck = 1; -+int RejoinPanic = 0; -+ -+static int -+SegmentNo (CM_RAIL *cmRail, u_int nodeid, u_int lvl) -+{ -+ int i; -+ -+ ASSERT (lvl < cmRail->NumLevels); -+ -+ for (i = 0; i < lvl; i++) -+ nodeid /= cmRail->Levels[i].NumSegs; -+ -+ return (nodeid % cmRail->Levels[lvl].NumSegs); -+} -+ -+static int -+ClusterIds (CM_RAIL *cmRail, int clvl, int *clmin, int *clmax) -+{ -+ int clid = cmRail->Rail->Position.pos_nodeid - cmRail->Levels[clvl].MinNodeId; -+ -+ if (clvl == 0) -+ *clmin = *clmax = clid; -+ else -+ { -+ *clmin = cmRail->Levels[clvl - 1].MinNodeId - cmRail->Levels[clvl].MinNodeId; -+ *clmax = *clmin + cmRail->Levels[clvl - 1].NumNodes - 1; -+ } -+ return (clid); -+} -+ -+static void -+__Schedule_Timer (CM_RAIL *cmRail, long tick) -+{ -+ if (! 
timer_pending (&cmRail->HeartbeatTimer) || AFTER (cmRail->NextRunTime, tick))
-+    {
-+        cmRail->NextRunTime = tick;
-+
-+        mod_timer (&cmRail->HeartbeatTimer, tick);
-+    }
-+}
-+
-+static void
-+__Schedule_Discovery (CM_RAIL *cmRail)          /* we urgently need to schedule discovery */
-+{
-+    __Schedule_Timer (cmRail, cmRail->NextDiscoverTime = lbolt);
-+}
-+
-+static int
-+MsgBusy (CM_RAIL *cmRail, int msgNumber)
-+{
-+    switch (ep_outputq_state (cmRail->Rail, cmRail->MsgQueue, msgNumber))
-+    {
-+    case EP_OUTPUTQ_BUSY:                       /* still busy */
-+        return 1;
-+
-+    case EP_OUTPUTQ_FAILED:                     /* NACKed */
-+    {
-+#if defined(DEBUG_PRINTF)
-+        CM_MSG  *msg   = ep_outputq_msg (cmRail->Rail, cmRail->MsgQueue, msgNumber);
-+        uint8_t  type  = msg->Hdr.Type;
-+        uint16_t nmaps = msg->Hdr.NumMaps;
-+        int16_t  off   = msg->Payload.Statemaps[CM_MSG_MAP(0)].offset;
-+
-+        CPRINTF4 (((type == CM_MSG_TYPE_DISCOVER_LEADER) || (type == CM_MSG_TYPE_DISCOVER_SUBORDINATE)) ? 6 : 3, /* we expect broadcasts to be NACKed */
-+                  "%s: msg %d type %d failed%s\n", cmRail->Rail->Name, msgNumber, type,
-+                  (type != CM_MSG_TYPE_HEARTBEAT) ? "" : nmaps == 0 ? ": null heartbeat" :
-+                  off == STATEMAP_RESET ? ": heartbeat with R statemaps" : ": heartbeat with statemaps");
-+#endif
-+        return 0;
-+    }
-+
-+    case EP_OUTPUTQ_FINISHED:
-+        return 0;
-+
-+    default:
-+        panic ("MsgBusy - bad return code from ep_outputq_state\n");
-+        /* NOTREACHED */
-+    }
-+    return 0;
-+}
-+
-+static void
-+LaunchMessage (CM_RAIL *cmRail, int msgNumber, int vp, int qnum, int retries, int type, int lvl, int nmaps)
-+{
-+    CM_MSG *msg = ep_outputq_msg (cmRail->Rail, cmRail->MsgQueue, msgNumber);
-+    CM_HDR *hdr = &msg->Hdr;
-+
-+    ASSERT (nmaps >= 0 && nmaps <= CM_MSG_MAXMAPS);
-+    ASSERT (SPINLOCK_HELD (&cmRail->Lock));
-+
-+    hdr->Version   = CM_MSG_VERSION;
-+    hdr->ParamHash = cmRail->ParamHash;
-+    hdr->Timestamp = cmRail->Timestamp;
-+    hdr->Checksum  = 0;
-+    hdr->NodeId    = cmRail->Rail->Position.pos_nodeid;
-+    hdr->MachineId = MachineId;
-+    hdr->NumMaps   = nmaps;
-+    hdr->Level     = lvl;
-+    hdr->Type      = type;
-+    hdr->Checksum  = CheckSum ((char *)msg + CM_MSG_BASE(nmaps), CM_MSG_SIZE(nmaps));
-+
-+    if (BrokenLevel != -1 && (lvl >= ((BrokenLevel >> (cmRail->Rail->Number*4)) & 0xf)))    /* Simulate broken network? */
-+        return;
-+
-+    if (ep_outputq_send (cmRail->Rail, cmRail->MsgQueue, msgNumber,
-+                         CM_MSG_SIZE(nmaps), vp, qnum, retries))    /* launch failed - update stats */
-+        IncrStat (cmRail, LaunchMessageFail);
-+}
-+
-+static int
-+SendMessage (CM_RAIL *cmRail, int nodeId, int lvl, int type)
-+{
-+    int msgNumber = CM_NUM_NODE_MSG_BUFFERS + cmRail->NextSpareMsg;
-+    int n         = CM_NUM_SPARE_MSG_BUFFERS;
-+    int retries;
-+
-+    ASSERT (type == CM_MSG_TYPE_IMCOMING ||     /* other types must use SendToSgmt */
-+            type == CM_MSG_TYPE_REJOIN);
-+
-+    while (n-- > 0 && MsgBusy (cmRail, msgNumber))      /* search for idle "spare" buffer */
-+    {
-+        if (++(cmRail->NextSpareMsg) == CM_NUM_SPARE_MSG_BUFFERS)
-+            cmRail->NextSpareMsg = 0;
-+
-+        msgNumber = CM_NUM_NODE_MSG_BUFFERS + cmRail->NextSpareMsg;
-+    }
-+
-+    if (n < 0)                                  /* all "spare" message buffers busy */
-+    {
-+        CPRINTF3 (3, "%s: all spare message buffers busy: trying to send type %d to %d\n",
-+                  cmRail->Rail->Name, type, nodeId);
-+        return (0);
-+    }
-+
-+    /* NB IMCOMING may be echoed by MANY nodes, so we don't (and mustn't) have any retries */
-+    retries = (type == CM_MSG_TYPE_IMCOMING) ? 
0 : CM_P2P_DMA_RETRIES; -+ -+ LaunchMessage (cmRail, msgNumber, EP_VP_NODE (nodeId), EP_SYSTEMQ_INTR, /* eager receive */ -+ retries, type, lvl, 0); -+ -+ if (++(cmRail->NextSpareMsg) == CM_NUM_SPARE_MSG_BUFFERS) /* check this one last next time */ -+ cmRail->NextSpareMsg = 0; -+ -+ return (1); -+} -+ -+static int -+SendToSgmt (CM_RAIL *cmRail, CM_SGMT *sgmt, int type) -+{ -+ bitmap_t seg; -+ int offset; -+ int nmaps; -+ int sidx; -+ int clvl; -+ -+ ASSERT (sgmt->Level <= cmRail->TopLevel); -+ -+ if (MsgBusy (cmRail, sgmt->MsgNumber)) /* previous message still busy */ -+ { -+ CPRINTF3 (3, "%s: node message buffer busy: trying to send type %d to %d\n", -+ cmRail->Rail->Name, type, sgmt->NodeId); -+ -+ return (0); -+ } -+ -+ switch (type) -+ { -+ case CM_MSG_TYPE_RESOLVE_LEADER: -+ case CM_MSG_TYPE_DISCOVER_LEADER: -+ ASSERT (sgmt->State == CM_SGMT_ABSENT); -+ ASSERT (sgmt->Level == ((cmRail->Role == CM_ROLE_LEADER_CANDIDATE) ? cmRail->TopLevel : cmRail->TopLevel - 1)); -+ ASSERT (sgmt->Level < cmRail->NumLevels); -+ ASSERT (sgmt->Sgmt == cmRail->Levels[sgmt->Level].MySgmt); -+ -+ /* broadcast to me and all my peers at this level (== my segment in the level above) */ -+ sidx = (sgmt->Level == cmRail->NumLevels - 1) ? 0 : cmRail->Levels[sgmt->Level + 1].MySgmt; -+ -+ LaunchMessage (cmRail, sgmt->MsgNumber, EP_VP_BCAST (sgmt->Level + 1, sidx), -+ EP_SYSTEMQ_INTR, 0, /* eager rx; no retries */ -+ type, sgmt->Level, 0); -+ return (1); -+ -+ case CM_MSG_TYPE_DISCOVER_SUBORDINATE: -+ ASSERT (sgmt->Sgmt != cmRail->Levels[sgmt->Level].MySgmt); -+ ASSERT (sgmt->State == CM_SGMT_WAITING); -+ ASSERT (sgmt->Level > 0); /* broadcasting just to subtree */ -+ -+ LaunchMessage (cmRail, sgmt->MsgNumber, EP_VP_BCAST (sgmt->Level, sgmt->Sgmt), -+ EP_SYSTEMQ_INTR, 0, /* eager rx; no retries */ -+ CM_MSG_TYPE_DISCOVER_SUBORDINATE, sgmt->Level, 0); -+ return (1); -+ -+ case CM_MSG_TYPE_NOTIFY: -+ ASSERT (sgmt->State == CM_SGMT_PRESENT); -+ -+ LaunchMessage (cmRail, sgmt->MsgNumber, EP_VP_NODE (sgmt->NodeId), -+ EP_SYSTEMQ_INTR, CM_P2P_DMA_RETRIES, /* eager rx; lots of retries */ -+ CM_MSG_TYPE_NOTIFY, sgmt->Level, 0); -+ return (1); -+ -+ case CM_MSG_TYPE_HEARTBEAT: -+ { -+ CM_MSG *msg = ep_outputq_msg (cmRail->Rail, cmRail->MsgQueue, sgmt->MsgNumber); -+ CM_HDR *hdr = &msg->Hdr; -+ -+ ASSERT (sgmt->State == CM_SGMT_PRESENT); -+ -+ hdr->AckSeq = sgmt->AckSeq; -+ -+ if (!sgmt->MsgAcked) /* Current message not acknowledged */ -+ { -+ /* must have been something significant to require an ack */ -+ ASSERT (sgmt->SendMaps); -+ ASSERT (sgmt->NumMaps > 0); -+ -+ CPRINTF3 (3, "%s: retrying heartbeat to %d (%d entries)\n", cmRail->Rail->Name, sgmt->NodeId, sgmt->NumMaps); -+ -+ IncrStat (cmRail, RetryHeartbeat); -+ -+ nmaps = sgmt->NumMaps; -+ } -+ else -+ { -+ nmaps = 0; -+ -+ if (sgmt->SendMaps) /* can send maps */ -+ { -+ for (clvl = sgmt->Level; clvl < cmRail->NumLevels; clvl++) -+ { -+ if (!sgmt->Maps[clvl].OutputMapValid) -+ continue; -+ -+ while ((offset = statemap_findchange (sgmt->Maps[clvl].OutputMap, &seg, 1)) >= 0) -+ { -+ CM_STATEMAP_ENTRY *map = &msg->Payload.Statemaps[CM_MSG_MAP(nmaps)]; -+ -+ sgmt->Maps[clvl].SentChanges = 1; -+ -+ map->level = clvl; -+ map->offset = offset; -+ map->seg[0] = seg & 0xffff; -+ map->seg[1] = (seg >> 16) & 0xffff; -+#if (BT_ULSHIFT == 6) -+ map->seg[2] = (seg >> 32) & 0xffff; -+ map->seg[3] = (seg >> 48) & 0xffff; -+#elif (BT_ULSHIFT != 5) -+#error "Bad value for BT_ULSHIFT" -+#endif -+ if (++nmaps == CM_MSG_MAXMAPS) -+ goto msg_full; -+ } -+ -+ if 
(sgmt->Maps[clvl].SentChanges)
-+                {
-+                    CM_STATEMAP_ENTRY *map = &msg->Payload.Statemaps[CM_MSG_MAP(nmaps)];
-+
-+                    sgmt->Maps[clvl].SentChanges = 0;
-+
-+                    map->level  = clvl;
-+                    map->offset = STATEMAP_NOMORECHANGES;
-+
-+                    if (++nmaps == CM_MSG_MAXMAPS)
-+                        goto msg_full;
-+                }
-+            }
-+        }
-+
-+        ASSERT (nmaps < CM_MSG_MAXMAPS);
-+
-+    msg_full:
-+        sgmt->NumMaps = nmaps;                  /* remember how many in case we retry */
-+
-+        if (nmaps == 0)                         /* no changes to send */
-+            hdr->Seq = sgmt->MsgSeq;            /* this one can be dropped */
-+        else
-+        {
-+            hdr->Seq = ++(sgmt->MsgSeq);        /* on to next message number */
-+            sgmt->MsgAcked = 0;                 /* need this one to be acked before I can send another */
-+
-+            IncrStat (cmRail, MapChangesSent);
-+        }
-+    }
-+
-+    LaunchMessage (cmRail, sgmt->MsgNumber, EP_VP_NODE (sgmt->NodeId),
-+                   EP_SYSTEMQ_POLLED, CM_P2P_DMA_RETRIES,   /* polled receive, lots of retries */
-+                   CM_MSG_TYPE_HEARTBEAT, sgmt->Level, nmaps);
-+
-+    IncrStat (cmRail, HeartbeatsSent);
-+
-+    return (1);
-+    }
-+
-+    default:                                    /* other types must use SendMessage */
-+        printk ("SendToSgmt: invalid type %d\n", type);
-+        ASSERT (0);
-+
-+        return (1);
-+    }
-+}
-+
-+static char *
-+GlobalStatusString (statemap_t *map, int idx)
-+{
-+    char *strings[] = {"....", "S...", "C...", "R...",
-+                       ".s..", "Ss..", "Cs..", "Rs..",
-+                       "..r.", "S.r.", "C.r.", "R.r.",
-+                       ".sr.", "Ssr.", "Csr.", "Rsr.",
-+                       "...R", "S..R", "C..R", "R..R",
-+                       ".s.R", "Ss.R", "Cs.R", "Rs.R",
-+                       "..rR", "S.rR", "C.rR", "R.rR",
-+                       ".srR", "SsrR", "CsrR", "RsrR"};
-+
-+    return (strings[statemap_getbits (map, idx * CM_GSTATUS_BITS, CM_GSTATUS_BITS)]);
-+}
-+
-+static char *
-+MapString (char *name, statemap_t *map, int nnodes, char *trailer)
-+{
-+    static char *space;
-+    int i;
-+
-+    if (space == NULL)
-+        KMEM_ALLOC (space, char *, EP_MAX_NODES*(CM_GSTATUS_BITS+1), 0);
-+
-+    if (space == NULL)
-+        return ("");
-+    else
-+    {
-+        char *ptr = space;
-+
-+        sprintf (space, "%s ", name); ptr += strlen (ptr);
-+        for (i = 0; i < nnodes; i++, ptr += strlen (ptr))
-+            sprintf (ptr, "%s%s", i == 0 ? "" : ",", GlobalStatusString (map, i));
-+        sprintf (ptr, " %s", trailer);
-+        return (space);
-+    }
-+}
-+
-+void
-+DisplayMap (DisplayInfo *di, CM_RAIL *cmRail, char *name, statemap_t *map, int nnodes, char *trailer)
-+{
-+    char  linebuf[256];
-+    char *ptr = linebuf;
-+    int   i;
-+
-+#define NODES_PER_LINE 32
-+    for (i = 0; i < nnodes; i++)
-+    {
-+        if (ptr == linebuf)
-+        {
-+            sprintf (ptr, "%4d", i);
-+            ptr += strlen (ptr);
-+        }
-+
-+        sprintf (ptr, ",%s", GlobalStatusString (map, i));
-+        ptr += strlen (ptr);
-+
-+        if ((i % NODES_PER_LINE) == (NODES_PER_LINE-1) || (i == (nnodes-1)))
-+        {
-+            (di->func)(di->arg, "%s: %s %s %s\n", cmRail->Rail->Name, name, linebuf, trailer);
-+            ptr = linebuf;
-+        }
-+    }
-+#undef NODES_PER_LINE
-+}
-+
-+void
-+DisplayNodeMaps (DisplayInfo *di, CM_RAIL *cmRail)
-+{
-+    int  lvl;
-+    int  clvl;
-+    char mapname[128];
-+
-+    (di->func)(di->arg, "%s: Node %d maps...\n", cmRail->Rail->Name, cmRail->Rail->Position.pos_nodeid);
-+
-+    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
-+    {
-+        int nnodes = cmRail->Levels[clvl].NumNodes;
-+
-+        (di->func)(di->arg, "%s: Cluster level %d: Connected %ld - %s%s\n",
-+                   cmRail->Rail->Name, clvl, cmRail->Levels[clvl].Connected,
-+                   cmRail->Levels[clvl].Online ? "Online" : "Offline",
-+                   cmRail->Levels[clvl].Restarting ? 
", Restarting" : ""); -+ -+ for (lvl = 0; lvl < cmRail->TopLevel && lvl <= clvl; lvl++) -+ { -+ CM_LEVEL *level = &cmRail->Levels[lvl]; -+ -+ sprintf (mapname, "%10s%2d", "Level", lvl); -+ DisplayMap (di, cmRail, mapname, level->SubordinateMap[clvl], nnodes, -+ level->SubordinateMapValid[clvl] ? "" : "(invalid)"); -+ } -+ -+ sprintf (mapname, "%12s", "Local"); -+ DisplayMap (di, cmRail, mapname, cmRail->Levels[clvl].LocalMap, nnodes, ""); -+ -+ sprintf (mapname, "%12s", "Subtree"); -+ DisplayMap (di, cmRail, mapname, cmRail->Levels[clvl].SubTreeMap, nnodes, -+ cmRail->Levels[clvl].SubTreeMapValid ? "" : "(invalid)"); -+ -+ sprintf (mapname, "%12s", "Global"); -+ DisplayMap (di, cmRail, mapname, cmRail->Levels[clvl].GlobalMap, nnodes, -+ cmRail->Levels[clvl].GlobalMapValid ? "" : "(invalid)"); -+ -+ sprintf (mapname, "%12s", "LastGlobal"); -+ DisplayMap (di, cmRail, mapname, cmRail->Levels[clvl].LastGlobalMap, nnodes, ""); -+ } -+} -+ -+void -+DisplayNodeSgmts (DisplayInfo *di, CM_RAIL *cmRail) -+{ -+ int lvl; -+ int sidx; -+ -+ (di->func)(di->arg, "%s: Node %d segments...\n", cmRail->Rail->Name, cmRail->NodeId); -+ -+ for (lvl = 0; lvl <= cmRail->TopLevel && lvl < cmRail->NumLevels; lvl++) -+ { -+ (di->func)(di->arg, " level %d: ", lvl); -+ -+ for (sidx = 0; sidx < ((lvl == cmRail->TopLevel) ? 1 : cmRail->Levels[lvl].NumSegs); sidx++) -+ { -+ CM_SGMT *sgmt = &cmRail->Levels[lvl].Sgmts[sidx]; -+ -+ if (sgmt->State == CM_SGMT_PRESENT) -+ (di->func)(di->arg, "[%d, in: %d out: %d %s%s]", -+ sgmt->NodeId, -+ sgmt->AckSeq, -+ sgmt->MsgSeq, -+ sgmt->MsgAcked ? "A" : "-", -+ sgmt->SendMaps ? "!" : "-"); -+ else -+ (di->func)(di->arg, "[%s]", (sgmt->State == CM_SGMT_ABSENT ? "absent" : -+ sgmt->State == CM_SGMT_WAITING ? "waiting" : -+ sgmt->State == CM_SGMT_COMING ? "coming" : "UNKNOWN")); -+ } -+ (di->func)(di->arg, "\n"); -+ } -+} -+ -+ -+static void -+StartConnecting (CM_RAIL *cmRail, CM_SGMT *sgmt, int NodeId, int Timestamp) -+{ -+ int clvl; -+ -+ CPRINTF4 (2, "%s: lvl %d subtree %d node %d -> connecting\n", cmRail->Rail->Name, sgmt->Level, sgmt->Sgmt, NodeId); -+ -+ /* Only reconnect the same guy if he was reborn */ -+ ASSERT (sgmt->State != CM_SGMT_PRESENT || -+ (sgmt->NodeId == NodeId && sgmt->Timestamp != Timestamp)); -+ -+ /* After we've connected to a new peer, we wait to receive -+ * STATEMAP_RESET before we accumulate changes and we wait for a -+ * complete map to be received before we propagate changes to other -+ * nodes. -+ * -+ * If I'm the subordinate, I can start sending maps right away, since -+ * the leader is ready for them already. If I'm the leader, I hold off -+ * sending maps until I've seen the subordinate's first heartbeat, -+ * because the subordinate might miss my NOTIFY message, still think -+ * she's a leader candidate and ignore my heartbeats. 
-+ */ -+ sgmt->SendMaps = (sgmt->Level == cmRail->TopLevel); /* I can send maps to my leader (she NOTIFIED me) */ -+ -+ for (clvl = sgmt->Level; clvl < cmRail->NumLevels; clvl++) -+ { -+ statemap_reset (sgmt->Maps[clvl].CurrentInputMap); -+ statemap_reset (sgmt->Maps[clvl].InputMap); -+ statemap_reset (sgmt->Maps[clvl].OutputMap); -+ -+ sgmt->Maps[clvl].InputMapValid = 0; -+ sgmt->Maps[clvl].OutputMapValid = 0; -+ sgmt->Maps[clvl].SentChanges = 0; -+ -+ if (sgmt->Level == cmRail->TopLevel) /* connection to leader */ -+ { -+ ASSERT (sgmt->Sgmt == 0); -+ ASSERT (cmRail->Role == CM_ROLE_SUBORDINATE); -+ -+ if (cmRail->Levels[clvl].SubTreeMapValid) /* already got a subtree map to send up */ -+ { -+ statemap_setmap (sgmt->Maps[clvl].OutputMap, cmRail->Levels[clvl].SubTreeMap); -+ sgmt->Maps[clvl].OutputMapValid = 1; -+ -+ statemap_clearchanges (cmRail->Levels[clvl].SubTreeMap); -+ } -+ } -+ else /* connection to subordinate */ -+ { -+ ASSERT (sgmt->Sgmt != cmRail->Levels[sgmt->Level].MySgmt); -+ -+ if (cmRail->Levels[clvl].GlobalMapValid) /* already got a global map to broadcast */ -+ { -+ statemap_setmap (sgmt->Maps[clvl].OutputMap, cmRail->Levels[clvl].GlobalMap); -+ sgmt->Maps[clvl].OutputMapValid = 1; -+ } -+ } -+ } -+ -+ /* Initialise sequence counters */ -+ sgmt->MsgSeq = sgmt->AckSeq = 0; -+ sgmt->MsgAcked = 1; /* ready to send a new sequenced message */ -+ -+ sgmt->State = CM_SGMT_PRESENT; -+ sgmt->NodeId = NodeId; -+ sgmt->UpdateTick = lbolt; -+ sgmt->Timestamp = Timestamp; -+} -+ -+static void -+StartSubTreeDiscovery (CM_RAIL *cmRail, CM_SGMT *sgmt) -+{ -+ sgmt->State = CM_SGMT_WAITING; -+ sgmt->UpdateTick = lbolt; -+ sgmt->WaitingTick = lbolt; -+ -+ if (sgmt->Level > 0) -+ __Schedule_Discovery (cmRail); -+} -+ -+void -+StartSubordinateDiscovery (CM_RAIL *cmRail) -+{ -+ int i; -+ int lvl = cmRail->TopLevel - 1; -+ CM_LEVEL *level = &cmRail->Levels[lvl]; -+ -+ ASSERT (lvl >= 0 && lvl < cmRail->NumLevels); -+ -+ for (i = 0; i < level->NumSegs; i++) -+ { -+ CM_SGMT *sgmt = &level->Sgmts[i]; -+ -+ if (i != level->MySgmt) /* No-one should connect here */ -+ StartSubTreeDiscovery (cmRail, sgmt); -+ } -+} -+ -+void -+StartLeaderDiscovery (CM_RAIL *cmRail) -+{ -+ int i; -+ int clvl; -+ CM_LEVEL *level = &cmRail->Levels[cmRail->TopLevel]; -+ -+ ASSERT (cmRail->TopLevel < cmRail->NumLevels); -+ -+ for (clvl = cmRail->TopLevel; clvl < cmRail->NumLevels; clvl++) -+ { -+ cmRail->Levels[clvl].GlobalMapValid = 0; -+ cmRail->Levels[clvl].SubTreeMapValid = 0; -+ level->SubordinateMapValid[clvl] = 0; -+ } -+ -+ for (i = 0; i < level->NumSegs; i++) -+ { -+ CM_SGMT *sgmt = &level->Sgmts[i]; -+ -+ sgmt->State = CM_SGMT_ABSENT; -+ } -+ -+ cmRail->DiscoverStartTick = lbolt; -+ cmRail->Role = CM_ROLE_LEADER_CANDIDATE; -+ -+ __Schedule_Discovery (cmRail); -+} -+ -+static void -+RaiseTopLevel (CM_RAIL *cmRail) -+{ -+ ASSERT (cmRail->NumLevels != 0); -+ ASSERT (cmRail->TopLevel < cmRail->NumLevels); -+ -+ CPRINTF2 (2, "%s: RaiseTopLevel %d\n", cmRail->Rail->Name, cmRail->TopLevel + 1); -+ -+ if (++cmRail->TopLevel == cmRail->NumLevels) /* whole machine leader? 
*/ -+ cmRail->Role = CM_ROLE_LEADER; -+ else -+ StartLeaderDiscovery (cmRail); /* look for my leader */ -+ -+ StartSubordinateDiscovery (cmRail); /* and any direct subordinates */ -+} -+ -+static void -+LowerTopLevel (CM_RAIL *cmRail, int lvl) -+{ -+ ASSERT (cmRail->NumLevels != 0); -+ ASSERT (lvl < cmRail->NumLevels); -+ -+ CPRINTF2 (2, "%s: LowerTopLevel %d\n", cmRail->Rail->Name, lvl); -+ -+ if (lvl == 0) -+ cmRail->Timestamp = lbolt; -+ -+ cmRail->TopLevel = lvl; -+ -+ StartLeaderDiscovery (cmRail); /* look for my leader */ -+} -+ -+static int -+IShouldLead (CM_RAIL *cmRail, CM_MSG *msg) -+{ -+ /* NB, this function MUST be consistently calculated on any nodes, just -+ * from the info supplied in the message. Otherwise leadership -+ * arbitration during concurrent discovery will fail. -+ */ -+ return (cmRail->NodeId < msg->Hdr.NodeId); -+} -+ -+static int -+SumCheck (CM_MSG *msg) -+{ -+ CM_HDR *hdr = &msg->Hdr; -+ uint16_t sum = hdr->Checksum; -+ uint16_t nmaps = hdr->NumMaps; -+ -+ if (nmaps > CM_MSG_MAXMAPS) { -+ printk ("SumCheck: nmaps %d > CM_MSG_MAXMAPS\n", nmaps); -+ return 0; -+ } -+ -+ if ((hdr->Type != CM_MSG_TYPE_HEARTBEAT) && nmaps != 0) { -+ printk ("SumCheck: type(%d) not HEARTBEAT and nmaps(%d) != 0\n", hdr->Type, nmaps); -+ return 0; -+ } -+ -+ hdr->Checksum = 0; -+ -+ if (CheckSum ((char *)msg + CM_MSG_BASE(nmaps), CM_MSG_SIZE(nmaps)) != sum) { -+ printk ("SumCheck: checksum failed %x %x\n", CheckSum ((char *)msg + CM_MSG_BASE(nmaps), CM_MSG_SIZE(nmaps)), sum); -+ -+ return 0; -+ } -+ -+ return 1; -+} -+ -+static void -+ProcessMessage (EP_RAIL *rail, void *arg, void *msgbuf) -+{ -+ CM_RAIL *cmRail = (CM_RAIL *) arg; -+ CM_MSG *msg = (CM_MSG *) msgbuf; -+ CM_HDR *hdr = &msg->Hdr; -+ int lvl; -+ int sidx; -+ CM_LEVEL *level; -+ CM_SGMT *sgmt; -+ bitmap_t seg; -+ int i; -+ int delay; -+ static long tlast; -+ static int count; -+ -+ /* Poll the message Version field until the message has completely -+ * arrived in main memory. */ -+ for (delay = 1; hdr->Version == EP_SYSTEMQ_UNRECEIVED && delay < EP_SYSTEMQ_UNRECEIVED_TLIMIT; delay <<= 1) -+ DELAY (delay); -+ -+ /* Display a message every 60 seconds if we see an "old" format message */ -+ if (hdr->Version == EP_SYSTEMQ_UNRECEIVED && (((lbolt - tlast) > 60*HZ) ? 
(count = 0) : ++count) < 1) -+ { -+ printk ("%s: received old protocol message (type %d from node %d)\n", cmRail->Rail->Name, -+ ((uint8_t *) msg)[20], ((uint16_t *) msg)[4]); -+ -+ tlast = lbolt; -+ goto finished; -+ } -+ -+ if (hdr->Version != CM_MSG_VERSION || hdr->ParamHash != cmRail->ParamHash || hdr->MachineId != MachineId) -+ { -+ CPRINTF8 (1, "%s: invalid message : Version %08x (%08x) ParamHash %08x (%08x) MachineId %04x (%04x) Nodeid %d\n", cmRail->Rail->Name, -+ hdr->Version, CM_MSG_VERSION, hdr->ParamHash, cmRail->ParamHash, hdr->MachineId, MachineId, hdr->NodeId); -+ goto finished; -+ } -+ -+ if (!SumCheck (msg)) -+ { -+ printk ("%s: checksum failed on msg from %d?\n", cmRail->Rail->Name, hdr->NodeId); -+ goto finished; -+ } -+ -+ if (hdr->NodeId == cmRail->NodeId) /* ignore my own broadcast */ -+ { -+ CPRINTF3 (6, "%s: node %d type %d: ignored (MESSAGE FROM ME)\n", -+ cmRail->Rail->Name, hdr->NodeId, hdr->Type); -+ -+ if (hdr->Type != CM_MSG_TYPE_DISCOVER_LEADER && hdr->Type != CM_MSG_TYPE_RESOLVE_LEADER) -+ printk ("%s: node %d type %d: ignored (MESSAGE FROM ME)\n", -+ cmRail->Rail->Name, hdr->NodeId, hdr->Type); -+ goto finished; -+ } -+ -+ lvl = hdr->Level; -+ level = &cmRail->Levels[lvl]; -+ -+ if (BrokenLevel != -1 && (lvl >= ((BrokenLevel >> (cmRail->Rail->Number*4)) & 0xf))) /* Simulate broken network? */ -+ goto finished; -+ -+ if (lvl >= cmRail->NumLevels || /* from outer space */ -+ hdr->NodeId < level->MinNodeId || /* from outside this level's subtree */ -+ hdr->NodeId >= level->MinNodeId + level->NumNodes) -+ { -+ printk ("%s: lvl %d node %d type %d: ignored (%s)\n", -+ cmRail->Rail->Name, lvl, hdr->NodeId, hdr->Type, -+ lvl >= cmRail->NumLevels ? "level too big for machine" : "outside subtree"); -+ goto finished; -+ } -+ -+ sidx = SegmentNo (cmRail, hdr->NodeId, lvl); -+ sgmt = &level->Sgmts[sidx]; -+ -+ switch (hdr->Type) -+ { -+ case CM_MSG_TYPE_RESOLVE_LEADER: -+ if (lvl >= cmRail->TopLevel) -+ { -+ CPRINTF4 (6, "%s: lvl %d sidx %d node %d RESOLVE_LEADER: ignored (above my level)\n", -+ cmRail->Rail->Name, lvl, sidx, hdr->NodeId); -+ break; -+ } -+ -+ /* someone else thinks they lead at the same level as me */ -+ CPRINTF4 (1, "%s: lvl %d sidx %d node %d RESOLVE_LEADER: !REJOIN (putsch)\n", -+ cmRail->Rail->Name, lvl, sidx, hdr->NodeId); -+ -+ printk ("%s: lvl %d sidx %d node %d RESOLVE_LEADER: !REJOIN (putsch)\n", -+ cmRail->Rail->Name, lvl, sidx, hdr->NodeId); -+ -+ -+ SendMessage (cmRail, hdr->NodeId, lvl, CM_MSG_TYPE_REJOIN); -+ break; -+ -+ case CM_MSG_TYPE_DISCOVER_LEADER: -+ if (lvl > cmRail->TopLevel) -+ { -+ CPRINTF4 (6, "%s: lvl %d sidx %d node %d DISCOVER_LEADER: ignored (above my level)\n", -+ cmRail->Rail->Name, lvl, sidx, hdr->NodeId); -+ break; -+ } -+ -+ if (sidx == level->MySgmt) /* someone I led thinks they lead some of my subtrees */ -+ { -+ CPRINTF4 (1, "%s: lvl %d sidx %d node %d DISCOVER_LEADER: !REJOIN (putsch)\n", -+ cmRail->Rail->Name, lvl, sidx, hdr->NodeId); -+ -+ printk ("%s: lvl %d sidx %d node %d DISCOVER_LEADER: !REJOIN (putsch)\n", -+ cmRail->Rail->Name, lvl, sidx, hdr->NodeId); -+ -+ SendMessage (cmRail, hdr->NodeId, hdr->Level, CM_MSG_TYPE_REJOIN); -+ break; -+ } -+ -+ if (lvl < cmRail->TopLevel) /* I'm the leader of this level */ -+ { -+ if (sgmt->State == CM_SGMT_PRESENT && /* someone thinks someone I lead is dead */ -+ sgmt->NodeId != hdr->NodeId) -+ { -+ /* My subordinate's death could be noticed by one of her peers -+ * before I do. If she _is_ dead, I'll notice before long and -+ * NOTIFY this discover. 
If this discover completes before I -+ * detect my subordinate's death, the discovering node will -+ * try to take over from me, and then I'll RESET her. -+ */ -+ CPRINTF4 (6, "%s: lvl %d sidx %d node %d DISCOVER_LEADER: ignored (got established subordinate)\n", -+ cmRail->Rail->Name, lvl, sidx, hdr->NodeId); -+ return; -+ } -+ -+ if (sgmt->State != CM_SGMT_PRESENT || /* New connection */ -+ sgmt->Timestamp != hdr->Timestamp) /* new incarnation */ -+ StartConnecting (cmRail, sgmt, hdr->NodeId, hdr->Timestamp); -+ -+ CPRINTF4 (2, "%s: lvl %d sidx %d node %d DISCOVER_LEADER: !NOTIFY)\n", -+ cmRail->Rail->Name, lvl, sidx, hdr->NodeId); -+ -+ SendToSgmt (cmRail, sgmt, CM_MSG_TYPE_NOTIFY); -+ break; -+ } -+ -+ ASSERT (lvl == cmRail->TopLevel); -+ -+ if (cmRail->Role == CM_ROLE_SUBORDINATE) -+ { -+ /* I think my leader is alive, in which case she'll NOTIFY this -+ * DISCOVER. If she's dead, I'll start to become a leader -+ * candidate and handle this appropriately. -+ */ -+ CPRINTF3 (6, "%s: lvl %d node %d DISCOVER: ignored (I'm a subordinate)\n", -+ cmRail->Rail->Name, lvl, hdr->NodeId); -+ break; -+ } -+ -+ ASSERT (cmRail->Role == CM_ROLE_LEADER_CANDIDATE); -+ -+ /* A peer at this level is bidding for leadership along with me */ -+ if (IShouldLead (cmRail, msg)) -+ { -+ CPRINTF3 (6, "%s: lvl %d node %d DISCOVER: but I should lead\n", -+ cmRail->Rail->Name, lvl, hdr->NodeId); -+ -+ /* So there _is_ someone there; She'll be seeing my DISCOVER -+ * messages and extending her discovery period, so that when I -+ * become leader, I'll NOTIFY her. In the meantime I'll flag her -+ * activity, so she remains WAITING. -+ */ -+ sgmt->UpdateTick = lbolt; -+ break; -+ } -+ -+ /* Defer to sender... */ -+ CPRINTF3 (6, "%s: lvl %d node %d DISCOVER: delaying me becoming leader\n", -+ cmRail->Rail->Name, lvl, hdr->NodeId); -+ -+ StartLeaderDiscovery (cmRail); -+ break; -+ -+ case CM_MSG_TYPE_DISCOVER_SUBORDINATE: -+ if (lvl <= cmRail->TopLevel) -+ { -+ CPRINTF3 (6, "%s: lvl %d node %d DISCOVER_SUBORDINATE: ignored (from my subtree)\n", -+ cmRail->Rail->Name, lvl, hdr->NodeId); -+ break; -+ } -+ -+ if (cmRail->Role != CM_ROLE_LEADER_CANDIDATE) -+ { -+ CPRINTF3 (6, "%s: lvl %d node %d DISCOVER_SUBORDINATE: ignored (I'm not looking for a leader)\n", -+ cmRail->Rail->Name, lvl, hdr->NodeId); -+ break; -+ } -+ -+ if (hdr->Level > cmRail->BroadcastLevel && AFTER (lbolt, cmRail->BroadcastLevelTick + EP_WITHDRAW_TIMEOUT)) -+ { -+ CPRINTF3 (6, "%s: lvl %d node %d DISCOVER_SUBORDINATE: ignored (broadcast level too low)\n", -+ cmRail->Rail->Name, lvl, hdr->NodeId); -+ break; -+ } -+ -+ CPRINTF3 (2, "%s: lvl %d node %d DISCOVER_SUBORDINATE: !IMCOMING\n", -+ cmRail->Rail->Name, lvl, hdr->NodeId); -+ -+ SendMessage (cmRail, hdr->NodeId, hdr->Level, CM_MSG_TYPE_IMCOMING); -+ break; -+ -+ case CM_MSG_TYPE_IMCOMING: -+ if (lvl > cmRail->TopLevel || /* from peer or node above me */ -+ sgmt->State == CM_SGMT_PRESENT || /* already got a subtree */ -+ sgmt->State == CM_SGMT_ABSENT) /* already written off this subtree */ -+ { -+ CPRINTF4 (2, "%s: lvl %d sidx %d node %d IMCOMING: ignored\n", cmRail->Rail->Name, lvl, sidx, hdr->NodeId); -+ break; -+ } -+ -+ CPRINTF4 (2, "%s: lvl %d sidx %d node %d IMCOMING: waiting...\n", cmRail->Rail->Name, lvl, sidx, hdr->NodeId); -+ -+ sgmt->State = CM_SGMT_COMING; -+ sgmt->UpdateTick = lbolt; -+ break; -+ -+ case CM_MSG_TYPE_NOTIFY: -+ if (cmRail->Role != CM_ROLE_LEADER_CANDIDATE || /* I'm not looking for a leader */ -+ lvl != cmRail->TopLevel) /* at this level */ -+ { -+ /* If this person really 
should be my leader, my existing leader -+ * will time out, and I'll discover this one. */ -+ CPRINTF4 (2, "%s: lvl %d node %d NOTIFY: ignored (%s)\n", -+ cmRail->Rail->Name, lvl, hdr->NodeId, -+ lvl < cmRail->TopLevel ? "already leader" : -+ lvl > cmRail->TopLevel ? "lvl too high" : "already subordinate"); -+ break; -+ } -+ -+ CPRINTF3 (2, "%s: lvl %d node %d NOTIFY: becoming subordinate\n", -+ cmRail->Rail->Name, lvl, hdr->NodeId); -+ -+ cmRail->Role = CM_ROLE_SUBORDINATE; /* Now I've found my level */ -+ StartConnecting (cmRail, &level->Sgmts[0], hdr->NodeId, hdr->Timestamp); -+ break; -+ -+ case CM_MSG_TYPE_HEARTBEAT: -+ if (lvl > cmRail->TopLevel) -+ { -+ CPRINTF3 (2, "%s: lvl %d node %d H/BEAT: ignored (lvl too high)\n", -+ cmRail->Rail->Name, lvl, hdr->NodeId); -+ break; -+ } -+ -+ if (lvl == cmRail->TopLevel) /* heartbeat from my leader */ -+ { -+ if (cmRail->Role == CM_ROLE_LEADER_CANDIDATE) /* but I've not got one */ -+ { -+ /* I'm probably a new incarnation of myself; I'll keep doing -+ * discovery until my previous existence's leader NOTIFY's me. -+ * If I was this node's leader, she'll time me out (I'm not -+ * sending heartbeats to her) and we'll fight it out for -+ * leadership. */ -+ CPRINTF3 (2, "%s: lvl %d node %d H/BEAT ignored (no leader)\n", -+ cmRail->Rail->Name, lvl, hdr->NodeId); -+ break; -+ } -+ sidx = 0; -+ sgmt = &level->Sgmts[0]; -+ } -+ -+ if (sgmt->State != CM_SGMT_PRESENT || /* not fully connected with this guy */ -+ sgmt->NodeId != hdr->NodeId || /* someone else impersonating my peer */ -+ sgmt->Timestamp != hdr->Timestamp) /* new incarnation of my peer */ -+ { -+ CPRINTF4 (1, "%s: lvl %d sidx %d node %d H/BEAT: !REJOIN\n", -+ cmRail->Rail->Name, lvl, sidx, hdr->NodeId); -+ -+ printk ("%s: lvl %d sidx %d node %d H/BEAT: !REJOIN %s\n", -+ cmRail->Rail->Name, lvl, sidx, hdr->NodeId, -+ sgmt->State != CM_SGMT_PRESENT ? "not present" : -+ sgmt->NodeId != hdr->NodeId ? "someone else" : "new incarnation"); -+ -+ SendMessage (cmRail, hdr->NodeId, hdr->Level, CM_MSG_TYPE_REJOIN); -+ break; -+ } -+ -+ if (!((hdr->Seq == sgmt->AckSeq) || /* NOT duplicate message or */ -+ (hdr->Seq == (CM_SEQ)(sgmt->AckSeq + 1))) || /* expected message */ -+ !((hdr->AckSeq == sgmt->MsgSeq) || /* NOT expected ack or */ -+ (hdr->AckSeq == (CM_SEQ)(sgmt->MsgSeq - 1)))) /* duplicate ack */ -+ { -+ CPRINTF9 (1, "%s: lvl %d sidx %d node %d type %d: H/BEAT !REJOIN (out-of-seq) M(%d,a%d) S%d,A%d\n", -+ cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type, -+ (int)hdr->Seq, (int)hdr->AckSeq, (int)sgmt->MsgSeq, (int)sgmt->AckSeq); -+ -+ printk ("%s: lvl %d sidx %d node %d type %d: H/BEAT !REJOIN (out-of-seq) M(%d,a%d) S%d,A%d\n", -+ cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type, -+ (int)hdr->Seq, (int)hdr->AckSeq, (int)sgmt->MsgSeq, (int)sgmt->AckSeq); -+ -+ SendMessage (cmRail, hdr->NodeId, hdr->Level, CM_MSG_TYPE_REJOIN); -+ break; -+ } -+ -+ IncrStat (cmRail, HeartbeatsRcvd); -+ -+ sgmt->UpdateTick = lbolt; -+ sgmt->SendMaps = 1; -+ -+ if (sgmt->MsgSeq == hdr->AckSeq) /* acking current message */ -+ sgmt->MsgAcked = 1; /* can send the next one */ -+ -+ if (hdr->Seq == sgmt->AckSeq) /* discard duplicate (or NULL heartbeat) */ -+ { -+ CPRINTF6 (6, "%s: lvl %d sidx %d node %d type %d: %s H/BEAT\n", -+ cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type, -+ hdr->NumMaps == 0 ? 
"null" : "duplicate"); -+ break; -+ } -+ -+ CPRINTF7 (6, "%s: lvl %d sidx %d node %d type %d: seq %d maps %d H/BEAT\n", -+ cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type, hdr->Seq, hdr->NumMaps); -+ -+ sgmt->AckSeq = hdr->Seq; /* ready to receive next one */ -+ -+ for (i = 0; i < hdr->NumMaps; i++) -+ { -+ CM_STATEMAP_ENTRY *map = &msg->Payload.Statemaps[CM_MSG_MAP(i)]; -+ int clvl = map->level; -+ -+ if (clvl < 0) /* end of message */ -+ break; -+ -+ if (clvl < sgmt->Level) /* bad level */ -+ { -+ CPRINTF6 (1, "%s: lvl %d sidx %d node %d type %d: H/BEAT !REJOIN (bad clevel %d)\n", -+ cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type, clvl); -+ -+ SendMessage (cmRail, hdr->NodeId, hdr->Level, CM_MSG_TYPE_REJOIN); -+ goto finished; -+ } -+ -+ if (map->offset == STATEMAP_NOMORECHANGES) /* end of atomic changes */ -+ { -+ if (!sgmt->Maps[clvl].InputMapValid || /* not set InputMap yet */ -+ statemap_changed (sgmt->Maps[clvl].CurrentInputMap)) /* previously applied changes */ -+ { -+ CPRINTF3 (4, "%s: received new clvl %d map from %d\n", cmRail->Rail->Name, clvl, sgmt->NodeId); -+ -+ statemap_setmap (sgmt->Maps[clvl].InputMap, sgmt->Maps[clvl].CurrentInputMap); -+ sgmt->Maps[clvl].InputMapValid = 1; -+ -+ statemap_clearchanges (sgmt->Maps[clvl].CurrentInputMap); -+ } -+ continue; -+ } -+ -+ seg = ((bitmap_t)map->seg[0]) -+ | (((bitmap_t)map->seg[1]) << 16) -+#if (BT_ULSHIFT == 6) -+ | (((bitmap_t)map->seg[2]) << 32) -+ | (((bitmap_t)map->seg[3]) << 48) -+#elif (BT_ULSHIFT != 5) -+#error "Bad value for BT_ULSHIFT" -+#endif -+ ; -+ statemap_setseg (sgmt->Maps[clvl].CurrentInputMap, map->offset, seg); -+ } -+ break; -+ -+ case CM_MSG_TYPE_REJOIN: -+ CPRINTF5 (1, "%s: lvl %d sidx %d node %d type %d: REJOIN\n", -+ cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type); -+ printk ("%s: lvl %d sidx %d node %d type %d: REJOIN\n", -+ cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type); -+ -+ LowerTopLevel (cmRail, 0); -+ -+ IncrStat (cmRail, RejoinRequest); -+ break; -+ -+ default: -+ printk ("%s: lvl=%d unknown message type %d\n", cmRail->Rail->Name, lvl, hdr->Type); -+ break; -+ } -+ finished: -+ hdr->Version = EP_SYSTEMQ_UNRECEIVED; -+} -+ -+static void -+PollInputQueues (CM_RAIL *cmRail) -+{ -+ ep_poll_inputq (cmRail->Rail, cmRail->IntrQueue, 0, ProcessMessage, cmRail); -+ ep_poll_inputq (cmRail->Rail, cmRail->PolledQueue, 0, ProcessMessage, cmRail); -+} -+ -+static void -+IntrQueueCallback (EP_RAIL *rail, void *arg) -+{ -+ CM_RAIL *cmRail = (CM_RAIL *) arg; -+ unsigned long flags; -+ -+ /* If the lock is held, then don't bother spinning for it, -+ * since the messages will be received at this, or the -+ * next heartbeat */ -+ local_irq_save (flags); -+ if (spin_trylock (&cmRail->Lock)) -+ { -+ if (AFTER (lbolt, cmRail->NextRunTime + MSEC2TICKS(CM_TIMER_SCHEDULE_TIMEOUT))) -+ printk ("%s: heartbeat timer stuck - scheduled\n", cmRail->Rail->Name); -+ else -+ ep_poll_inputq (rail, cmRail->IntrQueue, 0, ProcessMessage, cmRail); -+ spin_unlock (&cmRail->Lock); -+ } -+ local_irq_restore (flags); -+} -+ -+char * -+sprintClPeers (char *str, CM_RAIL *cmRail, int clvl) -+{ -+ int clLo = cmRail->Levels[clvl].MinNodeId; -+ int clHi = clLo + cmRail->Levels[clvl].NumNodes - 1; -+ int subClLo = (clvl == 0) ? cmRail->NodeId : cmRail->Levels[clvl - 1].MinNodeId; -+ int subClHi = subClLo + ((clvl == 0) ? 
0 : cmRail->Levels[clvl - 1].NumNodes - 1);
-+
-+    if (subClHi == clHi)
-+        sprintf (str, "[%d-%d]", clLo, subClLo - 1);
-+    else if (subClLo == clLo)
-+        sprintf (str, "[%d-%d]", subClHi + 1, clHi);
-+    else
-+        sprintf (str, "[%d-%d][%d-%d]", clLo, subClLo - 1, subClHi + 1, clHi);
-+
-+    return (str);
-+}
-+
-+static void
-+RestartComms (CM_RAIL *cmRail, int clvl)
-+{
-+    int base;
-+    int nodeId;
-+    int lstat;
-+    int numClNodes;
-+    int subClMin;
-+    int subClMax;
-+    int myClId;
-+    int thisClId;
-+
-+    myClId     = ClusterIds (cmRail, clvl, &subClMin, &subClMax);
-+    base       = myClId * CM_GSTATUS_BITS;
-+    numClNodes = cmRail->Levels[clvl].NumNodes;
-+
-+    statemap_setbits (cmRail->Levels[clvl].LocalMap, base,
-+                      CM_GSTATUS_CLOSING | CM_GSTATUS_MAY_START | CM_GSTATUS_RESTART, CM_GSTATUS_BITS);
-+    cmRail->Levels[clvl].Restarting = 1;
-+
-+    if (cmRail->Levels[clvl].Online)
-+    {
-+        cmRail->Levels[clvl].Online = 0;
-+
-+        for (thisClId = 0; thisClId < numClNodes; thisClId++)
-+        {
-+            if (thisClId == subClMin)           /* skip sub-cluster; it's just someone in this cluster */
-+            {                                   /* that wants me to restart */
-+                thisClId = subClMax;
-+                continue;
-+            }
-+
-+            nodeId = cmRail->Levels[clvl].MinNodeId + thisClId;
-+            base   = thisClId * CM_GSTATUS_BITS;
-+            lstat  = statemap_getbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_BITS);
-+
-+            if ((lstat & CM_GSTATUS_ACK_MASK) == CM_GSTATUS_MAY_RUN)
-+            {
-+                switch (ep_disconnect_node (cmRail->Rail, nodeId))
-+                {
-+                case EP_NODE_CONNECTING:
-+                    /* gstat must == RUNNING */
-+                    cmRail->Levels[clvl].Connected--;
-+                    break;
-+                case EP_NODE_DISCONNECTED:
-+                    /* CLOSING || STARTING || (lstat & RESTART) */
-+                    break;
-+                }
-+            }
-+        }
-+    }
-+}
-+
-+static void
-+UpdateGlobalStatus (CM_RAIL *cmRail)
-+{
-+    char clNodeStr[32];                         /* [%d-%d][%d-%d] */
-+    int nodeId;
-+    int offset;
-+    int base;
-+    bitmap_t gstat;
-+    bitmap_t lgstat;
-+    bitmap_t lstat;
-+    int clvl;
-+    int numClNodes;
-+    int subClMin;
-+    int subClMax;
-+    int myClId;
-+    int thisClId;
-+    int lastClId;
-+
-+    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
-+    {
-+        if (!cmRail->Levels[clvl].GlobalMapValid ||             /* not got the global map yet */
-+            !statemap_changed (cmRail->Levels[clvl].GlobalMap)) /* no changes to respond to */
-+        {
-+            CPRINTF2 (6, "%s: Got invalid or unchanged clvl %d global map\n", cmRail->Rail->Name, clvl);
-+            continue;
-+        }
-+
-+        CPRINTF2 (5, "%s: Got valid changed clvl %d global map\n", cmRail->Rail->Name, clvl);
-+
-+        lastClId   = -1;
-+        myClId     = ClusterIds (cmRail, clvl, &subClMin, &subClMax);
-+        numClNodes = cmRail->Levels[clvl].NumNodes;
-+
-+        while ((offset = statemap_findchange (cmRail->Levels[clvl].GlobalMap, &gstat, 1)) >= 0)
-+        {
-+            /*
-+             * Check every node that this segment covers - however
-+             * if the last node we checked in the previous segment
-+             * is also the first node in this segment, then skip
-+             * it.
-+             */
-+            if ((thisClId = (offset/CM_GSTATUS_BITS)) == lastClId)
-+                thisClId++;
-+            lastClId = (offset + BT_NBIPUL - 1)/CM_GSTATUS_BITS;
-+
-+            /* check each node that might have changed */
-+            for ( ; thisClId <= lastClId && thisClId < numClNodes; thisClId++)
-+            {
-+                base   = thisClId * CM_GSTATUS_BITS;
-+                nodeId = cmRail->Levels[clvl].MinNodeId + thisClId;
-+
-+                if (thisClId >= subClMin && thisClId <= subClMax)       /* skip sub-cluster */
-+                    continue;
-+
-+                /* This isn't me; I need to sense what this node is driving
-+                 * (just the starting and running bits) and respond
-+                 * appropriately... 
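-+                 *
-+                 * Each node owns CM_GSTATUS_BITS bits of the map, at
-+                 *     base = thisClId * CM_GSTATUS_BITS;
-+                 * masking with CM_GSTATUS_STATUS_MASK keeps the state she
-+                 * drives (STARTING/RUNNING/CLOSING) and drops the
-+                 * acknowledgement bits (CM_GSTATUS_MAY_START/
-+                 * CM_GSTATUS_MAY_RUN) that we drive back.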
-+ */ -+ lgstat = statemap_getbits (cmRail->Levels[clvl].LastGlobalMap, base, CM_GSTATUS_BITS) & CM_GSTATUS_STATUS_MASK; -+ gstat = statemap_getbits (cmRail->Levels[clvl].GlobalMap, base, CM_GSTATUS_BITS) & CM_GSTATUS_STATUS_MASK; -+ -+ if (lgstat == gstat) /* no change in peer state */ -+ continue; -+ -+ CPRINTF5 (3, "%s: Node %d: lgstat %s, gstat %s, lstat %s\n", cmRail->Rail->Name, nodeId, -+ GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, thisClId), -+ GlobalStatusString (cmRail->Levels[clvl].GlobalMap, thisClId), -+ GlobalStatusString (cmRail->Levels[clvl].LocalMap, thisClId)); -+ -+ /* What I'm currently driving as my acknowledgement */ -+ lstat = statemap_getbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_BITS); -+ -+ switch (gstat) -+ { -+ case CM_GSTATUS_STARTING: -+ if ((lgstat == CM_GSTATUS_ABSENT || lgstat == CM_GSTATUS_CLOSING) && lstat == CM_GSTATUS_MAY_START) -+ { -+ CPRINTF2 (1, "%s: ===================node %d STARTING\n", cmRail->Rail->Name, nodeId); -+ -+ ASSERT (cmRail->Rail->Nodes[nodeId].State == EP_NODE_DISCONNECTED); -+ -+ statemap_setbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_MAY_RUN, CM_GSTATUS_BITS); -+ continue; -+ } -+ break; -+ -+ case CM_GSTATUS_RUNNING: -+ if ((lgstat == CM_GSTATUS_ABSENT && lstat == CM_GSTATUS_MAY_START) || -+ (lgstat == CM_GSTATUS_STARTING && lstat == CM_GSTATUS_MAY_RUN)) -+ { -+ CPRINTF3 (1, "%s: ===================node %d%s RUNNING\n", cmRail->Rail->Name, nodeId, -+ lgstat == CM_GSTATUS_ABSENT ? " Already" : ""); -+ -+ ASSERT (cmRail->Rail->Nodes[nodeId].State == EP_NODE_DISCONNECTED); -+ -+ if (cmRail->Levels[clvl].Online) -+ { -+ ep_connect_node (cmRail->Rail, nodeId); -+ -+ cmRail->Levels[clvl].Connected++; -+ } -+ -+ statemap_setbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_MAY_RUN, CM_GSTATUS_BITS); -+ continue; -+ } -+ break; -+ -+ case CM_GSTATUS_CLOSING: -+ CPRINTF4 (1, "%s: ===================node %d CLOSING%s%s\n", cmRail->Rail->Name, nodeId, -+ (lstat & CM_GSTATUS_RESTART) ? " for Restart" : "", -+ cmRail->Levels[clvl].Online ? "" : " (offline)"); -+ -+ if ((lstat & CM_GSTATUS_ACK_MASK) == CM_GSTATUS_MAY_RUN) -+ { -+ switch (ep_disconnect_node (cmRail->Rail, nodeId)) -+ { -+ case EP_NODE_CONNECTING: -+ cmRail->Levels[clvl].Connected--; -+ /* DROPTHROUGH */ -+ case EP_NODE_DISCONNECTED: -+ lstat = CM_GSTATUS_MAY_START; -+ break; -+ } -+ } -+ -+ if ((lstat & CM_GSTATUS_ACK_MASK) == CM_GSTATUS_MAY_START) /* clear restart if we've disconnected */ -+ statemap_setbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_MAY_START, CM_GSTATUS_BITS); -+ continue; -+ -+ default: -+ break; -+ } -+ -+ /* "unexpected" state change forces me to ask her to restart */ -+ if (! (lstat & CM_GSTATUS_RESTART)) /* not requesting restart already */ -+ { -+ CPRINTF5 (1, "%s: ===================node %d %s, old %s new %s\n", cmRail->Rail->Name, nodeId, -+ (gstat == CM_GSTATUS_ABSENT) ? 
"ABSENT" : "REQUEST RESTART", -+ GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, thisClId), -+ GlobalStatusString (cmRail->Levels[clvl].GlobalMap, thisClId)); -+ -+ /* request restart */ -+ if (cmRail->Levels[clvl].Online && lstat == CM_GSTATUS_MAY_RUN) -+ { -+ switch (ep_disconnect_node (cmRail->Rail, nodeId)) -+ { -+ case EP_NODE_CONNECTING: -+ cmRail->Levels[clvl].Connected--; -+ /* DROPTHROUGH */ -+ case EP_NODE_DISCONNECTED: -+ lstat = CM_GSTATUS_MAY_START; -+ break; -+ } -+ } -+ -+ statemap_setbits (cmRail->Levels[clvl].LocalMap, base, lstat | CM_GSTATUS_RESTART, CM_GSTATUS_BITS); -+ continue; -+ } -+ -+ continue; -+ } -+ } -+ -+ /* Now check myself - see what everyone else thinks I'm doing */ -+ base = myClId * CM_GSTATUS_BITS; -+ lstat = statemap_getbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_BITS); -+ gstat = statemap_getbits (cmRail->Levels[clvl].GlobalMap, base, CM_GSTATUS_BITS); -+ lgstat = statemap_getbits (cmRail->Levels[clvl].LastGlobalMap, base, CM_GSTATUS_BITS); -+ -+ if (lgstat == gstat) /* my state in this cluster hasn't changed */ -+ { -+ CPRINTF3 (6, "%s: my clvl %d global status unchanged from %s\n", cmRail->Rail->Name, -+ clvl, GlobalStatusString (cmRail->Levels[clvl].GlobalMap, myClId)); -+ goto all_done; -+ } -+ -+ if ((gstat & CM_GSTATUS_RESTART) != 0) /* someone wants me to restart */ -+ { -+ if ((lstat & CM_GSTATUS_STATUS_MASK) == CM_GSTATUS_CLOSING) /* I'm already restarting */ -+ goto all_done; -+ -+ CPRINTF2 (1, "%s: ===================RESTART REQUEST from %s\n", cmRail->Rail->Name, -+ sprintClPeers (clNodeStr, cmRail, clvl)); -+ -+ printk ("%s: Restart Request from %s\n", cmRail->Rail->Name, -+ sprintClPeers (clNodeStr, cmRail, clvl)); -+ -+ RestartComms (cmRail, clvl); -+ goto all_done; -+ } -+ -+ CPRINTF6 (5, "%s: clvl %d: lgstat %s gstat %s, lstat %s%s\n", cmRail->Rail->Name, clvl, -+ GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, myClId), -+ GlobalStatusString (cmRail->Levels[clvl].GlobalMap, myClId), -+ GlobalStatusString (cmRail->Levels[clvl].LocalMap, myClId), -+ (gstat != lstat) ? " (IGNORED)" : ""); -+ -+ if (gstat != lstat) /* not everyone agrees with me */ -+ goto all_done; -+ -+ switch (lstat) -+ { -+ default: -+ ASSERT (0); /* I never drive this */ -+ -+ case CM_GSTATUS_CLOSING | CM_GSTATUS_MAY_START: /* I can restart now (have seen restart go away) */ -+ ASSERT (!cmRail->Levels[clvl].Online); -+ -+ CPRINTF2 (1,"%s: ===================NODES %s AGREE I MAY START\n", cmRail->Rail->Name, -+ sprintClPeers (clNodeStr, cmRail, clvl)); -+ printk ("%s: ===================NODES %s AGREE I MAY START\n", cmRail->Rail->Name, -+ sprintClPeers (clNodeStr, cmRail, clvl)); -+ -+ statemap_setbits (cmRail->Levels[clvl].LocalMap, base, -+ CM_GSTATUS_STARTING | CM_GSTATUS_MAY_RUN, CM_GSTATUS_BITS); -+ goto all_done; -+ -+ case CM_GSTATUS_STARTING | CM_GSTATUS_MAY_RUN: -+ ASSERT (!cmRail->Levels[clvl].Online); -+ -+ CPRINTF2 (1, "%s: ===================NODES %s AGREE I MAY RUN\n", cmRail->Rail->Name, -+ sprintClPeers (clNodeStr, cmRail, clvl)); -+ printk ("%s: ===================NODES %s AGREE I MAY RUN\n", cmRail->Rail->Name, -+ sprintClPeers (clNodeStr, cmRail, clvl)); -+ -+ statemap_setbits (cmRail->Levels[clvl].LocalMap, base, -+ CM_GSTATUS_RUNNING | CM_GSTATUS_MAY_RUN, CM_GSTATUS_BITS); -+ goto all_done; -+ -+ case CM_GSTATUS_RUNNING | CM_GSTATUS_MAY_RUN: -+ if (! 
cmRail->Levels[clvl].Online) -+ { -+ CPRINTF2 (1, "%s: ===================NODES %s AGREE I'M RUNNING\n", cmRail->Rail->Name, -+ sprintClPeers (clNodeStr, cmRail, clvl)); -+ printk ("%s: ===================NODES %s AGREE I'M RUNNING\n", cmRail->Rail->Name, -+ sprintClPeers (clNodeStr, cmRail, clvl)); -+ -+ cmRail->Levels[clvl].Online = 1; -+ -+ for (thisClId = 0; thisClId < numClNodes; thisClId++) -+ { -+ if (thisClId == subClMin) /* skip sub-cluster */ -+ { -+ thisClId = subClMax; -+ continue; -+ } -+ -+ nodeId = cmRail->Levels[clvl].MinNodeId + thisClId; -+ -+ base = thisClId * CM_GSTATUS_BITS; -+ lstat = statemap_getbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_BITS); -+ gstat = statemap_getbits (cmRail->Levels[clvl].GlobalMap, base, CM_GSTATUS_BITS) & CM_GSTATUS_STATUS_MASK; -+ -+ /* Only connect to her if I see her as running and I'm not requesting her -+ * to restart - this means that I was offline when I saw her transition -+ * to running and haven't seen her in a "bad" state since. */ -+ if (gstat == CM_GSTATUS_RUNNING && ! (lstat & CM_GSTATUS_RESTART)) -+ { -+ CPRINTF5 (1, "%s: node %d lgstat %s gstat %s, lstat %s -> CONNECT\n", cmRail->Rail->Name, nodeId, -+ GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, thisClId), -+ GlobalStatusString (cmRail->Levels[clvl].GlobalMap, thisClId), -+ GlobalStatusString (cmRail->Levels[clvl].LocalMap, thisClId)); -+ -+ if (lstat == CM_GSTATUS_MAY_START) -+ statemap_setbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_MAY_RUN, CM_GSTATUS_BITS); -+ -+ ep_connect_node (cmRail->Rail, nodeId); -+ -+ cmRail->Levels[clvl].Connected++; -+ } -+ } -+ } -+ goto all_done; -+ } -+ -+ all_done: -+ statemap_setmap (cmRail->Levels[clvl].LastGlobalMap, cmRail->Levels[clvl].GlobalMap); -+ } -+} -+ -+static void -+ReduceGlobalMap (CM_RAIL *cmRail, int clvl) -+{ -+ int lvl; -+ int sidx; -+ int recompute; -+ CM_LEVEL *level; -+ int cTopLevel; -+ int cRole; -+ -+ if (clvl < cmRail->TopLevel) -+ { -+ cTopLevel = clvl + 1; -+ cRole = CM_ROLE_LEADER; -+ } -+ else -+ { -+ cTopLevel = cmRail->TopLevel; -+ cRole = cmRail->Role; -+ } -+ -+ /* Update cmRail->Levels[*].SubordinateMap[clvl] for all subordinate levels */ -+ for (lvl = 0; lvl < cTopLevel; lvl++) -+ { -+ level = &cmRail->Levels[lvl]; -+ -+ /* We need to recompute this level's statemap if... -+ * . Previous level's statemap has changes to propagate OR -+ * . This level's statemap has not been computed yet OR -+ * . A subordinate at this level has sent me a change. -+ * Note that we can only do this if all subordinates from this -+ * level down are present with valid statemaps, or absent (i.e. not -+ * timing out). -+ */ -+ -+ ASSERT (lvl == 0 || cmRail->Levels[lvl - 1].SubordinateMapValid[clvl]); -+ -+ recompute = !level->SubordinateMapValid[clvl] || -+ (lvl > 0 && statemap_changed (cmRail->Levels[lvl - 1].SubordinateMap[clvl])); -+ -+ for (sidx = 0; sidx < level->NumSegs; sidx++) -+ { -+ CM_SGMT *sgmt = &level->Sgmts[sidx]; -+ -+ if (!(sgmt->State == CM_SGMT_ABSENT || /* absent nodes contribute zeros */ -+ (sgmt->State == CM_SGMT_PRESENT && /* present nodes MUST have received a map to contribute */ -+ sgmt->Maps[clvl].InputMapValid))) -+ { -+ CPRINTF5 (5, "%s: waiting for clvl %d lvl %d seg %d node %d\n", cmRail->Rail->Name, -+ clvl, lvl, sidx, sgmt->NodeId); -+ -+ /* Gotta wait for this guy, so we can't compute this level, -+ * or any higher levels. 
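-+	     *
-+	     * In outline, the reduction performed below is:
-+	     *     TmpMap  = previous level's SubordinateMap (zero at lvl 0);
-+	     *     TmpMap |= sgmt->Maps[clvl].InputMap for each PRESENT segment
-+	     *               (ABSENT segments contribute zeroes);
-+	     *     level->SubordinateMap[clvl] = TmpMap;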
*/ -+ return; -+ } -+ -+ if (statemap_changed (sgmt->Maps[clvl].InputMap)) -+ { -+ ASSERT (sgmt->Maps[clvl].InputMapValid); -+ -+ recompute = 1; -+ -+ CPRINTF7 (5, "%s: %s clvl %d map from @ %d %d (%d) - %s\n", -+ cmRail->Rail->Name, sgmt->State == CM_SGMT_ABSENT ? "newly absent" : "got new", -+ clvl, lvl, sidx, sgmt->NodeId, -+ MapString ("Input", sgmt->Maps[clvl].InputMap, cmRail->Levels[clvl].NumNodes, "")); -+ } -+ } -+ -+ if (recompute) -+ { -+ if (lvl == 0) -+ statemap_reset (cmRail->Levels[clvl].TmpMap); -+ else -+ { -+ ASSERT (cmRail->Levels[lvl - 1].SubordinateMapValid[clvl]); -+ -+ statemap_copy (cmRail->Levels[clvl].TmpMap, cmRail->Levels[lvl - 1].SubordinateMap[clvl]); -+ statemap_clearchanges (cmRail->Levels[lvl - 1].SubordinateMap[clvl]); -+ } -+ -+ for (sidx = 0; sidx < level->NumSegs; sidx++) -+ { -+ CM_SGMT *sgmt = &level->Sgmts[sidx]; -+ -+ if (sgmt->State != CM_SGMT_ABSENT) /* absent nodes contribute zeroes */ -+ { -+ ASSERT (sgmt->State == CM_SGMT_PRESENT); -+ ASSERT (sgmt->Maps[clvl].InputMapValid); -+ statemap_ormap (cmRail->Levels[clvl].TmpMap, sgmt->Maps[clvl].InputMap); -+ } -+ statemap_clearchanges (sgmt->Maps[clvl].InputMap); -+ } -+ -+ statemap_setmap (level->SubordinateMap[clvl], cmRail->Levels[clvl].TmpMap); -+ level->SubordinateMapValid[clvl] = 1; -+ -+ CPRINTF4 (5, "%s: recompute clvl %d level %d statemap - %s\n", cmRail->Rail->Name, clvl, lvl, -+ MapString ("level", level->SubordinateMap[clvl], cmRail->Levels[clvl].NumNodes, "")); -+ } -+ } -+ -+ if (cRole == CM_ROLE_LEADER_CANDIDATE) /* don't know this cluster's leader yet */ -+ return; -+ -+ ASSERT (cTopLevel == 0 || cmRail->Levels[cTopLevel - 1].SubordinateMapValid[clvl]); -+ -+ /* Update SubTreeMap */ -+ -+ if (!cmRail->Levels[clvl].SubTreeMapValid || -+ statemap_changed (cmRail->Levels[clvl].LocalMap) || -+ (cTopLevel > 0 && statemap_changed (cmRail->Levels[cTopLevel - 1].SubordinateMap[clvl]))) -+ { -+ statemap_copy (cmRail->Levels[clvl].TmpMap, cmRail->Levels[clvl].LocalMap); -+ statemap_clearchanges (cmRail->Levels[clvl].LocalMap); -+ -+ if (cTopLevel > 0) -+ { -+ statemap_ormap (cmRail->Levels[clvl].TmpMap, cmRail->Levels[cTopLevel - 1].SubordinateMap[clvl]); -+ statemap_clearchanges (cmRail->Levels[cTopLevel - 1].SubordinateMap[clvl]); -+ } -+ -+ statemap_setmap (cmRail->Levels[clvl].SubTreeMap, cmRail->Levels[clvl].TmpMap); -+ cmRail->Levels[clvl].SubTreeMapValid = 1; -+ -+ CPRINTF3 (5, "%s: recompute clvl %d subtree map - %s\n", cmRail->Rail->Name, clvl, -+ MapString ("subtree", cmRail->Levels[clvl].SubTreeMap, cmRail->Levels[clvl].NumNodes, "")); -+ } -+ -+ if (cRole == CM_ROLE_SUBORDINATE) /* got a leader (Not me) */ -+ { /* => send SubTreeMap to her */ -+ CM_SGMT *leader = &cmRail->Levels[cmRail->TopLevel].Sgmts[0]; -+ -+ ASSERT (leader->State == CM_SGMT_PRESENT); -+ ASSERT (cmRail->Levels[clvl].SubTreeMapValid); -+ -+ if (!leader->Maps[clvl].OutputMapValid || -+ statemap_changed (cmRail->Levels[clvl].SubTreeMap)) -+ { -+ statemap_setmap (leader->Maps[clvl].OutputMap, cmRail->Levels[clvl].SubTreeMap); -+ leader->Maps[clvl].OutputMapValid = 1; -+ -+ statemap_clearchanges (cmRail->Levels[clvl].SubTreeMap); -+ -+ CPRINTF3 (5, "%s: sending clvl %d subtree map to leader (%d)\n", cmRail->Rail->Name, clvl, leader->NodeId); -+ } -+ } -+} -+ -+void -+BroadcastGlobalMap (CM_RAIL *cmRail, int clvl) -+{ -+ int lvl; -+ int sidx; -+ CM_LEVEL *level; -+ CM_SGMT *leader; -+ int cTopLevel; -+ int cRole; -+ -+ if (clvl < cmRail->TopLevel) -+ { -+ cTopLevel = clvl + 1; -+ cRole = CM_ROLE_LEADER; -+ } -+ else -+ 
{ -+ cTopLevel = cmRail->TopLevel; -+ cRole = cmRail->Role; -+ } -+ -+ switch (cRole) -+ { -+ default: -+ ASSERT (0); -+ -+ case CM_ROLE_LEADER_CANDIDATE: /* don't know this cluster's leader yet */ -+ return; -+ -+ case CM_ROLE_LEADER: /* cluster leader: */ -+ ASSERT (clvl < cmRail->TopLevel); /* set GlobalMap from SubTreeMap */ -+ -+ if (!cmRail->Levels[clvl].SubTreeMapValid) /* can't set global map */ -+ return; -+ -+ if (cmRail->Levels[clvl].GlobalMapValid && /* already set global map */ -+ !statemap_changed (cmRail->Levels[clvl].SubTreeMap)) /* no changes to propagate */ -+ return; -+ -+ statemap_setmap (cmRail->Levels[clvl].GlobalMap, cmRail->Levels[clvl].SubTreeMap); -+ cmRail->Levels[clvl].GlobalMapValid = 1; -+ statemap_clearchanges (cmRail->Levels[clvl].SubTreeMap); -+ -+ CPRINTF2 (5, "%s: whole cluster %d leader setting global map\n", cmRail->Rail->Name, clvl); -+ -+ UpdateGlobalStatus (cmRail); -+ break; -+ -+ case CM_ROLE_SUBORDINATE: /* cluster subordinate: */ -+ ASSERT (clvl >= cmRail->TopLevel); /* receive GlobalMap from leader */ -+ ASSERT (cmRail->TopLevel < cmRail->NumLevels); -+ -+ leader = &cmRail->Levels[cmRail->TopLevel].Sgmts[0]; -+ ASSERT (leader->State == CM_SGMT_PRESENT); -+ -+ if (!leader->Maps[clvl].InputMapValid) /* can't set global map */ -+ return; -+ -+ if (cmRail->Levels[clvl].GlobalMapValid && /* already set global map */ -+ !statemap_changed (leader->Maps[clvl].InputMap)) /* no changes to propagate */ -+ return; -+ -+ statemap_setmap (cmRail->Levels[clvl].GlobalMap, leader->Maps[clvl].InputMap); -+ cmRail->Levels[clvl].GlobalMapValid = 1; -+ statemap_clearchanges (leader->Maps[clvl].InputMap); -+ -+ CPRINTF3 (5, "%s: getting clvl %d global map from leader (%d)\n", cmRail->Rail->Name, clvl, leader->NodeId); -+ -+ UpdateGlobalStatus (cmRail); -+ break; -+ } -+ -+ CPRINTF3 (5, "%s: clvl %d %s\n", cmRail->Rail->Name, clvl, -+ MapString ("global", cmRail->Levels[clvl].GlobalMap, cmRail->Levels[clvl].NumNodes, "")); -+ -+ /* Broadcast global map to all subordinates */ -+ for (lvl = 0; lvl < cTopLevel; lvl++) -+ { -+ level = &cmRail->Levels[lvl]; -+ -+ for (sidx = 0; sidx < level->NumSegs; sidx++) -+ { -+ CM_SGMT *sgmt = &level->Sgmts[sidx]; -+ -+ if (sgmt->State == CM_SGMT_PRESENT) -+ { -+ statemap_setmap (sgmt->Maps[clvl].OutputMap, cmRail->Levels[clvl].GlobalMap); -+ sgmt->Maps[clvl].OutputMapValid = 1; -+ -+ CPRINTF5 (5, "%s: sending clvl %d global map to subordinate %d %d (%d)\n", -+ cmRail->Rail->Name, clvl, lvl, sidx, sgmt->NodeId); -+ } -+ } -+ } -+} -+ -+static void -+CheckPeerPulse (CM_RAIL *cmRail, CM_SGMT *sgmt) -+{ -+ int clvl, sendRejoin; -+ -+ switch (sgmt->State) -+ { -+ case CM_SGMT_ABSENT: -+ break; -+ -+ case CM_SGMT_WAITING: /* waiting for a subtree */ -+ if (!AFTER (lbolt, sgmt->UpdateTick + MSEC2TICKS(CM_DISCOVER_TIMEOUT))) -+ break; -+ -+ CPRINTF3 (2, "%s: lvl %d subtree %d contains no live nodes\n", cmRail->Rail->Name, -+ sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0])); -+ -+ sgmt->State = CM_SGMT_ABSENT; -+ for (clvl = sgmt->Level; clvl < cmRail->NumLevels; clvl++) -+ { -+ statemap_zero (sgmt->Maps[clvl].InputMap); /* need to start propagating zeros (flags change) */ -+ sgmt->Maps[clvl].InputMapValid = 1; /* and must indicate that the map is now valid */ -+ } -+ break; -+ -+ case CM_SGMT_COMING: /* lost/waiting subtree sent me IMCOMING */ -+ ASSERT (sgmt->Level > 0); /* we only do subtree discovery below our own level */ -+ -+ if (AFTER (lbolt, sgmt->WaitingTick + MSEC2TICKS(CM_WAITING_TIMEOUT))) -+ { -+ CPRINTF3 (1, 
"%s: lvl %d subtree %d waiting too long\n", cmRail->Rail->Name, -+ sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0])); -+ printk ("%s: lvl %d subtree %d waiting too long\n", cmRail->Rail->Name, -+ sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0])); -+ -+ sgmt->State = CM_SGMT_ABSENT; -+ for (clvl = sgmt->Level; clvl < cmRail->NumLevels; clvl++) -+ { -+ statemap_zero (sgmt->Maps[clvl].InputMap); /* need to start propagating zeros (flags change) */ -+ sgmt->Maps[clvl].InputMapValid = 1; /* and must indicate that the map is now valid */ -+ } -+ break; -+ } -+ -+ if (!AFTER (lbolt, sgmt->UpdateTick + MSEC2TICKS(CM_DISCOVER_TIMEOUT))) -+ break; -+ -+ CPRINTF3 (2, "%s: lvl %d subtree %d hasn't connected yet\n", cmRail->Rail->Name, -+ sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0])); -+ -+ sgmt->State = CM_SGMT_WAITING; -+ sgmt->UpdateTick = lbolt; -+ -+ if (sgmt->Level > 0) -+ __Schedule_Discovery (cmRail); -+ break; -+ -+ case CM_SGMT_PRESENT: -+ if (!AFTER (lbolt, sgmt->UpdateTick + MSEC2TICKS(CM_HEARTBEAT_TIMEOUT))) -+ break; -+ -+ if (sgmt->Level == cmRail->TopLevel) /* leader died */ -+ { -+ sendRejoin = (sgmt->State == CM_SGMT_PRESENT && sgmt->AckSeq == 0); -+ -+ CPRINTF4 (1, "%s: leader (%d) node %d JUST DIED%s\n", -+ cmRail->Rail->Name, sgmt->Level, sgmt->NodeId, -+ sendRejoin ? ": !REJOIN" : ""); -+ -+ printk ("%s: lvl %d leader (%d) JUST DIED%s\n", -+ cmRail->Rail->Name, sgmt->Level, sgmt->NodeId, -+ sendRejoin ? ": !REJOIN" : ""); -+ -+ if (sendRejoin) -+ { -+ /* she's not sent us any heartbeats even though she responded to a discover -+ * so tell her to rejoin the tree at the bottom, this will mean that she -+ * has to run the heartbeat timer before being able to rejoin the tree. */ -+ SendMessage (cmRail, sgmt->NodeId, sgmt->Level, CM_MSG_TYPE_REJOIN); -+ } -+ -+ StartLeaderDiscovery (cmRail); -+ break; -+ } -+ -+ sendRejoin = (sgmt->State == CM_SGMT_PRESENT && sgmt->AckSeq == 0); -+ -+ CPRINTF5 (2, "%s: lvl %d subordinate %d (%d) JUST DIED%s\n", cmRail->Rail->Name, -+ sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0]), sgmt->NodeId, -+ sendRejoin ? ": !REJOIN" : ""); -+ printk ("%s: lvl %d subordinate %d (%d) JUST DIED%s\n", cmRail->Rail->Name, -+ sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0]), sgmt->NodeId, -+ sendRejoin ? ": !REJOIN" : ""); -+ -+ if (sendRejoin) -+ { -+ /* she's not sent us any heartbeats even though she responded to a discover -+ * so tell her to rejoin the tree at the bottom, this will mean that she -+ * has to run the heartbeat timer before being able to rejoin the tree. 
*/ -+ SendMessage (cmRail, sgmt->NodeId, sgmt->Level, CM_MSG_TYPE_REJOIN); -+ } -+ -+ StartSubTreeDiscovery (cmRail, sgmt); -+ break; -+ -+ default: -+ ASSERT (0); -+ } -+} -+ -+static void -+CheckPeerPulses (CM_RAIL *cmRail) -+{ -+ int lvl; -+ int sidx; -+ -+ /* check children are alive */ -+ for (lvl = 0; lvl < cmRail->TopLevel; lvl++) -+ for (sidx = 0; sidx < cmRail->Levels[lvl].NumSegs; sidx++) -+ CheckPeerPulse (cmRail, &cmRail->Levels[lvl].Sgmts[sidx]); -+ -+ /* check leader is alive */ -+ if (cmRail->Role == CM_ROLE_SUBORDINATE) -+ { -+ ASSERT (cmRail->TopLevel < cmRail->NumLevels); -+ ASSERT (cmRail->Levels[cmRail->TopLevel].Sgmts[0].State == CM_SGMT_PRESENT); -+ -+ CheckPeerPulse (cmRail, &cmRail->Levels[cmRail->TopLevel].Sgmts[0]); -+ } -+} -+ -+static void -+SendHeartbeats (CM_RAIL *cmRail) -+{ -+ int lvl; -+ -+ /* Send heartbeats to my children */ -+ for (lvl = 0; lvl < cmRail->TopLevel; lvl++) -+ { -+ CM_LEVEL *level = &cmRail->Levels[lvl]; -+ int sidx; -+ -+ for (sidx = 0; sidx < level->NumSegs; sidx++) -+ { -+ CM_SGMT *sgmt = &cmRail->Levels[lvl].Sgmts[sidx]; -+ -+ if (sgmt->State == CM_SGMT_PRESENT) -+ SendToSgmt (cmRail, sgmt, CM_MSG_TYPE_HEARTBEAT); -+ } -+ } -+ -+ /* Send heartbeat to my leader */ -+ if (cmRail->Role == CM_ROLE_SUBORDINATE) -+ { -+ ASSERT (cmRail->TopLevel < cmRail->NumLevels); -+ SendToSgmt (cmRail, &cmRail->Levels[cmRail->TopLevel].Sgmts[0], CM_MSG_TYPE_HEARTBEAT); -+ } -+} -+ -+static int -+BroadcastDiscover (CM_RAIL *cmRail) -+{ -+ int sidx; -+ int lvl; -+ int msgType; -+ CM_LEVEL *level; -+ int urgent; -+ -+ ASSERT (cmRail->TopLevel <= cmRail->NumLevels); -+ ASSERT ((cmRail->Role == CM_ROLE_LEADER) ? (cmRail->TopLevel == cmRail->NumLevels) : -+ (cmRail->Role == CM_ROLE_SUBORDINATE) ? (cmRail->Levels[cmRail->TopLevel].Sgmts[0].State == CM_SGMT_PRESENT) : -+ (cmRail->Role == CM_ROLE_LEADER_CANDIDATE)); -+ -+ if (cmRail->Role != CM_ROLE_LEADER_CANDIDATE) /* got a leader/lead whole machine */ -+ { -+ urgent = 0; /* non-urgent leader discovery */ -+ lvl = cmRail->TopLevel - 1; /* on nodes I lead (resolves leader conflicts) */ -+ msgType = CM_MSG_TYPE_RESOLVE_LEADER; -+ } -+ else -+ { -+ urgent = 1; /* urgent leader discovery */ -+ lvl = cmRail->TopLevel; /* on nodes I'd like to lead */ -+ msgType = CM_MSG_TYPE_DISCOVER_LEADER; -+ } -+ -+ if (lvl >= 0) -+ { -+ if (lvl > cmRail->BroadcastLevel) -+ { -+ /* Unable to broadcast at this level in the spanning tree, so we -+ * just continue doing discovery until we are able to broadcast */ -+ CPRINTF4 (6, "%s: broadcast level %d too low to discover %d at level %d\n", -+ cmRail->Rail->Name, cmRail->BroadcastLevel, msgType, lvl); -+ -+ cmRail->DiscoverStartTick = lbolt; -+ } -+ else -+ { -+ level = &cmRail->Levels[lvl]; -+ SendToSgmt (cmRail, &level->Sgmts[level->MySgmt], msgType); -+ } -+ } -+ -+ while (lvl > 0) -+ { -+ level = &cmRail->Levels[lvl]; -+ -+ for (sidx = 0; sidx < level->NumSegs; sidx++) -+ { -+ CM_SGMT *sgmt = &level->Sgmts[sidx]; -+ -+ if (sgmt->State == CM_SGMT_WAITING) -+ { -+ ASSERT (sidx != level->MySgmt); -+ /* Do subordinate discovery. Existing subordinates will -+ * ignore it, but leader candidates will send IMCOMING. -+ * This is always urgent since we'll assume a subtree is -+ * absent if I don't get IMCOMING within the timeout. 
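-+		 * The urgency matters because of how the caller reschedules:
-+		 * a non-zero return from BroadcastDiscover makes
-+		 * DoHeartbeatWork use CM_URGENT_DISCOVER_INTERVAL rather
-+		 * than CM_PERIODIC_DISCOVER_INTERVAL for the next pass.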
-+ */ -+ SendToSgmt (cmRail, sgmt, CM_MSG_TYPE_DISCOVER_SUBORDINATE); -+ urgent = 1; -+ } -+ } -+ lvl--; -+ } -+ -+ return (urgent); -+} -+ -+static void -+CheckBroadcast (CM_RAIL *cmRail) -+{ -+ int clvl; -+ -+ for (clvl = cmRail->NumLevels-1; clvl >= 0 && cmRail->Rail->SwitchBroadcastLevel < cmRail->Levels[clvl].SwitchLevel; clvl--) -+ ; -+ -+ if (cmRail->OfflineReasons || cmRail->Rail->System->Shutdown) -+ clvl = -1; -+ -+ /* if the level at which we can broadcast drops, then we must rejoin the -+ * spanning tree at the highest level for which broadcast is good. */ -+ if (cmRail->BroadcastLevel > clvl && clvl < (int)(cmRail->Role == CM_ROLE_LEADER ? cmRail->TopLevel - 1 : cmRail->TopLevel)) -+ { -+ printk ("%s: REJOINING at level %d because %s\n", cmRail->Rail->Name, clvl+1, -+ (cmRail->OfflineReasons & CM_OFFLINE_MANAGER) ? "of manager thread" : -+ (cmRail->OfflineReasons & CM_OFFLINE_PROCFS) ? "force offline" : -+ cmRail->Rail->System->Shutdown ? "system shutdown" : "broadcast level changed"); -+ LowerTopLevel (cmRail, clvl+1); -+ } -+ -+ if (cmRail->BroadcastLevel != clvl) -+ { -+ cmRail->BroadcastLevel = clvl; -+ cmRail->BroadcastLevelTick = lbolt; -+ } -+ -+ /* schedule the update thread, to withdraw from comms with -+ * nodes "outside" of the valid broadcastable range. */ -+ for (clvl = 0; clvl < cmRail->NumLevels; clvl++) -+ { -+ if (cmRail->BroadcastLevel < clvl) -+ { -+ if (AFTER (lbolt, cmRail->BroadcastLevelTick + EP_WITHDRAW_TIMEOUT) && -+ !(cmRail->Levels[clvl].OfflineReasons & CM_OFFLINE_BROADCAST)) -+ { -+ printk ("%s: Withdraw at Level %d\n", cmRail->Rail->Name, clvl); -+ cmRail->Levels[clvl].OfflineReasons |= CM_OFFLINE_BROADCAST; -+ } -+ } -+ else -+ { -+ if (cmRail->Levels[clvl].OfflineReasons & CM_OFFLINE_BROADCAST) -+ { -+ printk ("%s: Rejoin at Level %d\n", cmRail->Rail->Name, clvl); -+ cmRail->Levels[clvl].OfflineReasons &= ~CM_OFFLINE_BROADCAST; -+ } -+ } -+ } -+ -+} -+ -+static void -+CheckManager (CM_RAIL *cmRail) -+{ -+ long time, state = ep_kthread_state (&cmRail->Rail->System->ManagerThread, &time); -+ -+ if (state == KT_STATE_RUNNING && BEFORE (lbolt, time + MSEC2TICKS(CM_THREAD_RUNNING_TIMEOUT))) -+ state = KT_STATE_SLEEPING; -+ if (state != KT_STATE_SLEEPING && BEFORE (lbolt, time + MSEC2TICKS(CM_THREAD_SCHEDULE_TIMEOUT))) -+ state = KT_STATE_SLEEPING; -+ -+ if ((cmRail->OfflineReasons & CM_OFFLINE_MANAGER) && state == KT_STATE_SLEEPING) -+ { -+ printk ("%s: manager thread unstuck\n", cmRail->Rail->Name); -+ -+ cmRail->OfflineReasons &= ~CM_OFFLINE_MANAGER; -+ } -+ -+ if (!(cmRail->OfflineReasons & CM_OFFLINE_MANAGER) && state != KT_STATE_SLEEPING) -+ { -+ printk ("%s: manager thread stuck - %s\n", cmRail->Rail->Name, -+ state == KT_STATE_SCHEDULED ? "scheduled" : -+ state == KT_STATE_RUNNING ? "running" : -+ state == KT_STATE_STALLED ? 
"stalled" : "unknown"); -+ -+ cmRail->OfflineReasons |= CM_OFFLINE_MANAGER; -+ } -+} -+ -+static void -+CheckOfflineReasons (CM_RAIL *cmRail, int clvl) -+{ -+ int subClMin, subClMax, myClId; -+ char clNodeStr[32]; /* [%d-%d][%d-%d] */ -+ -+ if (cmRail->Levels[clvl].OfflineReasons) -+ { -+ if (cmRail->Levels[clvl].Online) -+ { -+ printk ("%s: Withdraw from %s\n", cmRail->Rail->Name, sprintClPeers (clNodeStr, cmRail, clvl)); -+ -+ RestartComms (cmRail, clvl); -+ } -+ } -+ else -+ { -+ if (cmRail->Levels[clvl].Restarting && cmRail->Levels[clvl].Connected == 0) -+ { -+ printk ("%s: Rejoin with %s\n", cmRail->Rail->Name, sprintClPeers (clNodeStr, cmRail, clvl)); -+ -+ myClId = ClusterIds (cmRail, clvl, &subClMin, &subClMax); -+ -+ ASSERT (statemap_getbits (cmRail->Levels[clvl].LocalMap, myClId * CM_GSTATUS_BITS, CM_GSTATUS_BITS) == -+ (CM_GSTATUS_CLOSING | CM_GSTATUS_MAY_START | CM_GSTATUS_RESTART)); -+ -+ statemap_setbits (cmRail->Levels[clvl].LocalMap, myClId * CM_GSTATUS_BITS, -+ CM_GSTATUS_CLOSING | CM_GSTATUS_MAY_START, CM_GSTATUS_BITS); -+ -+ cmRail->Levels[clvl].Restarting = 0; -+ } -+ } -+} -+ -+void -+DoHeartbeatWork (CM_RAIL *cmRail) -+{ -+ long now = lbolt; -+ int clvl; -+ -+ if ((RejoinCheck || RejoinPanic) && -+ AFTER (now, cmRail->NextRunTime + MSEC2TICKS (CM_TIMER_SCHEDULE_TIMEOUT))) /* If I've been unresponsive for too long */ -+ { -+ /* I'd better reconnect to the network because I've not been playing the game */ -+ CPRINTF4 (1, "%s: REJOINING because I was too slow (heartbeat) [%ld,%ld,(%ld)]\n", cmRail->Rail->Name, now, cmRail->NextRunTime, (long int)MSEC2TICKS (CM_TIMER_SCHEDULE_TIMEOUT)); -+ printk ("%s: REJOINING because I was too slow (heartbeat) [%ld,%ld,(%ld)]\n", cmRail->Rail->Name, now, cmRail->NextRunTime, (long int)MSEC2TICKS (CM_TIMER_SCHEDULE_TIMEOUT)); -+ -+ LowerTopLevel (cmRail, 0); -+ -+ IncrStat (cmRail, RejoinTooSlow); -+ -+ if (RejoinPanic) -+ panic ("ep: REJOINING because I was too slow (heartbeat)\n"); -+ } -+ -+ PollInputQueues (cmRail); -+ -+ if (! BEFORE (now, cmRail->NextDiscoverTime)) -+ { -+ if (BroadcastDiscover (cmRail)) /* urgent discovery required? */ -+ cmRail->NextDiscoverTime = now + MSEC2TICKS (CM_URGENT_DISCOVER_INTERVAL); -+ else -+ cmRail->NextDiscoverTime = now + MSEC2TICKS (CM_PERIODIC_DISCOVER_INTERVAL); -+ -+ if (cmRail->Role == CM_ROLE_LEADER_CANDIDATE && AFTER (now, cmRail->DiscoverStartTick + MSEC2TICKS (CM_DISCOVER_TIMEOUT))) -+ RaiseTopLevel (cmRail); -+ } -+ -+ if (! 
BEFORE (now, cmRail->NextHeartbeatTime)) -+ { -+ CheckPosition (cmRail->Rail); -+ CheckPeerPulses (cmRail); -+ CheckBroadcast (cmRail); -+ CheckManager (cmRail); -+ -+ for (clvl = 0; clvl < cmRail->NumLevels; clvl++) -+ { -+ CheckOfflineReasons (cmRail, clvl); -+ ReduceGlobalMap (cmRail, clvl); -+ BroadcastGlobalMap (cmRail, clvl); -+ } -+ -+ SendHeartbeats (cmRail); -+ -+ /* Compute the next heartbeat time, but "drift" it towards the last -+ * periodic discovery time we saw from the whole machine leader */ -+ cmRail->NextHeartbeatTime = now + MSEC2TICKS (CM_HEARTBEAT_INTERVAL); -+ } -+ -+ if (AFTER (cmRail->NextHeartbeatTime, cmRail->NextDiscoverTime)) -+ cmRail->NextRunTime = cmRail->NextDiscoverTime; -+ else -+ cmRail->NextRunTime = cmRail->NextHeartbeatTime; -+} -+ -+#define CM_SVC_INDICATOR_OFFSET(CMRAIL,CLVL,IND,NODEID) ( ( CMRAIL->Levels[CLVL].NumNodes * CM_GSTATUS_BITS ) \ -+ + ( CMRAIL->Levels[CLVL].NumNodes * IND ) \ -+ + ( NODEID - CMRAIL->Levels[CLVL].MinNodeId ) ) -+int -+cm_svc_indicator_set (EP_RAIL *rail, int svc_indicator) -+{ -+ CM_RAIL *cmRail = rail->ClusterRail; -+ unsigned long flags; -+ int clvl; -+ -+ EPRINTF2 (DBG_SVC,"cm_svc_indicator_set: rail %p ind %d\n", rail, svc_indicator); -+ -+ if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS) -+ { -+ EPRINTF1 (DBG_SVC,"cm_svc_indicator_set: service indicator %d not registered\n", svc_indicator); -+ return (-1); -+ } -+ -+ if (rail->State == EP_RAIL_STATE_UNINITIALISED) -+ return (-2); -+ -+ spin_lock_irqsave (&cmRail->Lock, flags); -+ for (clvl = 0; clvl < cmRail->NumLevels; clvl++) { -+ statemap_setbits (cmRail->Levels[clvl].LocalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, cmRail->NodeId), 1, 1); -+ EPRINTF3 (DBG_SVC,"cm_svc_indicator_set: clvl %d nodeId %d offset %d\n", clvl, cmRail->NodeId, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, cmRail->NodeId)); -+ } -+ spin_unlock_irqrestore (&cmRail->Lock, flags); -+ -+ return (0); -+} -+ -+int -+cm_svc_indicator_clear (EP_RAIL *rail, int svc_indicator) -+{ -+ CM_RAIL *cmRail = rail->ClusterRail; -+ unsigned long flags; -+ int clvl; -+ -+ EPRINTF2 (DBG_SVC, "cm_svc_indicator_clear: rail %p ind %d\n", rail, svc_indicator); -+ -+ if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS) -+ { -+ EPRINTF1 (DBG_SVC, "cm_svc_indicator_clear: service indicator %d not registered\n", svc_indicator); -+ return (-1); -+ } -+ -+ if (rail->State == EP_RAIL_STATE_UNINITIALISED) -+ return (-2); -+ -+ spin_lock_irqsave (&cmRail->Lock, flags); -+ for (clvl = 0; clvl < cmRail->NumLevels; clvl++) { -+ statemap_setbits (cmRail->Levels[clvl].LocalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, cmRail->NodeId), 0, 1); -+ EPRINTF3 (DBG_SVC, "cm_svc_indicator_clear: clvl %d nodeId %d offset %d\n", clvl, cmRail->NodeId, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, cmRail->NodeId)); -+ } -+ spin_unlock_irqrestore (&cmRail->Lock, flags); -+ -+ return (0); -+} -+ -+int -+cm_svc_indicator_is_set (EP_RAIL *rail, int svc_indicator, int nodeId) -+{ -+ CM_RAIL *cmRail = rail->ClusterRail; -+ unsigned long flags; -+ int clvl; -+ bitmap_t bits; -+ -+ EPRINTF4 (DBG_SVC, "cm_svc_indicator_is_set: rail %p ind %d nodeId %d (me=%d)\n", rail, svc_indicator, nodeId, cmRail->NodeId); -+ -+ if (svc_indicator < 0 || svc_indicator > EP_SVC_NUM_INDICATORS) -+ { -+ EPRINTF1 (DBG_SVC, "cm_svc_indicator_is_set: service indicator %d not registered\n", svc_indicator); -+ return (0); -+ } -+ -+ if (rail->State == EP_RAIL_STATE_UNINITIALISED) -+ return (0); -+ -+ 
spin_lock_irqsave (&cmRail->Lock, flags); -+ for (clvl = 0; clvl < cmRail->NumLevels; clvl++) -+ if (nodeId >= cmRail->Levels[clvl].MinNodeId && nodeId < (cmRail->Levels[clvl].MinNodeId + cmRail->Levels[clvl].NumNodes)) -+ break; -+ -+ if ( clvl == cmRail->NumLevels) { -+ EPRINTF1 (DBG_SVC, "cm_svc_indicator_is_set: node out of range %d \n", nodeId); -+ spin_unlock_irqrestore (&cmRail->Lock, flags); -+ return (0); -+ } -+ -+ if ( cmRail->NodeId == nodeId ) -+ bits = statemap_getbits (cmRail->Levels[clvl].LocalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, nodeId), 1); -+ else -+ bits = statemap_getbits (cmRail->Levels[clvl].GlobalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, nodeId), 1); -+ -+ EPRINTF4 (DBG_SVC, "cm_svc_indicator_is_set: clvl %d nodeId %d offset %d %x\n", clvl, nodeId, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, nodeId), bits); -+ -+ spin_unlock_irqrestore (&cmRail->Lock, flags); -+ -+ return ( (bits == 0) ? (0) : (1) ); -+} -+ -+int -+cm_svc_indicator_bitmap (EP_RAIL *rail, int svc_indicator, bitmap_t * bitmap, int low, int nnodes) -+{ -+ /* or in the bit map */ -+ CM_RAIL *cmRail = rail->ClusterRail; -+ int nodeId, clvl; -+ bitmap_t bits; -+ unsigned long flags; -+ int clip_out_low, clip_out_high; -+ int curr_low, curr_high; -+ int check_low, check_high; -+ -+ EPRINTF4 (DBG_SVC, "cm_svc_indicator_bitmap: rail %p ind %d low %d high %d\n", rail, svc_indicator, low, (low + nnodes)); -+ -+ if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS) -+ { -+ EPRINTF1 (DBG_SVC, "cm_svc_indicator_bitmap: service indicator %d not registered\n", svc_indicator); -+ return (-1); -+ } -+ -+ if (rail->State != EP_RAIL_STATE_RUNNING) -+ return (-2); -+ -+ spin_lock_irqsave (&cmRail->Lock, flags); -+ -+ clip_out_low = clip_out_high = -1; /* all in */ -+ for (clvl = 0; clvl < cmRail->NumLevels; clvl++) { -+ -+ /* curr_high/low is the range of the current lvl */ -+ curr_low = cmRail->Levels[clvl].MinNodeId; -+ curr_high = cmRail->Levels[clvl].MinNodeId + cmRail->Levels[clvl].NumNodes; -+ -+ /* find out how much of low high is in this range and only check that part */ -+ check_low = ( low < curr_low) ? curr_low : low; -+ check_high = ( (low + nnodes) > curr_high) ? 
curr_high : (low + nnodes); -+ -+ EPRINTF6 (DBG_SVC, "cm_svc_indicator_bitmap: curr(%d,%d) check(%d,%d) clip(%d,%d)\n", curr_low, curr_high, check_low, check_high, clip_out_low, clip_out_high); -+ -+ for(nodeId = check_low; nodeId < check_high; nodeId++) { -+ -+ if ( (clip_out_low <= nodeId) && (nodeId <= clip_out_high)) -+ nodeId = clip_out_high; /* step over the cliped out section */ -+ else { -+ -+ if ( cmRail->NodeId == nodeId ) -+ bits = statemap_getbits (cmRail->Levels[clvl].LocalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, nodeId), 1); -+ else -+ bits = statemap_getbits (cmRail->Levels[clvl].GlobalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, nodeId), 1); -+ -+ if ( bits ) { -+ EPRINTF2 (DBG_SVC, "cm_svc_indicator_bitmap: its set nodeId %d (clvl %d)\n", nodeId, clvl); -+ BT_SET ( bitmap , nodeId - low ); -+ } -+ } -+ } -+ -+ /* widen the clip out range */ -+ clip_out_low = curr_low; -+ clip_out_high = curr_high -1; -+ } -+ -+ spin_unlock_irqrestore (&cmRail->Lock, flags); -+ -+ return (0); -+} -+ -+ -+static void -+cm_heartbeat_timer (unsigned long arg) -+{ -+ CM_RAIL *cmRail = (CM_RAIL *) arg; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&cmRail->Lock, flags); -+ -+ ASSERT (cmRail->Rail->State == EP_RAIL_STATE_RUNNING); -+ -+ DoHeartbeatWork (cmRail); -+ -+ __Schedule_Timer (cmRail, cmRail->NextRunTime); -+ -+ spin_unlock_irqrestore (&cmRail->Lock, flags); -+} -+ -+void -+DisplayRailDo (DisplayInfo *di, EP_RAIL *rail) -+{ -+ CM_RAIL *cmRail = rail->ClusterRail; -+ unsigned long flags; -+ int i, j; -+ -+ if (rail->State != EP_RAIL_STATE_RUNNING) -+ return; -+ -+ spin_lock_irqsave (&cmRail->Lock, flags); -+ -+ (di->func)(di->arg, "NodeId=%d NodeLevel=%d NumLevels=%d NumNodes=%d\n", -+ cmRail->NodeId, cmRail->TopLevel, cmRail->NumLevels, cmRail->Rail->Position.pos_nodes); -+ -+ (di->func)(di->arg, "["); -+ -+ for (i = 0; i < cmRail->NumLevels; i++) -+ { -+ if (i > 0) -+ (di->func)(di->arg, ","); -+ -+ if (i < cmRail->TopLevel) -+ { -+ (di->func)(di->arg, "L "); -+ -+ for (j = 0; j < cmRail->Levels[i].NumSegs; j++) -+ switch (cmRail->Levels[i].Sgmts[j].State) -+ { -+ case CM_SGMT_PRESENT: (di->func)(di->arg, "p%-4d", cmRail->Levels[i].Sgmts[j].NodeId); break; -+ case CM_SGMT_WAITING: (di->func)(di->arg, "w%4s", ""); break; -+ case CM_SGMT_COMING: (di->func)(di->arg, "c%4s", ""); break; -+ case CM_SGMT_ABSENT: (di->func)(di->arg, ".%4s", ""); break; -+ default: (di->func)(di->arg, "?%4s", ""); break; -+ } -+ } -+ else -+ switch (cmRail->Role) -+ { -+ case CM_ROLE_LEADER_CANDIDATE: -+ (di->func)(di->arg,"l "); -+ for (j = 0; j < cmRail->Levels[i].NumSegs; j++) -+ (di->func)(di->arg," "); -+ break; -+ -+ case CM_ROLE_SUBORDINATE: -+ switch (cmRail->Levels[i].Sgmts[0].State) -+ { -+ case CM_SGMT_PRESENT: (di->func)(di->arg, "p%-4d", cmRail->Levels[i].Sgmts[0].NodeId); break; -+ case CM_SGMT_WAITING: (di->func)(di->arg, "w%4s", ""); break; -+ case CM_SGMT_COMING: (di->func)(di->arg, "c%4s", ""); break; -+ case CM_SGMT_ABSENT: (di->func)(di->arg, ".%4s", ""); break; -+ default: (di->func)(di->arg, "?%4s", ""); break; -+ } -+ for (j = 1; j < cmRail->Levels[i].NumSegs; j++) -+ (di->func)(di->arg, " "); -+ break; -+ -+ default: -+ (di->func)(di->arg, "####"); -+ break; -+ } -+ } -+ (di->func)(di->arg, "]\n"); -+ -+ spin_unlock_irqrestore (&cmRail->Lock, flags); -+} -+ -+void -+DisplayRail (EP_RAIL *rail) -+{ -+ if (rail->State == EP_RAIL_STATE_RUNNING) -+ DisplayRailDo (&di_ep_debug, rail); -+} -+ -+void -+DisplayStatus (EP_RAIL *rail) -+{ -+ if (rail->State == 
EP_RAIL_STATE_RUNNING) -+ { -+ CM_RAIL *cmRail = rail->ClusterRail; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&cmRail->Lock, flags); -+ -+ DisplayNodeMaps (&di_ep_debug, cmRail); -+ -+ spin_unlock_irqrestore (&cmRail->Lock, flags); -+ } -+} -+ -+void -+DisplaySegs (EP_RAIL *rail) -+{ -+ if (rail->State == EP_RAIL_STATE_RUNNING) -+ { -+ CM_RAIL *cmRail = rail->ClusterRail; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&cmRail->Lock, flags); -+ -+ DisplayNodeSgmts (&di_ep_debug, cmRail); -+ -+ spin_unlock_irqrestore (&cmRail->Lock, flags); -+ } -+} -+ -+static void -+LoadBroadcastRoute (CM_RAIL *cmRail, int lvl, int sidx) -+{ -+ EP_RAIL *rail = cmRail->Rail; -+ int nsegs = cmRail->Levels[0].NumSegs; -+ int vp = EP_VP_BCAST(lvl, sidx); -+ int nodes = 1; -+ int baseNode; -+ int i; -+ -+ ASSERT (lvl > 0 && lvl <= cmRail->NumLevels); -+ ASSERT (sidx == 0 || lvl < cmRail->NumLevels); -+ -+ ASSERT (vp >= EP_VP_BCAST_BASE && vp < EP_VP_BCAST_BASE + EP_VP_BCAST_COUNT); -+ -+ for (i = 1; i <= lvl; i++) -+ { -+ nodes *= nsegs; -+ nsegs = (i == cmRail->NumLevels) ? 1 : cmRail->Levels[i].NumSegs; -+ } -+ -+ baseNode = ((cmRail->NodeId / (nodes * nsegs)) * nsegs + sidx) * nodes; -+ -+ CPRINTF5 (2, "%s: broadcast vp lvl %d sidx %d [%d,%d]\n", -+ cmRail->Rail->Name, lvl, sidx, baseNode, baseNode + nodes - 1); -+ -+ rail->Operations.LoadSystemRoute (rail, vp, baseNode, baseNode + nodes - 1); -+} -+ -+static void -+LoadRouteTable (CM_RAIL *cmRail) -+{ -+ EP_RAIL *rail = cmRail->Rail; -+ int i, j; -+ -+ if (cmRail->NumNodes > EP_MAX_NODES) -+ { -+ printk ("More nodes (%d) than point-to-point virtual process table entries (%d)\n", cmRail->NumNodes, EP_MAX_NODES); -+ panic ("LoadRouteTable\n"); -+ } -+ -+ for (i = 0; i < cmRail->NumNodes; i++) -+ rail->Operations.LoadSystemRoute (rail, EP_VP_NODE(i), i, i); -+ -+ /* Generate broadcast routes for subtrees */ -+ for (i = 1; i < cmRail->NumLevels; i++) -+ for (j = 0; j < cmRail->Levels[i].NumSegs; j++) -+ LoadBroadcastRoute (cmRail, i, j); -+ -+ /* Generate broadcast route for whole machine */ -+ LoadBroadcastRoute (cmRail, cmRail->NumLevels, 0); -+ -+ /* Finally invalidate all the data routes */ -+ for (i = 0; i < cmRail->NumNodes; i++) -+ rail->Operations.UnloadNodeRoute (cmRail->Rail, i); -+} -+ -+void -+cm_node_disconnected (EP_RAIL *rail, unsigned nodeId) -+{ -+ CM_RAIL *cmRail = rail->ClusterRail; -+ int base, lstat, lgstat; -+ int clvl, subClMin, subClMax; -+ int thisClId, myClId; -+ unsigned long flags; -+ -+ ASSERT (nodeId != cmRail->NodeId); -+ -+ spin_lock_irqsave (&cmRail->Lock, flags); -+ for (clvl = 0; clvl < cmRail->NumLevels; clvl++) -+ if (nodeId >= cmRail->Levels[clvl].MinNodeId && nodeId < (cmRail->Levels[clvl].MinNodeId + cmRail->Levels[clvl].NumNodes)) -+ break; -+ -+ myClId = ClusterIds (cmRail, clvl, &subClMin, &subClMax); -+ thisClId = nodeId - cmRail->Levels[clvl].MinNodeId; -+ base = thisClId * CM_GSTATUS_BITS; -+ lstat = statemap_getbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_BITS); -+ lgstat = statemap_getbits (cmRail->Levels[clvl].LastGlobalMap, base, CM_GSTATUS_BITS) & CM_GSTATUS_STATUS_MASK; -+ -+ ASSERT ((lstat & CM_GSTATUS_ACK_MASK) == CM_GSTATUS_MAY_RUN); -+ -+ CPRINTF7 (2, "%s: cm_node_disconnected: Node %d: clvl %d, lgstat %s, gstat %s, lstat %s -> %sMAY_START\n", -+ cmRail->Rail->Name, nodeId, clvl, -+ GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, thisClId), -+ GlobalStatusString (cmRail->Levels[clvl].GlobalMap, thisClId), -+ GlobalStatusString (cmRail->Levels[clvl].LocalMap, thisClId), -+ ((lgstat 
!= CM_GSTATUS_CLOSING) && (lstat & CM_GSTATUS_RESTART)) ? "RESTART|" : ""); -+ -+ switch (lgstat) -+ { -+ case CM_GSTATUS_CLOSING: -+ /* delayed ack of closing - set MAY_START and clear RESTART */ -+ statemap_setbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_MAY_START, CM_GSTATUS_BITS); -+ break; -+ case CM_GSTATUS_STARTING: -+ case CM_GSTATUS_RUNNING: -+ IASSERT (! cmRail->Levels[clvl].Online || lstat & CM_GSTATUS_RESTART); -+ break; -+ case CM_GSTATUS_ABSENT: -+ IASSERT (lstat & CM_GSTATUS_RESTART); -+ } -+ -+ cmRail->Levels[clvl].Connected--; -+ -+ spin_unlock_irqrestore (&cmRail->Lock, flags); -+} -+ -+void -+cm_restart_node (EP_RAIL *rail, unsigned nodeId) -+{ -+ CM_RAIL *cmRail = rail->ClusterRail; -+ int base, lstat, lgstat; -+ int clvl, subClMin, subClMax; -+ int thisClId, myClId; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&cmRail->Lock, flags); -+ if (nodeId == rail->Position.pos_nodeid) -+ { -+ for (clvl = 0; clvl < cmRail->NumLevels; clvl++) -+ RestartComms (cmRail, clvl); -+ } -+ else -+ { -+ for (clvl = 0; clvl < cmRail->NumLevels; clvl++) -+ if (nodeId >= cmRail->Levels[clvl].MinNodeId && nodeId < (cmRail->Levels[clvl].MinNodeId + cmRail->Levels[clvl].NumNodes)) -+ break; -+ -+ myClId = ClusterIds (cmRail, clvl, &subClMin, &subClMax); -+ thisClId = nodeId - cmRail->Levels[clvl].MinNodeId; -+ base = thisClId * CM_GSTATUS_BITS; -+ lstat = statemap_getbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_BITS); -+ lgstat = statemap_getbits (cmRail->Levels[clvl].LastGlobalMap, base, CM_GSTATUS_BITS) & CM_GSTATUS_STATUS_MASK; -+ -+ CPRINTF6 (2, "%s: cm_restart_node: Node %d: clvl %d, lgstat %s, gstat %s, lstat %s\n", -+ cmRail->Rail->Name, nodeId, clvl, -+ GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, thisClId), -+ GlobalStatusString (cmRail->Levels[clvl].GlobalMap, thisClId), -+ GlobalStatusString (cmRail->Levels[clvl].LocalMap, thisClId)); -+ -+ if (lgstat != CM_GSTATUS_CLOSING) -+ statemap_setbits (cmRail->Levels[clvl].LocalMap, base, lstat | CM_GSTATUS_RESTART, CM_GSTATUS_BITS); -+ } -+ spin_unlock_irqrestore (&cmRail->Lock, flags); -+} -+ -+void -+cm_force_offline (EP_RAIL *rail, int offline, unsigned int reason) -+{ -+ CM_RAIL *cmRail = rail->ClusterRail; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&cmRail->Lock, flags); -+ if (offline) -+ cmRail->OfflineReasons |= reason; -+ else -+ cmRail->OfflineReasons &= ~reason; -+ spin_unlock_irqrestore (&cmRail->Lock, flags); -+} -+ -+static void -+cm_remove_rail (EP_SUBSYS *subsys, EP_SYS *epsys, EP_RAIL *rail) -+{ -+ CM_SUBSYS *sys = (CM_SUBSYS *) subsys; -+ CM_RAIL *cmRail = sys->Rails[rail->Number]; -+ int i, lvl, clvl; -+ -+ cm_procfs_rail_fini (cmRail); -+ -+ sys->Rails[rail->Number] = NULL; -+ rail->ClusterRail = NULL; -+ -+ del_timer_sync (&cmRail->HeartbeatTimer); -+ -+ cmRail->NextRunTime = 0; -+ cmRail->NextDiscoverTime = 0; -+ cmRail->NextHeartbeatTime = 0; -+ -+ for (clvl = 0; clvl < cmRail->NumLevels; clvl++) -+ { -+ for (lvl = 0; lvl <= clvl; lvl++) -+ { -+ CM_LEVEL *level = &cmRail->Levels[lvl]; -+ -+ statemap_destroy (level->SubordinateMap[clvl]); -+ -+ for (i = 0; i < level->NumSegs; i++) -+ { -+ statemap_destroy (level->Sgmts[i].Maps[clvl].CurrentInputMap); -+ statemap_destroy (level->Sgmts[i].Maps[clvl].InputMap); -+ statemap_destroy (level->Sgmts[i].Maps[clvl].OutputMap); -+ } -+ } -+ -+ cmRail->Levels[clvl].Online = 0; -+ -+ statemap_destroy (cmRail->Levels[clvl].TmpMap); -+ statemap_destroy (cmRail->Levels[clvl].GlobalMap); -+ statemap_destroy (cmRail->Levels[clvl].LastGlobalMap); -+ 
statemap_destroy (cmRail->Levels[clvl].SubTreeMap);
-+	statemap_destroy (cmRail->Levels[clvl].LocalMap);
-+    }
-+
-+    spin_lock_destroy (&cmRail->Lock);
-+
-+    ep_free_inputq  (cmRail->Rail, cmRail->PolledQueue);
-+    ep_free_inputq  (cmRail->Rail, cmRail->IntrQueue);
-+    ep_free_outputq (cmRail->Rail, cmRail->MsgQueue);
-+
-+    KMEM_FREE (cmRail, sizeof (CM_RAIL));
-+}
-+
-+static int
-+cm_add_rail (EP_SUBSYS *subsys, EP_SYS *epsys, EP_RAIL *rail)
-+{
-+    CM_SUBSYS     *sys = (CM_SUBSYS *) subsys;
-+    ELAN_POSITION *pos = &rail->Position;
-+    CM_RAIL       *cmRail;
-+    int            lvl, n, nn, clvl, span, i;
-+    unsigned long  flags;
-+
-+    KMEM_ZALLOC (cmRail, CM_RAIL *, sizeof (CM_RAIL), 1);
-+
-+    if (cmRail == NULL)
-+	return (ENOMEM);
-+
-+    cmRail->Rail     = rail;
-+    cmRail->NodeId   = pos->pos_nodeid;
-+    cmRail->NumNodes = pos->pos_nodes;
-+
-+    spin_lock_init (&cmRail->Lock);
-+
-+    if ((cmRail->IntrQueue   = ep_alloc_inputq (rail, EP_SYSTEMQ_INTR, sizeof (CM_MSG), CM_INPUTQ_ENTRIES, IntrQueueCallback, cmRail)) == NULL ||
-+	(cmRail->PolledQueue = ep_alloc_inputq (rail, EP_SYSTEMQ_POLLED, sizeof (CM_MSG), CM_INPUTQ_ENTRIES, NULL, 0)) == NULL ||
-+	(cmRail->MsgQueue    = ep_alloc_outputq (rail, sizeof (CM_MSG), CM_NUM_MSG_BUFFERS)) == NULL)
-+    {
-+	goto failed;
-+    }
-+
-+    /* point to first "spare" message buffer */
-+    cmRail->NextSpareMsg = 0;
-+
-+    /* Compute the branching ratios from the switch arity */
-+    for (lvl = 0; lvl < CM_MAX_LEVELS; lvl++)
-+	BranchingRatios[lvl] = (lvl < pos->pos_levels) ? pos->pos_arity[pos->pos_levels - lvl - 1] : 4;
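-+
-+    /* Editorial note -- worked example, not part of the original patch: on a
-+     * 64-node machine reported as pos_levels = 3, pos_arity = {4, 4, 4}, the
-+     * loop above yields BranchingRatios[] = {4, 4, 4, 4, 4, 4} (the unused
-+     * trailing entries default to 4).  The loop below then builds three
-+     * levels: lvl 0 spans 4 nodes (NumSegs = 4), lvl 1 spans 16 nodes and
-+     * lvl 2 spans all 64, leaving cmRail->NumLevels = 3. */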
-+
-+    /* now determine the number of levels of hierarchy we have */
-+    /* and how many nodes per level there are */
-+    for (lvl = 0, nn = 1, n = pos->pos_nodes;
-+	 n > 1;
-+	 nn *= BranchingRatios[lvl], n = n / BranchingRatios[lvl], lvl++)
-+    {
-+	int       nSegs  = (n > BranchingRatios[lvl]) ? BranchingRatios[lvl] : n;
-+	int       nNodes = nn * nSegs;
-+	CM_LEVEL *level  = &cmRail->Levels[lvl];
-+
-+	for (clvl = 0, span = pos->pos_arity[pos->pos_levels - clvl - 1];
-+	     span < nNodes && clvl < pos->pos_levels - 1;
-+	     clvl++, span *= pos->pos_arity[pos->pos_levels - clvl - 1])
-+	    ;
-+
-+	level->SwitchLevel = clvl;
-+	level->MinNodeId   = (pos->pos_nodeid / nNodes) * nNodes;
-+	level->NumNodes    = nNodes;
-+	level->NumSegs     = nSegs;
-+    }
-+
-+    cmRail->NumLevels      = lvl;
-+    cmRail->BroadcastLevel = lvl-1;
-+
-+    CPRINTF4 (2, "%s: NodeId=%d NumNodes=%d NumLevels=%d\n",
-+	      rail->Name, pos->pos_nodeid, pos->pos_nodes, cmRail->NumLevels);
-+
-+    LoadRouteTable (cmRail);
-+
-+    /* Init SGMT constants */
-+    for (lvl = 0; lvl < cmRail->NumLevels; lvl++)
-+    {
-+	CM_LEVEL *level = &cmRail->Levels[lvl];
-+
-+	level->MySgmt = SegmentNo (cmRail, cmRail->NodeId, lvl);
-+
-+	for (i = 0; i < CM_SGMTS_PER_LEVEL; i++)
-+	{
-+	    CM_SGMT *sgmt = &level->Sgmts[i];
-+
-+	    sgmt->MsgNumber = lvl * CM_SGMTS_PER_LEVEL + i;
-+	    sgmt->Level     = lvl;
-+	    sgmt->Sgmt      = i;
-+	}
-+    }
-+
-+    /* Init maps for each cluster level */
-+    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
-+    {
-+	int nNodes  = cmRail->Levels[clvl].NumNodes;
-+	int mapBits = (nNodes * CM_GSTATUS_BITS) + (nNodes * EP_SVC_NUM_INDICATORS);
-+	int clmin;
-+	int clmax;
-+	int clid    = ClusterIds (cmRail, clvl, &clmin, &clmax);
-+
-+	for (lvl = 0; lvl <= clvl; lvl++)
-+	{
-+	    CM_LEVEL *level = &cmRail->Levels[lvl];
-+
-+	    level->SubordinateMap[clvl] = statemap_create (mapBits);
-+
-+	    for (i = 0; i < level->NumSegs; i++)
-+	    {
-+		level->Sgmts[i].Maps[clvl].CurrentInputMap = statemap_create (mapBits);
-+		level->Sgmts[i].Maps[clvl].InputMap        = statemap_create (mapBits);
-+		level->Sgmts[i].Maps[clvl].OutputMap       = statemap_create (mapBits);
-+	    }
-+	}
-+
-+	cmRail->Levels[clvl].Online = 0;
-+
-+	cmRail->Levels[clvl].TmpMap        = statemap_create (mapBits);
-+	cmRail->Levels[clvl].GlobalMap     = statemap_create (mapBits);
-+	cmRail->Levels[clvl].LastGlobalMap = statemap_create (mapBits);
-+	cmRail->Levels[clvl].SubTreeMap    = statemap_create (mapBits);
-+	cmRail->Levels[clvl].LocalMap      = statemap_create (mapBits);
-+
-+	/* Flag everyone outside my next lower cluster as sensed offline...
*/ -+ for (i = 0; i < clmin; i++) -+ statemap_setbits (cmRail->Levels[clvl].LocalMap, i * CM_GSTATUS_BITS, CM_GSTATUS_MAY_START, CM_GSTATUS_BITS); -+ -+ for (i = clmax + 1; i < nNodes; i++) -+ statemap_setbits (cmRail->Levels[clvl].LocalMap, i * CM_GSTATUS_BITS, CM_GSTATUS_MAY_START, CM_GSTATUS_BITS); -+ -+ /* ...and set my own state */ -+ statemap_setbits (cmRail->Levels[clvl].LocalMap, clid * CM_GSTATUS_BITS, -+ CM_GSTATUS_CLOSING | CM_GSTATUS_MAY_START, CM_GSTATUS_BITS); -+ } -+ -+ /* compute parameter hash to add to messages */ -+ cmRail->ParamHash = EP_PROTOCOL_VERSION; -+ cmRail->ParamHash = cmRail->ParamHash * 127 + CM_PERIODIC_DISCOVER_INTERVAL; -+ cmRail->ParamHash = cmRail->ParamHash * 127 + CM_URGENT_DISCOVER_INTERVAL; -+ cmRail->ParamHash = cmRail->ParamHash * 127 + CM_HEARTBEAT_INTERVAL; -+ cmRail->ParamHash = cmRail->ParamHash * 127 + CM_P2P_DMA_RETRIES; -+ cmRail->ParamHash = cmRail->ParamHash * 127 + CM_P2P_MSG_RETRIES; -+ cmRail->ParamHash = cmRail->ParamHash * 127 + CM_BCAST_MSG_RETRIES; -+ cmRail->ParamHash = cmRail->ParamHash * 127 + CM_TIMER_SCHEDULE_TIMEOUT; -+ cmRail->ParamHash = cmRail->ParamHash * 127 + CM_HEARTBEAT_TIMEOUT; -+ cmRail->ParamHash = cmRail->ParamHash * 127 + CM_DISCOVER_TIMEOUT; -+ cmRail->ParamHash = cmRail->ParamHash * 127 + BT_NBIPUL; -+ cmRail->ParamHash = cmRail->ParamHash * 127 + CM_GSTATUS_BITS; -+ cmRail->ParamHash = cmRail->ParamHash * 127 + EP_SVC_NUM_INDICATORS; -+ cmRail->ParamHash = cmRail->ParamHash * 127 + cmRail->NumLevels; -+ cmRail->ParamHash = cmRail->ParamHash * 127 + cmRail->NumNodes; -+ for (i = 0; i < cmRail->NumLevels; i++) -+ cmRail->ParamHash = cmRail->ParamHash * 127 + BranchingRatios[i]; -+ -+ spin_lock_irqsave (&cmRail->Lock, flags); -+ -+ /* Initialise the timer, but don't add it yet, since -+ * __Schedule_Heartbeat() will do this. 
*/ -+ -+ init_timer (&cmRail->HeartbeatTimer); -+ -+ cmRail->HeartbeatTimer.function = cm_heartbeat_timer; -+ cmRail->HeartbeatTimer.data = (unsigned long) cmRail; -+ cmRail->HeartbeatTimer.expires = lbolt + hz; -+ -+ /* Indicate that heartbeats should be sent -+ * as soon as the timer is run from inside -+ * LowerTopLevel */ -+ cmRail->NextHeartbeatTime = lbolt; -+ -+ /* start discovering who else is out there */ -+ LowerTopLevel (cmRail, 0); -+ -+ /* connect to myself straight away - I know I'm here */ -+ ep_connect_node (rail, cmRail->NodeId); -+ -+ /* add to all rails */ -+ sys->Rails[rail->Number] = cmRail; -+ rail->ClusterRail = (void *) cmRail; -+ -+ spin_unlock_irqrestore (&cmRail->Lock, flags); -+ -+ /* Enable the input queues */ -+ ep_enable_inputq (rail, cmRail->PolledQueue); -+ ep_enable_inputq (rail, cmRail->IntrQueue); -+ -+ /* Create the procfs entries */ -+ cm_procfs_rail_init (cmRail); -+ -+ return 0; -+ -+ failed: -+ cm_remove_rail (subsys, epsys, rail); -+ return -ENOMEM; -+} -+ -+static void -+cm_fini (EP_SUBSYS *subsys, EP_SYS *epsys) -+{ -+ CM_SUBSYS *sys = (CM_SUBSYS *) subsys; -+ -+ cm_procfs_fini(sys); -+ -+ KMEM_FREE (sys, sizeof (CM_SUBSYS)); -+} -+ -+int -+cm_init (EP_SYS *sys) -+{ -+ CM_SUBSYS *subsys; -+ -+ KMEM_ZALLOC (subsys, CM_SUBSYS *, sizeof (CM_SUBSYS), 1); -+ -+ if (subsys == NULL) -+ return (ENOMEM); -+ -+ subsys->Subsys.Sys = sys; -+ subsys->Subsys.Name = "cm"; -+ subsys->Subsys.Destroy = cm_fini; -+ subsys->Subsys.AddRail = cm_add_rail; -+ subsys->Subsys.RemoveRail = cm_remove_rail; -+ -+ ep_subsys_add (sys, &subsys->Subsys); -+ -+ cm_procfs_init (subsys); -+ -+ /* -+ * Initialise the machineid if it wasn't specified by -+ * the modules.conf file - otherwise truncate it to -+ * 16 bits. -+ */ -+ if (MachineId != -1) -+ MachineId = (uint16_t) MachineId; -+ else -+ { -+#if defined(LINUX_ALPHA) -+ MachineId = (uint16_t)((5 << 12) | HZ); -+#elif defined(LINUX_SPARC) -+ MachineId = (uint16_t)((4 << 12) | HZ); -+#elif defined(LINUX_I386) -+ MachineId = (uint16_t)((3 << 12) | HZ); -+#elif defined( LINUX_IA64) -+ MachineId = (uint16_t)((2 << 12) | HZ); -+#elif defined(LINUX_X86_64) -+ MachineId = (uint16_t)((1 << 12) | HZ); -+#else -+ MachineId = (uint16_t)((0 << 12) | HZ); -+#endif -+ } -+ -+ return (0); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/cm.h linux-2.6.9/drivers/net/qsnet/ep/cm.h ---- clean/drivers/net/qsnet/ep/cm.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/cm.h 2005-03-30 09:06:34.000000000 -0500 -@@ -0,0 +1,396 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN_CM_H -+#define __ELAN_CM_H -+ -+#ident "@(#)$Id: cm.h,v 1.16 2005/03/30 14:06:34 mike Exp $" -+/* $Source: /cvs/master/quadrics/epmod/cm.h,v $*/ -+ -+#include -+ -+#if defined(DIGITAL_UNIX) -+/* -+ * On Tru64 - SMP doesn't mean Symmetric - cpu 0 is a master cpu and is responsible -+ * for handling all PCI interrupts and "funneled" operations. When a kernel thread -+ * is made runnable, the scheduler will choose which cpu it will run on at that time, -+ * and will only execute a higher priority thread from another cpu's run queue when -+ * it becomes totally idle (apparently also including user processes). 
Also the
-+ * assert_wait_mesg_timo function uses a per-cpu timeout - these can only get executed
-+ * at "preemptable" places - so again we have no guarantee on when they will execute if
-+ * they happen to be queued on a "hogged" cpu. The combination of these means that Tru64
-+ * is incapable of scheduling a high priority kernel thread within a deterministic time
-+ * of when it should have become runnable - wonderful.
-+ *
-+ * Hence the solution Compaq have proposed is to schedule a timeout onto all of the
-+ * cpus' timeout lists at the maximum frequency that we could want to execute code,
-+ * then to handle the scheduling of work between these ourselves. With a bit of luck
-+ * ..... at least one cpu will be sufficiently unloaded to allow us to get a chance
-+ * to do our important work.
-+ *
-+ * However ..... this still is not reliable, since timeouts under Tru64 are still
-+ * only run when the currently running kernel thread "co-operates" by calling one
-+ * of a number of functions which is permitted to run the "lwc"s AND is not holding
-+ * any spinlocks AND is running at IPL 0. However Compaq are unable to provide
-+ * any upper limit on the time between the "lwc"s being run and so it is possible
-+ * for all 4 cpus to not run them for an unbounded time.
-+ *
-+ * The solution proposed is to use the RM_TEMP_BACKDOOR hook which was added to
-+ * hardclock() to "solve" this problem for Memory Channel. However, since it
-+ * is called within the clock interrupt it is not permissible to acquire any
-+ * spinlocks, nor to run for "too long". This means that it is not possible to
-+ * call the heartbeat algorithm from this hook.
-+ *
-+ * Our solution to these limitations is to use the hook to cause an elan interrupt
-+ * to be delivered, by issuing a mis-aligned SetEvent command - this causes the device
-+ * to trap and ep_cprocTrap() can then run the heartbeat code. However there is a lock
-+ * order violation between the elan_dev::IntrLock and ep_dev::Lock, so we have to
-+ * use a trylock and if we fail, then hope that when the interrupt is delivered again
-+ * some time later we will succeed.
-+ *
-+ * However this only works if the kernel is able to respond to the Elan interrupt,
-+ * so we panic inside the RM_TEMP_BACKDOOR hook if the SetEvent's interrupt has
-+ * not been taken for more than a CM_TIMER_SCHEDULE_TIMEOUT interval.
-+ *
-+ * In fact this is exactly the mechanism that other operating systems use to
-+ * execute timeouts, since the hardclock interrupt posts a low priority
-+ * "soft interrupt" which "pre-empts" the currently running thread and then
-+ * executes the timeouts. To block timeouts you use splsoftclock() the same as
-+ * in Tru64.
-+ */
-+#define PER_CPU_TIMEOUT			TRUE
-+#endif
-+
-+
-+#define CM_SGMTS_PER_LEVEL		8	/* maximum nodes in each segment */
-+#define CM_MAX_LEVELS			6	/* maximum depth of tree */
-+
-+/* message buffers/dmas/events etc */
-+#define CM_NUM_NODE_MSG_BUFFERS		(CM_MAX_LEVELS * CM_SGMTS_PER_LEVEL)	/* subordinates and leader */
-+#define CM_NUM_SPARE_MSG_BUFFERS	8					/* spare msg buffers for non-connected nodes */
-+#define CM_NUM_MSG_BUFFERS		(CM_NUM_NODE_MSG_BUFFERS + CM_NUM_SPARE_MSG_BUFFERS)
-+
-+#define CM_INPUTQ_ENTRIES		128	/* # entries in input queue */
-+
-+#define CM_PERIODIC_DISCOVER_INTERVAL	(5000)	/* 5s    (infrequent resolution of established leader conflicts) */
-+#define CM_URGENT_DISCOVER_INTERVAL	(50)	/* 0.05s (more frequently than heartbeats 'cause they don't retry) */
-+#define CM_HEARTBEAT_INTERVAL		(125)	/* 0.125s */
-+#define CM_TIMER_SCHEDULE_TIMEOUT	(4000)	/* 4s    Maximum time before a timer that's scheduled to run gets to run (e.g. blocked in interrupt handlers etc) */
-+#define CM_THREAD_SCHEDULE_TIMEOUT	(30000)	/* 30s   Maximum time before a thread that's scheduled to run gets to run */
-+#define CM_THREAD_RUNNING_TIMEOUT	(30000)	/* 30s   Don't expect the manager thread to be running longer than this */
-+
-+#ifdef PER_CPU_TIMEOUT
-+#define CM_PERCPU_TIMEOUT_INTERVAL	(50)	/* 0.05s (must be less than all above intervals) */
-+#define CM_PACEMAKER_INTERVAL		(500)	/* 0.5s */
-+
-+#define CM_HEARTBEAT_OVERDUE		(250)	/* 0.25s Maximum time a timeout can be overdue before taking extreme action */
-+#endif
-+
-+#define CM_P2P_DMA_RETRIES		31
-+
-+/* We expect at least 1 point-to-point message in CM_P2P_MSG_RETRIES
-+ * attempts to send one to be successfully received */
-+#define CM_P2P_MSG_RETRIES		8
-+
-+/* We expect at least 1 broadcast message in CM_BCAST_MSG_RETRIES attempts
-+ * to send one to be successfully received. */
-+#define CM_BCAST_MSG_RETRIES		40
-+
-+/* Heartbeat timeout allows for a node stalling and still getting its
-+ * heartbeat.  The 2 is to allow for unsynchronised polling times. */
-+#define CM_HEARTBEAT_TIMEOUT		(CM_TIMER_SCHEDULE_TIMEOUT + (2 + CM_P2P_MSG_RETRIES) * CM_HEARTBEAT_INTERVAL)
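-+
-+/* Editorial note -- worked arithmetic, not part of the original patch: with
-+ * the default values above,
-+ *   CM_HEARTBEAT_TIMEOUT = 4000 + (2 + 8)  * 125 = 5250 mS
-+ * and the discover timeout defined below works out as
-+ *   CM_DISCOVER_TIMEOUT  = 4000 + (2 + 40) * 50  = 6100 mS
-+ * so the requirement CM_DISCOVER_TIMEOUT > CM_HEARTBEAT_TIMEOUT holds. */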
-+
-+/* Discover timeout must be > CM_HEARTBEAT_TIMEOUT to guarantee that people
-+ * who don't see discovery are considered dead by their leader.  This
-+ * ensures that by the time a node "discovers" it is a leader of a segment,
-+ * the previous leader of that segment will have been deemed to be dead by
-+ * the parent segment's leader */
-+#define CM_DISCOVER_TIMEOUT		(CM_TIMER_SCHEDULE_TIMEOUT + (2 + CM_BCAST_MSG_RETRIES) * CM_URGENT_DISCOVER_INTERVAL)
-+
-+#define CM_WAITING_TIMEOUT		(CM_DISCOVER_TIMEOUT * 100)
-+
-+/*
-+ * Convert all timeouts specified in mS into "ticks"
-+ */
-+#define MSEC2TICKS(MSEC)	(((MSEC)*HZ)/1000)
-+
-+
-+/* statemap entry */
-+typedef struct cm_state_entry
-+{
-+    int16_t	level;			/* cluster level to apply to */
-+    int16_t	offset;			/* from statemap_findchange() */
-+    uint16_t	seg[BT_NBIPUL/16];	/* ditto */
-+} CM_STATEMAP_ENTRY;
-+
-+/* offset is >= 0 for a change to apply; otherwise one of: */
-+#define STATEMAP_NOMORECHANGES	(-1)	/* end of a set of updates */
-+#define STATEMAP_RESET		(-2)	/* reset the target map */
-+#define STATEMAP_NOOP		(-3)	/* null token */
-+
-+/* CM message format */
-+typedef int8_t CM_SEQ;			/* heartbeat sequence numbers; at least 2 bits, signed */
-+
-+/*
-+ * The message header is received into the last 64 byte block of
-+ * the input queue and the Version *MUST* be the last word of the
-+ * block to ensure that we can see that the whole of the message
-+ * has reached main memory after we've seen the input queue pointer
-+ * be updated.
-+ */
-+typedef struct ep_cm_hdr
-+{
-+    uint32_t	Pad0;
-+    uint32_t	Pad1;
-+
-+    uint8_t	Type;
-+    uint8_t	Level;
-+    CM_SEQ	Seq;			/* precision at least 2 bits each */
-+    CM_SEQ	AckSeq;
-+
-+    uint16_t	NumMaps;
-+    uint16_t	MachineId;
-+
-+    uint16_t	NodeId;
-+    uint16_t	Checksum;
-+
-+    uint32_t	Timestamp;
-+    uint32_t	ParamHash;
-+    uint32_t	Version;
-+} CM_HDR;
-+
-+#define CM_HDR_SIZE		sizeof (CM_HDR)
-+
-+typedef struct cm_msg
-+{
-+    union {
-+	CM_STATEMAP_ENTRY	Statemaps[1];	/* piggy-backed statemap updates start here */
-+	uint8_t			Space[EP_SYSTEMQ_MSG_MAX - CM_HDR_SIZE];
-+    } Payload;
-+
-+    CM_HDR	Hdr;
-+} CM_MSG;
-+
-+/* The maximum number of statemap entries that can fit within an EP_CM_MSG_BUFFER */
-+#define CM_MSG_MAXMAPS		(offsetof (CM_MSG, Hdr) / sizeof (CM_STATEMAP_ENTRY))
-+#define CM_MSG_MAP(mapno)	(CM_MSG_MAXMAPS - (mapno) - 1)
-+
-+/* The actual special message base & size, including 'nmaps' piggy-backed statemap entries */
-+#define CM_MSG_BASE(nmaps)	(nmaps == 0 ? offsetof (CM_MSG, Hdr) : offsetof (CM_MSG, Payload.Statemaps[CM_MSG_MAXMAPS - nmaps]))
-+#define CM_MSG_SIZE(nmaps)	(sizeof (CM_MSG) - CM_MSG_BASE(nmaps))
-+
-+#define CM_MSG_VERSION			0xcad00005
-+#define CM_MSG_TYPE_RESOLVE_LEADER	0
-+#define CM_MSG_TYPE_DISCOVER_LEADER	1
-+#define CM_MSG_TYPE_NOTIFY		2
-+#define CM_MSG_TYPE_DISCOVER_SUBORDINATE 3
-+#define CM_MSG_TYPE_IMCOMING		4
-+#define CM_MSG_TYPE_HEARTBEAT		5
-+#define CM_MSG_TYPE_REJOIN		6
-+
-+/* CM machine segment */
-+typedef struct cm_sgmtMaps
-+{
-+    u_char	InputMapValid;		/* Input map has been set */
-+    u_char	OutputMapValid;		/* Output map has been set */
-+    u_char	SentChanges;		/* got an outstanding STATEMAP_NOMORECHANGES to send */
-+    statemap_t *OutputMap;		/* state to send */
-+    statemap_t *InputMap;		/* state received */
-+    statemap_t *CurrentInputMap;	/* state being received */
-+} CM_SGMTMAPS;
-+
-+typedef struct cm_sgmt
-+{
-+    u_char	State;
-+    u_char	SendMaps;
-+    u_char	MsgAcked;
-+    CM_SEQ	MsgSeq;
-+    CM_SEQ	AckSeq;
-+    u_int	NodeId;
-+    long	UpdateTick;
-+    long	WaitingTick;
-+    uint32_t	Timestamp;
-+    CM_SGMTMAPS	Maps[CM_MAX_LEVELS];	/* Maps[i] == state for cluster level i */
-+    u_short	MsgNumber;		/* msg buffer to use */
-+    u_short	NumMaps;		/* # maps in message buffer */
-+    u_short	Level;
-+    u_short	Sgmt;
-+} CM_SGMT;
-+
-+#define CM_SGMT_ABSENT		0	/* no one there at all */
-+#define CM_SGMT_WAITING		1	/* waiting for subtree to connect */
-+#define CM_SGMT_COMING		2	/* expecting a subtree to reconnect */
-+#define CM_SGMT_PRESENT		3	/* connected */
-+
-+typedef struct cm_level
-+{
-+    int		SwitchLevel;
-+    u_int	MinNodeId;
-+    u_int	NumNodes;
-+    u_int	NumSegs;
-+    u_int	MySgmt;
-+
-+    /* SubordinateMap[i] == OR of all subordinate maps on this level and down for cluster level i */
-+    u_char	SubordinateMapValid[CM_MAX_LEVELS];
-+    statemap_t *SubordinateMap[CM_MAX_LEVELS];
-+
-+    /* maps/flags for this cluster level */
-+    u_int	Online:1;		/* I've gone online (seen myself running) */
-+    u_int	Restarting:1;		/* driving my own restart bit */
-+    u_char	OfflineReasons;		/* forced offline by broadcast */
-+
-+    u_char	GlobalMapValid;
-+    u_char	SubTreeMapValid;
-+    u_long	Connected;
-+
-+    statemap_t *LocalMap;		/* state bits I drive */
-+    statemap_t *SubTreeMap;		/* OR of my and my subtree states */
-+    statemap_t *GlobalMap;		/* OR of all node states */
-+    statemap_t *LastGlobalMap;		/* last map I saw */
-+    statemap_t *TmpMap;			/* scratchpad */
-+
-+    CM_SGMT	Sgmts[CM_SGMTS_PER_LEVEL];
-+} CM_LEVEL;
-+
-+#define CM_ROLE_LEADER_CANDIDATE	0
-+#define CM_ROLE_LEADER			1
-+#define CM_ROLE_SUBORDINATE		2
-+
-+/* global status bits */
-+#define CM_GSTATUS_STATUS_MASK	0x03	/* bits nodes drive to broadcast their status */
-+#define CM_GSTATUS_ABSENT	0x00	/* Off the network */
-+#define CM_GSTATUS_STARTING	0x01	/* I'm waiting for everyone to see me online */
-+#define CM_GSTATUS_RUNNING	0x03	/* up and running */
-+#define CM_GSTATUS_CLOSING	0x02	/* I'm waiting for everyone to see me offline */
-+
-+#define CM_GSTATUS_ACK_MASK	0x0c	/* bits node drive to ack other status */
-+#define CM_GSTATUS_MAY_START	0x04	/* Everyone thinks I may not start */
-+#define CM_GSTATUS_MAY_RUN	0x08	/* Everyone thinks I may not run */
-+
-+#define CM_GSTATUS_RESTART	0x10	/* Someone thinks I should restart */
-+#define CM_GSTATUS_BITS		5
-+
-+#define CM_GSTATUS_BASE(node)	((node) * CM_GSTATUS_BITS)
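-+
-+/* Editorial note -- layout illustration, not part of the original patch:
-+ * each node occupies CM_GSTATUS_BITS = 5 consecutive statemap bits, so node
-+ * n's field starts at CM_GSTATUS_BASE(n) = 5 * n (e.g. node 3 at bit 15).
-+ * A field value of 0x0b decomposes as status CM_GSTATUS_RUNNING (0x03) with
-+ * the ack bit CM_GSTATUS_MAY_RUN (0x08) or'd in; CM_GSTATUS_RESTART (0x10)
-+ * or'd on top marks the node as asked to restart. */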
-+
-+#if defined(PER_CPU_TIMEOUT)
-+typedef struct cm_timeout_data
-+{
-+    long		ScheduledAt;		/* lbolt timeout was scheduled to run at */
-+
-+    unsigned long	EarlyCount;		/* # times run earlier than NextRun */
-+    unsigned long	MissedCount;		/* # times run on time - but someone else was running it */
-+    unsigned long	WastedCount;		/* # times we failed to get the spinlock */
-+    unsigned long	WorkCount;		/* # times we're the one running */
-+
-+    unsigned long	WorstDelay;		/* worst scheduling delay */
-+    unsigned long	BestDelay;		/* best scheduling delay */
-+
-+    unsigned long	WorstLockDelay;		/* worst delay before getting rail->Lock */
-+
-+    unsigned long	WorstHearbeatDelay;	/* worst delay before calling DoHeartbeatWork */
-+} CM_TIMEOUT_DATA;
-+#endif
-+
-+typedef struct cm_rail
-+{
-+    EP_RAIL	       *Rail;			/* rail we're associated with */
-+    struct list_head	Link;			/* and linked on the CM_SUBSYS */
-+
-+    uint32_t		ParamHash;		/* hash of critical parameters */
-+    uint32_t		Timestamp;
-+    long		DiscoverStartTick;	/* when discovery started */
-+
-+    unsigned int	NodeId;			/* my node id */
-+    unsigned int	NumNodes;		/* and number of nodes */
-+    unsigned int	NumLevels;		/* number of levels computed from machine size */
-+    int			BroadcastLevel;
-+    long		BroadcastLevelTick;
-+    unsigned int	TopLevel;		/* level at which I'm not a leader */
-+    unsigned char	Role;			/* state at TopLevel */
-+
-+    EP_INPUTQ	       *PolledQueue;		/* polled input queue */
-+    EP_INPUTQ	       *IntrQueue;		/* intr input queue */
-+    EP_OUTPUTQ	       *MsgQueue;		/* message */
-+    unsigned int	NextSpareMsg;		/* next "spare" message buffer to use */
-+
-+    EP_CM_RAIL_STATS	Stats;			/* statistics */
-+
-+    kmutex_t		Mutex;
-+    spinlock_t		Lock;
-+
-+    long		NextHeartbeatTime;	/* next time to check/send heartbeats */
-+    long		NextDiscoverTime;	/* next time to progress discovery */
-+    long		NextRunTime;		/* the earlier of the above two or intr requires inputq poll */
-+
-+    unsigned int	OfflineReasons;		/* forced offline by procfs/manager thread stuck */
-+
-+#if defined(PER_CPU_TIMEOUT)
-+    spinlock_t		HeartbeatTimeoutsLock;		/* spinlock to sequentialise per-cpu timeouts */
-+    long		HeartbeatTimeoutsStarted;	/* bitmap of which timeouts have started */
-+    long		HeartbeatTimeoutsStopped;	/* bitmap of which timeouts have stopped */
-+    long		HeartbeatTimeoutsShouldStop;	/* flag to indicate timeouts should stop */
-+    kcondvar_t		HeartbeatTimeoutsWait;		/* place to sleep waiting for timeouts to stop */
-+    long		HeartbeatTimeoutRunning;	/* someone is running the timeout - don't try for the lock */
-+
-+    long		HeartbeatTimeoutOverdue;	/* heartbeat seen as overdue - interrupt requested */
-+
-+    CM_TIMEOUT_DATA    *HeartbeatTimeoutsData;		/* per timeout data */
-+#else
-+    struct timer_list	HeartbeatTimer;			/* timer for heartbeat/discovery */
-+#endif
-+
-+    CM_LEVEL		Levels[CM_MAX_LEVELS];
-+} CM_RAIL;
-+
-+/* OfflineReasons (both per-rail and per-level) */
-+#define CM_OFFLINE_BROADCAST	(1 << 0)
-+#define CM_OFFLINE_PROCFS	(1 << 1)
-+#define CM_OFFLINE_MANAGER	(1 << 2)
-+
-+typedef struct cm_subsys
-+{
-+    EP_SUBSYS	Subsys;
-+    CM_RAIL    *Rails[EP_MAX_RAILS];
-+} CM_SUBSYS;
-+
-+extern int MachineId;
-+
-+extern void cm_node_disconnected (EP_RAIL *rail, unsigned nodeId);
-+extern void cm_restart_node (EP_RAIL *rail, unsigned nodeId);
-+extern void cm_restart_comms (CM_RAIL *cmRail);
-+extern int  cm_init (EP_SYS *sys);
-+
-+extern void DisplayRail (EP_RAIL *rail);
-+extern void DisplaySegs (EP_RAIL *rail);
-+extern void DisplayStatus (EP_RAIL *rail);
-+
-+extern void DisplayNodeMaps  (DisplayInfo *di, CM_RAIL *cmRail);
-+extern void DisplayNodeSgmts (DisplayInfo *di, CM_RAIL *cmRail);
-+extern void DisplayRailDo    (DisplayInfo *di, EP_RAIL *rail);
-+
-+extern int  cm_read_cluster (EP_RAIL *rail, char *page);
-+extern void cm_force_offline (EP_RAIL
*rail, int offline, unsigned int reason); -+ -+extern int cm_svc_indicator_set (EP_RAIL *rail, int svc_indicator); -+extern int cm_svc_indicator_clear (EP_RAIL *rail, int svc_indicator); -+extern int cm_svc_indicator_is_set (EP_RAIL *rail, int svc_indicator, int nodeId); -+extern int cm_svc_indicator_bitmap (EP_RAIL *rail, int svc_indicator, bitmap_t * bitmap, int low, int nnodes); -+ -+/* cm_procfs.c */ -+extern void cm_procfs_init (CM_SUBSYS *subsys); -+extern void cm_procfs_fini (CM_SUBSYS *subsys); -+extern void cm_procfs_rail_init (CM_RAIL *rail); -+extern void cm_procfs_rail_fini (CM_RAIL *rail); -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -+#endif /* __ELAN_CM_H */ -+ -diff -urN clean/drivers/net/qsnet/ep/cm_procfs.c linux-2.6.9/drivers/net/qsnet/ep/cm_procfs.c ---- clean/drivers/net/qsnet/ep/cm_procfs.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/cm_procfs.c 2004-05-14 05:23:13.000000000 -0400 -@@ -0,0 +1,254 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2005 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: cm_procfs.c,v 1.5 2004/05/14 09:23:13 daniel Exp $" -+/* $Source: /cvs/master/quadrics/epmod/cm_procfs.c,v $ */ -+ -+#include -+ -+#include -+ -+#include "kcomm_vp.h" -+#include "debug.h" -+#include "cm.h" -+#include -+ -+#include -+ -+extern char *sprintClPeers (char *str, CM_RAIL *cmRail, int clvl); -+ -+static int -+proc_read_cluster(char *page, char **start, off_t off, -+ int count, int *eof, void *data) -+{ -+ CM_RAIL *cmRail = (CM_RAIL *) data; -+ char *p = page; -+ -+ page[0] = 0; -+ -+ if (cmRail->Rail->State != EP_RAIL_STATE_RUNNING) -+ p += sprintf(p, "\n"); -+ else -+ { -+ CM_LEVEL *cmLevel; -+ unsigned long flags; -+ int i, j; -+ char clNodeStr[32]; /* [%d-%d][%d-%d] */ -+ char seperate_with; -+ -+ struct { int val; char *name; } bitvals[] = { -+ {CM_OFFLINE_BROADCAST, "Broadcast"}, -+ {CM_OFFLINE_PROCFS, "Offline"}, -+ {CM_OFFLINE_MANAGER, "Manager"}}; -+ -+ spin_lock_irqsave (&cmRail->Lock, flags); -+ -+ for (i = 0; i < cmRail->NumLevels; i++) -+ { -+ cmLevel = &cmRail->Levels[i]; -+ -+ p += sprintf(p, "%23s %7s ", sprintClPeers (clNodeStr, cmRail, i), cmLevel->Online?"Online":"Offline"); -+ -+ if ((cmLevel->Online ) | ( cmLevel->Connected > 0)) -+ p += sprintf(p, "Connected=%lu ", cmLevel->Connected); -+ -+ seperate_with = '<'; -+ -+ if ( cmLevel->Restarting ) { -+ p += sprintf(p, "%cRestarting", seperate_with); -+ seperate_with = ','; -+ } -+ -+ if ( ! 
(cmLevel->GlobalMapValid & cmLevel->SubTreeMapValid )) { -+ p += sprintf(p, "%cMap Not Valid", seperate_with); -+ seperate_with = ','; -+ } -+ -+ if ( cmLevel->OfflineReasons ) { -+ for (j = 0; j < sizeof (bitvals)/sizeof(bitvals[0]); j++) -+ if (cmLevel->OfflineReasons & bitvals[j].val) { -+ p += sprintf(p, "%c%s", seperate_with, bitvals[j].name); -+ seperate_with = ','; -+ } -+ } -+ if ( cmRail->OfflineReasons ) { -+ for (j = 0; j < sizeof (bitvals)/sizeof(bitvals[0]); j++) -+ if (cmRail->OfflineReasons & bitvals[j].val) { -+ p += sprintf(p, "%c%s", seperate_with, bitvals[j].name); -+ seperate_with = ','; -+ } -+ } -+ -+ if ( seperate_with != '<' ) -+ p += sprintf(p,">\n"); -+ else -+ p += sprintf(p,"\n"); -+ } -+ -+ spin_unlock_irqrestore (&cmRail->Lock, flags); -+ } -+ -+ return qsnet_proc_calc_metrics (page, start, off, count, eof, p - page); -+} -+ -+static struct rail_info -+{ -+ char *name; -+ int (*read_func) (char *page, char **start, off_t off, int count, int *eof, void *data); -+ int (*write_func) (struct file *file, const char *buf, unsigned long count, void *data); -+} rail_info[] = { -+ {"cluster", proc_read_cluster, NULL}, -+}; -+ -+struct proc_dir_entry *svc_indicators_root; -+ -+typedef struct svc_indicator_data -+{ -+ int svc_indicator; -+ EP_RAIL *rail; -+} SVC_INDICATOR_DATA; -+ -+static SVC_INDICATOR_DATA svc_indicator_data[EP_SVC_NUM_INDICATORS][EP_MAX_RAILS]; -+static char *svc_indicator_names[EP_SVC_NUM_INDICATORS] = EP_SVC_NAMES; -+ -+static int -+proc_read_svc_indicator_rail_bitmap (char *page, char **start, off_t off, -+ int count, int *eof, void *data) -+{ -+ SVC_INDICATOR_DATA *svc_data = (SVC_INDICATOR_DATA *)data; -+ unsigned int nnodes = ep_numnodes (ep_system()); -+ bitmap_t *bitmap; -+ -+ KMEM_ZALLOC (bitmap, bitmap_t *, (BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t)), 1); -+ -+ cm_svc_indicator_bitmap (svc_data->rail, svc_data->svc_indicator, bitmap, 0, nnodes); -+ -+ ep_sprintf_bitmap (page, PAGESIZE, bitmap, 0, 0, nnodes); -+ -+ KMEM_FREE (bitmap, (BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t))); -+ -+ strcat (page, "\n"); -+ -+ return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page))); -+} -+ -+static int -+proc_read_svc_indicator_bitmap(char *page, char **start, off_t off, -+ int count, int *eof, void *data) -+{ -+ unsigned int num = (unsigned long) data; -+ EP_SYS *sys = ep_system(); -+ unsigned int nnodes = ep_numnodes (sys); -+ bitmap_t *bitmap; -+ -+ KMEM_ALLOC(bitmap, bitmap_t *, (BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t)), 1); -+ -+ ep_svc_indicator_bitmap (sys, num, bitmap, 0, nnodes); -+ -+ ep_sprintf_bitmap (page, PAGESIZE, bitmap, 0, 0, nnodes); -+ -+ KMEM_FREE (bitmap, (BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t))); -+ -+ strcat (page, "\n"); -+ -+ return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page))); -+} -+ -+void -+cm_procfs_rail_init (CM_RAIL *cmRail) -+{ -+ EP_RAIL *rail = cmRail->Rail; -+ struct proc_dir_entry *p; -+ int i; -+ -+ for (i = 0; i < sizeof (rail_info)/sizeof (rail_info[0]); i++) -+ { -+ if ((p = create_proc_entry (rail_info[i].name, 0, cmRail->Rail->ProcDir)) != NULL) -+ { -+ p->read_proc = rail_info[i].read_func; -+ p->write_proc = rail_info[i].write_func; -+ p->data = cmRail; -+ p->owner = THIS_MODULE; -+ } -+ } -+ -+ if ((rail->SvcIndicatorDir = proc_mkdir ("svc_indicators", cmRail->Rail->ProcDir)) != NULL) -+ { -+ for (i = 0; i < EP_SVC_NUM_INDICATORS; i++) -+ { -+ if ((p = create_proc_entry (svc_indicator_names[i], 0, rail->SvcIndicatorDir)) != NULL) -+ { -+ 
svc_indicator_data[i][rail->Number].svc_indicator = i; -+ svc_indicator_data[i][rail->Number].rail = rail; -+ -+ p->write_proc = NULL; -+ p->read_proc = proc_read_svc_indicator_rail_bitmap; -+ p->data = (void *)&svc_indicator_data[i][rail->Number]; -+ p->owner = THIS_MODULE; -+ } -+ } -+ } -+} -+ -+void -+cm_procfs_rail_fini (CM_RAIL *cmRail) -+{ -+ EP_RAIL *rail = cmRail->Rail; -+ int i; -+ -+ if (rail->SvcIndicatorDir) -+ { -+ for (i = 0; i < EP_SVC_NUM_INDICATORS; i++) -+ remove_proc_entry (svc_indicator_names[i], rail->SvcIndicatorDir); -+ -+ remove_proc_entry ("svc_indicators", cmRail->Rail->ProcDir); -+ } -+ -+ for (i = 0; i < sizeof (rail_info)/sizeof (rail_info[0]); i++) -+ remove_proc_entry (rail_info[i].name, cmRail->Rail->ProcDir); -+} -+ -+void -+cm_procfs_init (CM_SUBSYS *subsys) -+{ -+ struct proc_dir_entry *p; -+ int i; -+ -+ qsnet_proc_register_hex (ep_config_root, "machine_id", &MachineId, 0); -+ -+ if ((svc_indicators_root = proc_mkdir("svc_indicators", ep_procfs_root)) != NULL) -+ { -+ for (i = 0; i < EP_SVC_NUM_INDICATORS; i++) -+ { -+ if ((p = create_proc_entry (svc_indicator_names[i], 0, svc_indicators_root)) != NULL) -+ { -+ p->write_proc = NULL; -+ p->read_proc = proc_read_svc_indicator_bitmap; -+ p->data = (void *)(long) i; -+ p->owner = THIS_MODULE; -+ } -+ } -+ -+ } -+} -+ -+void -+cm_procfs_fini (CM_SUBSYS *subsys) -+{ -+ int i; -+ -+ if (svc_indicators_root) -+ { -+ for (i = 0; i < EP_SVC_NUM_INDICATORS; i++) -+ remove_proc_entry (svc_indicator_names[i], svc_indicators_root); -+ -+ remove_proc_entry ("svc_indicators", ep_procfs_root); -+ } -+ -+ remove_proc_entry ("machine_id", ep_config_root); -+} -diff -urN clean/drivers/net/qsnet/ep/commands_elan4.c linux-2.6.9/drivers/net/qsnet/ep/commands_elan4.c ---- clean/drivers/net/qsnet/ep/commands_elan4.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/commands_elan4.c 2005-07-20 08:01:33.000000000 -0400 -@@ -0,0 +1,173 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: commands_elan4.c,v 1.2.10.1 2005/07/20 12:01:33 mike Exp $ $Name: QSNETMODULES-5-11-3_20050907 $" -+/* $Source: /cvs/master/quadrics/epmod/commands_elan4.c,v $*/ -+ -+#include -+ -+#include -+ -+#include "kcomm_vp.h" -+#include "kcomm_elan4.h" -+#include "debug.h" -+ -+#include -+ -+static __inline__ void -+elan4_command_write (ELAN4_CQ *cq, E4_uint64 val, unsigned off) -+{ -+ writeq (val, (void *)(cq->cq_mapping + offsetof (E4_CommandPort, Command[off]))); -+} -+ -+void -+elan4_nop_cmd (ELAN4_CQ *cq, E4_uint64 tag) -+{ -+ elan4_command_write (cq, tag | NOP_CMD, 0); -+} -+ -+void -+elan4_write_dword_cmd (ELAN4_CQ *cq, E4_Addr addr, E4_uint64 data) -+{ -+ elan4_command_write (cq, addr | WRITE_DWORD_CMD, 0); -+ elan4_command_write (cq, data, 1); -+} -+ -+void -+elan4_add_dword_cmd (ELAN4_CQ *cq, E4_Addr addr, E4_uint64 data) -+{ -+ elan4_command_write (cq, addr | ADD_DWORD_CMD, 0); -+ elan4_command_write (cq, data, 1); -+} -+ -+void -+elan4_copy64_cmd (ELAN4_CQ *cq, E4_Addr from, E4_Addr to, E4_uint32 datatype) -+{ -+ elan4_command_write (cq, from | (datatype << COPY64_DATA_TYPE_SHIFT) | COPY64_CMD, 0); -+ elan4_command_write (cq, to | (datatype << COPY64_DATA_TYPE_SHIFT), 1); -+} -+ -+void -+elan4_interrupt_cmd (ELAN4_CQ *cq, E4_uint64 cookie) -+{ -+ elan4_command_write (cq, (cookie << E4_MAIN_INT_SHIFT) | INTERRUPT_CMD, 0); -+} -+ -+ -+void -+elan4_run_thread_cmd (ELAN4_CQ *cq, E4_ThreadRegs *regs) -+{ -+ elan4_command_write (cq, regs->Registers[0] | RUN_THREAD_CMD, 0); -+ elan4_command_write (cq, regs->Registers[1], 1); -+ elan4_command_write (cq, regs->Registers[2], 2); -+ elan4_command_write (cq, regs->Registers[3], 3); -+ elan4_command_write (cq, regs->Registers[4], 4); -+ elan4_command_write (cq, regs->Registers[5], 5); -+ elan4_command_write (cq, regs->Registers[6], 6); -+} -+ -+void -+elan4_run_dma_cmd (ELAN4_CQ *cq, E4_DMA *dma) -+{ -+ E4_uint64 *dmaptr = (E4_uint64 *) dma; -+ -+ elan4_command_write (cq, dmaptr[0] | RUN_DMA_CMD, 0); -+ elan4_command_write (cq, dmaptr[1], 1); -+ elan4_command_write (cq, dmaptr[2], 2); -+ elan4_command_write (cq, dmaptr[3], 3); -+ elan4_command_write (cq, dmaptr[4], 4); -+ elan4_command_write (cq, dmaptr[5], 5); -+ elan4_command_write (cq, dmaptr[6], 6); -+} -+ -+void -+elan4_set_event_cmd (ELAN4_CQ *cq, E4_Addr event) -+{ -+ elan4_command_write (cq, event | SET_EVENT_CMD, 0); -+} -+ -+void -+elan4_set_eventn_cmd (ELAN4_CQ *cq, E4_Addr event, E4_uint32 count) -+{ -+ elan4_command_write (cq, SET_EVENTN_CMD,0); -+ elan4_command_write (cq, event | count, 1); -+} -+ -+void -+elan4_wait_event_cmd (ELAN4_CQ *cq, E4_Addr event, E4_uint64 candt, E4_uint64 param0, E4_uint64 param1) -+{ -+ elan4_command_write (cq, event | WAIT_EVENT_CMD, 0); -+ elan4_command_write (cq, candt, 1); -+ elan4_command_write (cq, param0, 2); -+ elan4_command_write (cq, param1, 3); -+} -+ -+void -+elan4_open_packet (ELAN4_CQ *cq, E4_uint64 command) -+{ -+ elan4_command_write (cq, command | OPEN_STEN_PKT_CMD, 0); -+} -+ -+void -+elan4_guard (ELAN4_CQ *cq, E4_uint64 command) -+{ -+ elan4_command_write (cq, command | GUARD_CMD, 0); -+} -+ -+void -+elan4_sendtrans0 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr) -+{ -+ elan4_command_write (cq, (trtype << 16) | SEND_TRANS_CMD, 0); -+ elan4_command_write (cq, addr, 1); -+} -+ -+void -+elan4_sendtrans1 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 p0) -+{ -+ elan4_command_write (cq, (trtype << 16) | SEND_TRANS_CMD, 0); -+ 
elan4_command_write (cq, addr, 1); -+ elan4_command_write (cq, p0, 2); -+} -+ -+void -+elan4_sendtrans2 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 p0, E4_uint64 p1) -+{ -+ elan4_command_write (cq, (trtype << 16) | SEND_TRANS_CMD, 0); -+ elan4_command_write (cq, addr, 1); -+ elan4_command_write (cq, p0, 2); -+ elan4_command_write (cq, p1, 3); -+} -+ -+void -+elan4_sendtransn (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, ...) -+{ -+ E4_uint32 ndword = ((trtype & TR_SIZE_MASK) >> TR_SIZE_SHIFT); -+ va_list ap; -+ register int i; -+ -+ elan4_command_write (cq, (trtype << 16) | SEND_TRANS_CMD, 0); -+ elan4_command_write (cq, addr, 1); -+ -+ va_start (ap, addr); -+ for (i = 2; i < ndword+2; i++) -+ elan4_command_write (cq, va_arg (ap, E4_uint64), i); -+ va_end (ap); -+} -+ -+void -+elan4_sendtransp (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 *ptr) -+{ -+ E4_uint32 ndword = ((trtype &TR_SIZE_MASK) >> TR_SIZE_SHIFT); -+ register int i; -+ -+ elan4_command_write (cq, (trtype << 16) | SEND_TRANS_CMD, 0); -+ elan4_command_write (cq, addr, 1); -+ for (i = 2; i < ndword+2; i++) -+ elan4_command_write (cq, *ptr++, i); -+} -+ -diff -urN clean/drivers/net/qsnet/ep/conf_linux.c linux-2.6.9/drivers/net/qsnet/ep/conf_linux.c ---- clean/drivers/net/qsnet/ep/conf_linux.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/conf_linux.c 2005-09-07 10:35:03.000000000 -0400 -@@ -0,0 +1,311 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: conf_linux.c,v 1.40.2.3 2005/09/07 14:35:03 mike Exp $" -+/* $Source: /cvs/master/quadrics/epmod/conf_linux.c,v $ */ -+ -+#include -+#include -+ -+#include -+#include -+#include -+ -+#include "cm.h" -+ -+#include "conf_linux.h" -+ -+#include -+#include -+#include -+#include -+ -+/* Module parameters */ -+unsigned int epdebug = 0; -+unsigned int epdebug_console = 0; -+unsigned int epdebug_cmlevel = 0; -+#if ! defined(CONFIG_EP_NO_CHECK_SUM) -+unsigned int epdebug_check_sum = 0; -+#endif -+int disabled = 0; -+int sdram_assert = 0; -+int assfail_mode = 0; -+int txd_stabilise = 7; -+int portals_envelopes = 0; -+ -+/* External module parameters */ -+extern int MaxSwitchLevels; -+extern int RejoinCheck; -+extern int RejoinPanic; -+extern int PositionCheck; -+extern int MachineId; -+ -+/* Module globals */ -+EP_SYS epsys; -+ -+#ifdef MODULE -+MODULE_AUTHOR("Quadrics Ltd"); -+MODULE_DESCRIPTION("Elan Kernel Comms"); -+ -+MODULE_LICENSE("GPL"); -+ -+module_param(epdebug, uint, 0); -+module_param(epdebug_console, uint, 0); -+module_param(epdebug_cmlevel, uint, 0); -+#if ! 
defined(CONFIG_EP_NO_CHECK_SUM) -+module_param(epdebug_check_sum, uint, 0); -+#endif -+module_param(disabled, uint, 0); -+ -+module_param(MachineId, uint, 0); -+module_param(RejoinPanic, uint, 0); -+module_param(RejoinCheck, uint, 0); -+module_param(PositionCheck, uint, 0); -+module_param(MaxSwitchLevels, uint, 0); -+ -+module_param(sdram_assert, uint, 0); -+module_param(assfail_mode, uint, 0); -+module_param(txd_stabilise, uint, 0); -+module_param(portals_envelopes,uint, 0); -+ -+/* epcomms.c large message service functions */ -+EXPORT_SYMBOL(ep_alloc_xmtr); -+EXPORT_SYMBOL(ep_free_xmtr); -+EXPORT_SYMBOL(ep_transmit_message); -+EXPORT_SYMBOL(ep_multicast_message); -+EXPORT_SYMBOL(ep_transmit_rpc); -+ -+EXPORT_SYMBOL(ep_alloc_rcvr); -+EXPORT_SYMBOL(ep_free_rcvr); -+EXPORT_SYMBOL(ep_queue_receive); -+EXPORT_SYMBOL(ep_requeue_receive); -+EXPORT_SYMBOL(ep_rpc_put); -+EXPORT_SYMBOL(ep_rpc_get); -+EXPORT_SYMBOL(ep_complete_rpc); -+EXPORT_SYMBOL(ep_complete_receive); -+ -+EXPORT_SYMBOL(ep_poll_transmits); -+EXPORT_SYMBOL(ep_enable_txcallbacks); -+EXPORT_SYMBOL(ep_disable_txcallbacks); -+ -+/* epcomms.c functions for accessing fields of rxds/txds */ -+EXPORT_SYMBOL(ep_rxd_arg); -+EXPORT_SYMBOL(ep_rxd_len); -+EXPORT_SYMBOL(ep_rxd_isrpc); -+EXPORT_SYMBOL(ep_rxd_envelope); -+EXPORT_SYMBOL(ep_rxd_payload); -+EXPORT_SYMBOL(ep_rxd_node); -+EXPORT_SYMBOL(ep_rxd_status); -+EXPORT_SYMBOL(ep_rxd_statusblk); -+EXPORT_SYMBOL(ep_txd_node); -+EXPORT_SYMBOL(ep_txd_statusblk); -+ -+/* kmap.c, nmh.c - handling mapping of pages into network memory */ -+EXPORT_SYMBOL(ep_dvma_reserve); -+EXPORT_SYMBOL(ep_dvma_release); -+EXPORT_SYMBOL(ep_dvma_load); -+EXPORT_SYMBOL(ep_dvma_unload); -+EXPORT_SYMBOL(ep_nmd_subset); -+EXPORT_SYMBOL(ep_nmd_merge); -+ -+EXPORT_SYMBOL(ep_system); -+ -+/* kcomm.c */ -+EXPORT_SYMBOL(ep_nodeid); -+EXPORT_SYMBOL(ep_numnodes); -+EXPORT_SYMBOL(ep_waitfor_nodeid); -+ -+/* railhints.c */ -+EXPORT_SYMBOL(ep_pickRail); -+EXPORT_SYMBOL(ep_xmtr_bcastrail); -+EXPORT_SYMBOL(ep_xmtr_prefrail); -+EXPORT_SYMBOL(ep_xmtr_availrails); -+EXPORT_SYMBOL(ep_xmtr_noderails); -+EXPORT_SYMBOL(ep_rcvr_prefrail); -+EXPORT_SYMBOL(ep_rcvr_availrails); -+EXPORT_SYMBOL(ep_rxd_railmask); -+ -+EXPORT_SYMBOL(ep_svc_indicator_bitmap); -+EXPORT_SYMBOL(ep_svc_indicator_is_set); -+EXPORT_SYMBOL(ep_svc_indicator_clear); -+EXPORT_SYMBOL(ep_svc_indicator_set); -+ -+/* cm.c */ -+EXPORT_SYMBOL(cm_svc_indicator_clear); -+EXPORT_SYMBOL(cm_svc_indicator_set); -+EXPORT_SYMBOL(cm_svc_indicator_is_set); -+EXPORT_SYMBOL(cm_svc_indicator_bitmap); -+ -+#endif -+ -+EP_SYS * -+ep_system() -+{ -+ return (&epsys); -+} -+ -+void -+ep_mod_inc_usecount() -+{ -+ MOD_INC_USE_COUNT; -+} -+ -+void -+ep_mod_dec_usecount() -+{ -+ MOD_DEC_USE_COUNT; -+} -+ -+#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE) -+ -+#include -+ -+static int -+ep_dump_event (struct notifier_block *self, unsigned long event, void *buffer) -+{ -+ if (event == DUMP_BEGIN) -+ ep_shutdown (&epsys); -+ -+ return (NOTIFY_DONE); -+} -+static struct notifier_block ep_dump_notifier = -+{ -+ notifier_call: ep_dump_event, -+ priority: 0, -+}; -+ -+#endif -+ -+static int -+ep_reboot_event (struct notifier_block *self, unsigned long event, void *buffer) -+{ -+ if ((event == SYS_RESTART || event == SYS_HALT || event == SYS_POWER_OFF)) -+ ep_shutdown (&epsys); -+ -+ return (NOTIFY_DONE); -+} -+ -+static struct notifier_block ep_reboot_notifier = -+{ -+ notifier_call: ep_reboot_event, -+ priority: 0, -+}; -+ -+#if !defined(NO_PANIC_NOTIFIER) -+static int -+ep_panic_event (struct 
notifier_block *self, unsigned long event, void *buffer) -+{ -+ ep_shutdown (&epsys); -+ -+ return (NOTIFY_DONE); -+} -+ -+static struct notifier_block ep_panic_notifier = -+{ -+ notifier_call: ep_panic_event, -+ priority: 0, -+}; -+#endif -+ -+/* -+ * Module configuration. -+ */ -+#ifdef MODULE -+static int __init ep_init(void) -+#else -+__initfunc(int ep_init(void)) -+#endif -+{ -+ register int rmask = 0; -+ -+ ep_procfs_init (); -+ -+ ep_sys_init (&epsys); -+ -+#if defined(CONFIG_ELAN4) || defined(CONFIG_ELAN4_MODULE) -+ rmask = ep4_create_rails (&epsys, disabled); -+#endif -+ -+ /* If we've brought up an elan4 rail, then disable all elan3 rails. */ -+ if ((rmask & ~disabled) != 0) -+ disabled = ~rmask; -+ -+#if defined(CONFIG_ELAN3) || defined(CONFIG_ELAN3_MODULE) -+ rmask = ep3_create_rails (&epsys, disabled); -+#endif -+ -+#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE) -+ register_dump_notifier (&ep_dump_notifier); -+#endif -+ register_reboot_notifier (&ep_reboot_notifier); -+ -+#if !defined(NO_PANIC_NOTIFIER) -+ notifier_chain_register (&panic_notifier_list, &ep_panic_notifier); -+#endif -+ -+ return (0); -+} -+ -+/* -+ * Module removal. -+ */ -+#ifdef MODULE -+static void -+__exit ep_exit(void) -+{ -+ register int i; -+ -+#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE) -+ unregister_dump_notifier (&ep_dump_notifier); -+#endif -+ unregister_reboot_notifier (&ep_reboot_notifier); -+ -+#if !defined(NO_PANIC_NOTIFIER) -+ notifier_chain_unregister (&panic_notifier_list, &ep_panic_notifier); -+#endif -+ -+ for (i = 0; i < EP_MAX_RAILS; i++) -+ { -+ if (epsys.Rails[i]) -+ { -+ switch (epsys.Rails[i]->State) -+ { -+ case EP_RAIL_STATE_UNINITIALISED: -+ break; -+ -+ case EP_RAIL_STATE_STARTED: -+ case EP_RAIL_STATE_RUNNING: -+ case EP_RAIL_STATE_INCOMPATIBLE: -+ /* remove per-rail CM proc entries */ -+ ep_stop_rail (epsys.Rails[i]); -+ break; -+ } -+ -+ /* remove EP proc rail entries after per-rail CM entries */ -+ ep_procfs_rail_fini (epsys.Rails[i]); -+ ep_destroy_rail (epsys.Rails[i]); -+ } -+ } -+ -+ ep_sys_fini (&epsys); -+ -+ ep_procfs_fini (); -+} -+ -+/* Declare the module init and exit functions */ -+module_init(ep_init); -+module_exit(ep_exit); -+ -+#endif -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/conf_linux.h linux-2.6.9/drivers/net/qsnet/ep/conf_linux.h ---- clean/drivers/net/qsnet/ep/conf_linux.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/conf_linux.h 2003-10-02 10:16:07.000000000 -0400 -@@ -0,0 +1,29 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: conf_linux.h,v 1.6 2003/10/02 14:16:07 mike Exp $" -+/* $Source: /cvs/master/quadrics/epmod/conf_linux.h,v $*/ -+ -+#ifndef __ELAN_CONF_LINUX_H -+#define __ELAN_CONF_LINUX_H -+ -+extern void ep_procfs_init(void); -+extern void ep_procfs_fini(void); -+extern void ep_procfs_rail_init(EP_RAIL *rail); -+extern void ep_procfs_rail_fini(EP_RAIL *rail); -+ -+extern void ep_procfs_svc_indicator_create(int svc_indicator, char *name); -+extern void ep_procfs_svc_indicator_remove(int svc_indicator, char *name); -+ -+#endif /* __ELAN_CONF_LINUX_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/debug.c linux-2.6.9/drivers/net/qsnet/ep/debug.c ---- clean/drivers/net/qsnet/ep/debug.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/debug.c 2004-11-12 05:55:03.000000000 -0500 -@@ -0,0 +1,145 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: debug.c,v 1.30 2004/11/12 10:55:03 mike Exp $" -+/* $Source: /cvs/master/quadrics/epmod/debug.c,v $*/ -+ -+#include -+ -+#include -+ -+#include "debug.h" -+ -+DisplayInfo di_ep_debug = {ep_debugf, DBG_DEBUG}; -+ -+/* -+ * Generate a partial bitmap string, for the bitmap from offset "off" for "count" bits, -+ * to allow for displaying of subsets, treat entry 0 of the bitmap as having value "base". -+ */ -+int -+ep_sprintf_bitmap (char *str, unsigned nbytes, bitmap_t *bitmap, int base, int off, int nbits) -+{ -+ char entry[12]; /* space for N-N */ -+ register int i, j, len; -+ register int notstart = off; -+ register int notfirst = 0; -+ char *p = str; -+ -+ for (i = off; i < nbits; i++) -+ { -+ if (BT_TEST (bitmap, i)) -+ { -+ for (j = i+1; j < nbits; j++) -+ if (! BT_TEST (bitmap, j)) -+ break; -+ -+ if (j == (i+1)) -+ len = (int)sprintf (entry, "%d", base + i); -+ else -+ len = (int)sprintf (entry, "%d-%d", base + i, base + j-1); -+ -+ /* NOTE the 2 is for: one for comma, one for (possible) closing bracket */ -+ if ((p - str) <= (nbytes - (len+3))) -+ p += (int)sprintf (p, "%c%s", notfirst++ ? ',' : notstart ? ' ' : '[', entry); -+ else -+ { -+ /* no more space on this line, so move onto next */ -+ sprintf (p, "%c", notfirst++ ? ',' : '['); -+ -+ return (i); -+ } -+ -+ i = j; -+ } -+ } -+ -+ if (!notfirst) -+ sprintf (str, ""); -+ else -+ strcpy (p, "]"); -+ -+ return (-1); -+} -+ -+void -+ep_display_bitmap (char *prefix, char *tag, bitmap_t *bitmap, unsigned base, unsigned nbits) -+{ -+ /* Tru64 kernel printf() truncates lines at 128 bytes - the man pages for printf (9) -+ * do not mention this restriction, nor that it does not terminate the line with a -+ * carriage return, this is pretty naff. -+ * Linux has a similar limit though is much more generous at 1024 - and you can just -+ * look at the code to see why this has been done. -+ * -+ * Our nodeset information could well be longer than 128 characters, so we're going to -+ * have to split it into a number of lines. 
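-+ * The loop below does exactly that: the first line carries the prefix and tag, continuation lines are padded with spaces to stay aligned under the tag, and each call to ep_sprintf_bitmap returns the bit offset to resume from (-1 once the whole bitmap has been emitted). -+ 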
*/ -+ -+#define LINEBUF_SIZE 128 -+ char *p, linebuf[LINEBUF_SIZE+1]; /* +1 for null termination */ -+ int i, noff, off = 0; -+ -+ do { -+ if (off == 0) -+ p = linebuf + (int)sprintf (linebuf, "%s: %s ", prefix, tag); -+ else -+ { -+ p = linebuf + (int)sprintf (linebuf, "%s: ", prefix); -+ for (i = 0; tag[i] != '\0'; i++) -+ *p++ = ' '; -+ } -+ -+ noff = ep_sprintf_bitmap (p, &linebuf[LINEBUF_SIZE-1]-p, bitmap, base, off, nbits); -+ -+ printk ("%s\n", linebuf); -+ -+ } while ((off = noff) != -1); -+ -+#undef LINEBUF_SIZE -+} -+ -+void -+ep_debugf (long mode, char *fmt, ...) -+{ -+ va_list ap; -+ char prefix[32]; -+ -+ va_start (ap, fmt); -+#if defined(LINUX) -+ sprintf (prefix, "[%08d.%04d] ", (int) lbolt, current->pid); -+#else -+ sprintf (prefix, "[%08d.----] ", (int) lbolt); -+#endif -+ qsnet_vdebugf ((mode & epdebug_console ? QSNET_DEBUG_CONSOLE: 0) | QSNET_DEBUG_BUFFER, prefix, fmt, ap); -+ va_end (ap); -+} -+ -+int -+ep_assfail (EP_RAIL *rail, const char *ex, const char *func, const char *file, const int line) -+{ -+ qsnet_debugf (QSNET_DEBUG_BUFFER, "ep: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line); -+ -+ printk (KERN_EMERG "ep: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line); -+ -+ if (panicstr) -+ return (0); -+ -+ if (assfail_mode & 1) /* return to BUG() */ -+ return 1; -+ -+ if (assfail_mode & 2) -+ panic ("ep: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line); -+ if (assfail_mode & 4) -+ epdebug = 0; -+ -+ return 0; -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/debug_elan4.c linux-2.6.9/drivers/net/qsnet/ep/debug_elan4.c ---- clean/drivers/net/qsnet/ep/debug_elan4.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/debug_elan4.c 2004-05-19 06:21:04.000000000 -0400 -@@ -0,0 +1,59 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: debug_elan4.c,v 1.1 2004/05/19 10:21:04 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $" -+/* $Source: /cvs/master/quadrics/epmod/debug_elan4.c,v $*/ -+ -+#include -+ -+#include -+ -+#include "kcomm_vp.h" -+#include "kcomm_elan4.h" -+#include "conf_linux.h" -+#include "debug.h" -+ -+static void -+ep4_display_ecqs (EP4_RAIL *rail) -+{ -+ struct list_head *el; -+ unsigned long flags; -+ int i; -+ -+ spin_lock_irqsave (&rail->r_ecq_lock, flags); -+ for (i = 0; i < EP4_NUM_ECQ; i++) { -+ list_for_each (el, &rail->r_ecq_list[i]) { -+ EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link); -+ -+ ep_debugf (DBG_DEBUG, "ECQ: type %d: avail %d cqnum %d\n", i, ecq->ecq_avail, elan4_cq2num (ecq->ecq_cq)); -+ } -+ } -+ spin_unlock_irqrestore (&rail->r_ecq_lock, flags); -+} -+ -+void -+ep4_debug_rail (EP_RAIL *r) -+{ -+ EP4_RAIL *rail = (EP4_RAIL *) r; -+ EP_SYS *sys = rail->r_generic.System; -+ -+ ep_debugf (DBG_DEBUG, "ep%d: is elan4 %d rev %c\n", rail->r_generic.Number, -+ rail->r_generic.Devinfo.dev_instance, 'a' + rail->r_generic.Devinfo.dev_revision_id); -+ -+ ep4_display_ecqs (rail); -+ -+ ep_display_alloc (&sys->Allocator); -+ ep_display_rmap (sys->Allocator.ResourceMap); -+ -+ ep_display_alloc (&rail->r_generic.ElanAllocator); -+ ep_display_alloc (&rail->r_generic.MainAllocator); -+ -+ ep_display_rmap (rail->r_generic.ElanAllocator.ResourceMap); -+} -+ -diff -urN clean/drivers/net/qsnet/ep/debug.h linux-2.6.9/drivers/net/qsnet/ep/debug.h ---- clean/drivers/net/qsnet/ep/debug.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/debug.h 2005-04-05 12:36:28.000000000 -0400 -@@ -0,0 +1,111 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef _ELAN3_EPDEBUG_H -+#define _ELAN3_EPDEBUG_H -+ -+#ident "$Id: debug.h,v 1.21 2005/04/05 16:36:28 mike Exp $" -+/* $Source: /cvs/master/quadrics/epmod/debug.h,v $ */ -+ -+extern unsigned int epdebug; -+extern unsigned int epdebug_console; -+extern unsigned int epdebug_cmlevel; -+#if ! defined(CONFIG_EP_NO_CHECK_SUM) -+extern unsigned int epdebug_check_sum; -+#endif -+#define DBG_CONFIG 0x00000001 /* Module configuration */ -+#define DBG_PROBE 0x00000002 -+#define DBG_ROUTETABLE 0x00000004 -+#define DBG_STATEMAP 0x00000008 -+ -+#define DBG_CM 0x00000020 -+#define DBG_XMTR 0x00000040 -+#define DBG_RCVR 0x00000080 -+#define DBG_FORWARD 0x00000100 -+#define DBG_DISCON 0x00000200 -+#define DBG_EPTRAP 0x00000400 -+#define DBG_COMMAND 0x00000800 -+#define DBG_RETRY 0x00001000 -+#define DBG_DEBUG 0x00002000 -+#define DBG_NETWORK_ERROR 0x00004000 -+#define DBG_MSGSYS 0x00008000 -+#define DBG_MANAGER 0x00010000 -+#define DBG_KMAP 0x00020000 -+#define DBG_FAILOVER 0x00040000 -+#define DBG_MAPNMD 0x00080000 -+#define DBG_KMSG 0x00100000 -+#define DBG_SVC 0x00200000 -+#define DBG_STABILISE 0x00400000 -+ -+#if defined(DEBUG_PRINTF) -+ -+# define EPRINTF0(m,fmt) ((epdebug&(m)) ? ep_debugf(m,fmt) : (void)0) -+# define EPRINTF1(m,fmt,a) ((epdebug&(m)) ? ep_debugf(m,fmt,a) : (void)0) -+# define EPRINTF2(m,fmt,a,b) ((epdebug&(m)) ? ep_debugf(m,fmt,a,b) : (void)0) -+# define EPRINTF3(m,fmt,a,b,c) ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c) : (void)0) -+# define EPRINTF4(m,fmt,a,b,c,d) ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d) : (void)0) -+# define EPRINTF5(m,fmt,a,b,c,d,e) ((epdebug&(m)) ? 
ep_debugf(m,fmt,a,b,c,d,e) : (void)0) -+# define EPRINTF6(m,fmt,a,b,c,d,e,f) ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d,e,f) : (void)0) -+# define EPRINTF7(m,fmt,a,b,c,d,e,f,g) ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d,e,f,g) : (void)0) -+# define EPRINTF8(m,fmt,a,b,c,d,e,f,g,h) ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d,e,f,g,h) : (void)0) -+# define EPRINTF9(m,fmt,a,b,c,d,e,f,g,h,i) ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d,e,f,g,h,i) : (void)0) -+# define EPRINTF10(m,fmt,a,b,c,d,e,f,g,h,i,j) ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d,e,f,g,h,i,j) : (void)0) -+ -+# define CPRINTF0(lvl,fmt) (((lvl) <= epdebug_cmlevel) ? EPRINTF0(DBG_CM,fmt) : (void)0) -+# define CPRINTF1(lvl,fmt,a) (((lvl) <= epdebug_cmlevel) ? EPRINTF1(DBG_CM,fmt,a) : (void)0) -+# define CPRINTF2(lvl,fmt,a,b) (((lvl) <= epdebug_cmlevel) ? EPRINTF2(DBG_CM,fmt,a,b) : (void)0) -+# define CPRINTF3(lvl,fmt,a,b,c) (((lvl) <= epdebug_cmlevel) ? EPRINTF3(DBG_CM,fmt,a,b,c) : (void)0) -+# define CPRINTF4(lvl,fmt,a,b,c,d) (((lvl) <= epdebug_cmlevel) ? EPRINTF4(DBG_CM,fmt,a,b,c,d) : (void)0) -+# define CPRINTF5(lvl,fmt,a,b,c,d,e) (((lvl) <= epdebug_cmlevel) ? EPRINTF5(DBG_CM,fmt,a,b,c,d,e) : (void)0) -+# define CPRINTF6(lvl,fmt,a,b,c,d,e,f) (((lvl) <= epdebug_cmlevel) ? EPRINTF6(DBG_CM,fmt,a,b,c,d,e,f) : (void)0) -+# define CPRINTF7(lvl,fmt,a,b,c,d,e,f,g) (((lvl) <= epdebug_cmlevel) ? EPRINTF7(DBG_CM,fmt,a,b,c,d,e,f,g) : (void)0) -+# define CPRINTF8(lvl,fmt,a,b,c,d,e,f,g,h) (((lvl) <= epdebug_cmlevel) ? EPRINTF8(DBG_CM,fmt,a,b,c,d,e,f,g,h) : (void)0) -+# define CPRINTF9(lvl,fmt,a,b,c,d,e,f,g,h,i) (((lvl) <= epdebug_cmlevel) ? EPRINTF9(DBG_CM,fmt,a,b,c,d,e,f,g,h,i) : (void)0) -+ -+#if defined __GNUC__ -+extern void ep_debugf (long mode, char *fmt, ...) __attribute__ ((format (printf,2,3))); -+#else -+extern void ep_debugf (long mode, char *fmt, ...); -+#endif -+ -+#else -+ -+# define EPRINTF0(m,fmt) (0) -+# define EPRINTF1(m,fmt,a) (0) -+# define EPRINTF2(m,fmt,a,b) (0) -+# define EPRINTF3(m,fmt,a,b,c) (0) -+# define EPRINTF4(m,fmt,a,b,c,d) (0) -+# define EPRINTF5(m,fmt,a,b,c,d,e) (0) -+# define EPRINTF6(m,fmt,a,b,c,d,e,f) (0) -+# define EPRINTF7(m,fmt,a,b,c,d,e,f,g) (0) -+# define EPRINTF8(m,fmt,a,b,c,d,e,f,g,h) (0) -+# define EPRINTF9(m,fmt,a,b,c,d,e,f,g,h,i) (0) -+# define EPRINTF10(m,fmt,a,b,c,d,e,f,g,h,i,j) (0) -+ -+# define CPRINTF0(lvl,fmt) (0) -+# define CPRINTF1(lvl,fmt,a) (0) -+# define CPRINTF2(lvl,fmt,a,b) (0) -+# define CPRINTF3(lvl,fmt,a,b,c) (0) -+# define CPRINTF4(lvl,fmt,a,b,c,d) (0) -+# define CPRINTF5(lvl,fmt,a,b,c,d,e) (0) -+# define CPRINTF6(lvl,fmt,a,b,c,d,e,f) (0) -+# define CPRINTF7(lvl,fmt,a,b,c,d,e,f,g) (0) -+# define CPRINTF8(lvl,fmt,a,b,c,d,e,f,g,h) (0) -+# define CPRINTF9(lvl,fmt,a,b,c,d,e,f,g,h,i) (0) -+ -+#endif /* DEBUG */ -+ -+extern DisplayInfo di_ep_debug; -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -+#endif /* _ELAN3_EPDEBUG_H */ -+ -diff -urN clean/drivers/net/qsnet/ep/epcomms_asm_elan4_thread.S linux-2.6.9/drivers/net/qsnet/ep/epcomms_asm_elan4_thread.S ---- clean/drivers/net/qsnet/ep/epcomms_asm_elan4_thread.S 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/epcomms_asm_elan4_thread.S 2004-04-25 07:25:43.000000000 -0400 -@@ -0,0 +1,133 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: epcomms_asm_elan4_thread.S,v 1.5 2004/04/25 11:25:43 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $" -+/* $Source: /cvs/master/quadrics/epmod/epcomms_asm_elan4_thread.S,v $*/ -+ -+#include -+#include -+ -+#include "assym_elan4.h" -+ -+/* XXXXX - registers.h */ -+#define E4_MAIN_INT_SHIFT 14 -+ -+/* -+ * c_waitevent_interrupt (E4_uint64 *commandport, E4_Event *event, E4_uint64 count, E4_uint64 intcookie) -+ */ -+ .global c_waitevent_interrupt -+c_waitevent_interrupt: -+ add %sp, -192, %sp -+ st64 %r16, [%sp + 64] // preserve call preserved registers -+ st64 %r24, [%sp + 128] // - see CALL_USED_REGISTERS. -+ mov %r16,%r16 // BUG FIX: E4 RevA -+ mov %r24,%r24 // BUG FIX: E4 RevA -+ nop // BUG FIX: E4 RevA -+ nop // BUG FIX: E4 RevA -+ -+ mov %r7, %r18 // (%r2) return pc -+1: call 2f -+ mov %sp, %r17 // (%r1) SP -+2: add %r7, (3f-1b), %r16 // (%r0) PC -+ st32 %r16, [%sp] // event source block -+ mov MAKE_EXT_CLEAN_CMD, %r23 -+ st8 %r23, [%sp+56] // event source block -+ mov %r16,%r16 // BUG FIX: E4 RevA -+ mov %r23,%r23 // BUG FIX: E4 RevA -+ nop // BUG FIX: E4 RevA -+ nop // BUG FIX: E4 RevA -+ -+ or %r9, WAIT_EVENT_CMD, %r16 ! WAIT_EVENT_CMD | event -+ sll8 %r10, 32, %r17 -+ or %r17, E4_EVENT_TYPE_VALUE(E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, 8), %r17 ! ev_CountAndType -+ mov %sp, %r18 ! ev_Source -+ mov %r8, %r19 ! ev_Dest -+ sll8 %r11, E4_MAIN_INT_SHIFT, %r20 -+ or %r20, INTERRUPT_CMD, %r20 ! INTERRUPT_CMD | (cookie << E4_MAIN_INT_SHIFT) -+ mov NOP_CMD, %r21 -+ mov NOP_CMD, %r22 -+ mov NOP_CMD, %r23 -+ -+ st64suspend %r16, [%r8] -+ -+3: ld64 [%sp + 64], %r16 // restore call preserved register -+ ld64 [%sp + 128], %r24 -+ jmpl %r2+8, %r0 // and return -+ add %sp, 192, %sp -+ -+ -+#define EP4_RCVR_PENDING_STALLED 1 /* indicates thread has stalled for no descriptor (rcvr_pending_head) */ -+ -+#define RXD_DEBUG(VAL,RXD,TMP) \ -+ mov VAL, TMP; \ -+ st8 TMP, [RXD + EP4_RXD_DEBUG] -+ -+ -+ /* -+ * %r2 - rcvr elan -+ * %r3 - rxd elan -+ */ -+ .global c_queue_rxd -+c_queue_rxd: -+ RXD_DEBUG(1, %r3, %r23) -+ -+ ld16 [%r2 + EP4_RCVR_PENDING_TAILP], %r18 /* r18 == tailp, r19 = head */ -+ add %r3, EP4_RXD_NEXT, %r4 -+ -+ st8 %r0, [%r3 + EP4_RXD_NEXT] /* rxd->rxd_next = NULL */ -+ st8 %r4, [%r2 + EP4_RCVR_PENDING_TAILP] /* tailp = &rxd->rxd_next */ -+ st8 %r3, [%r18] /* *tailp = rxd */ -+ -+ cmp %r19, EP4_RCVR_PENDING_STALLED /* thread stalled ? */ -+ beq 1f -+ mov %r18, %r16 /* must have used %r16, %r19, %r23 */ -+ mov %r3, %r23 -+ -+ RXD_DEBUG(2, %r3, %r23) -+ -+ st8suspend %r16, [%r3 + EP4_RXD_QUEUED] /* no - mark as queued - all done */ -+ -+1: st8 %r16, [%r3 + EP4_RXD_QUEUED] /* mark as queued */ -+ -+ RXD_DEBUG(3, %r3, %r23) -+ -+ mov %r3, %r8 /* return rxd from c_stall_thread */ -+ ba .epcomms_resume_thread /* resume the thread */ -+ ld64 [%r2 + EP4_RCVR_THREAD_STALL], %r0 -+ -+ /* -+ * c_stall_thread (EP4_RCVR_ELAN *rcvrElan) -+ */ -+ .global c_stall_thread -+c_stall_thread: -+ add %sp, -192, %sp -+ st64 %r16, [%sp + 64] // preserve call preserved registers -+ st64 %r24, [%sp + 128] // - see CALL_USED_REGISTERS. 
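-+ // The saves above park the call-preserved registers; the code below then marks the receiver as stalled (EP4_RCVR_PENDING_STALLED) and suspends itself with st64suspend, to be restarted at .epcomms_resume_thread by c_queue_rxd once a descriptor is queued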
-+ mov %r16,%r16 // BUG FIX: E4 RevA -+ mov %r24,%r24 // BUG FIX: E4 RevA -+ nop // BUG FIX: E4 RevA -+ nop // BUG FIX: E4 RevA -+ -+ mov EP4_RCVR_PENDING_STALLED, %r9 // Mark rcvr as stalled -+ st8 %r9, [%r8 + EP4_RCVR_PENDING_HEAD] -+ -+ // XXXX _ TBD should generate interrupt -+ -+ mov %r1, %r17 // SP -+ mov %r7, %r23 // return pc -+ -+ st64suspend %r16, [%r8 + EP4_RCVR_THREAD_STALL] -+ -+.epcomms_resume_thread: -+ /* %r8 == rxdElan */ -+ -+ ld64 [%sp + 64], %r16 // restore call preserved register -+ ld64 [%sp + 128], %r24 -+ jmpl %r7+8, %r0 // and return -+ add %sp, 192, %sp -+ -diff -urN clean/drivers/net/qsnet/ep/epcomms.c linux-2.6.9/drivers/net/qsnet/ep/epcomms.c ---- clean/drivers/net/qsnet/ep/epcomms.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/epcomms.c 2004-11-30 07:02:06.000000000 -0500 -@@ -0,0 +1,484 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: epcomms.c,v 1.77 2004/11/30 12:02:06 mike Exp $" -+/* $Source: /cvs/master/quadrics/epmod/epcomms.c,v $ */ -+ -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include "cm.h" -+#include "debug.h" -+ -+static void -+ep_comms_thread (void *arg) -+{ -+ EP_COMMS_SUBSYS *subsys = (EP_COMMS_SUBSYS *) arg; -+ struct list_head *el; -+ -+ kernel_thread_init ("ep_comms"); -+ -+ /* since ep_alloc_xmtr() has incremented the module use count, -+ * we would be preventing the module from being unloaded, so -+ * we decrement the use count since this thread must terminate -+ * during unload of the module. -+ */ -+ ep_mod_dec_usecount(); -+ -+ for (;;) -+ { -+ long nextRunTime = 0; -+ -+ /* NOTE - subsys->Lock serializes us against flush/relocations -+ * caused by rail nodeset transitions. -+ */ -+ kmutex_lock (&subsys->Lock); -+ list_for_each (el, &subsys->Transmitters) { -+ nextRunTime = ep_check_xmtr (list_entry (el, EP_XMTR, Link), nextRunTime); -+ } -+ -+ list_for_each (el, &subsys->Receivers) { -+ nextRunTime = ep_check_rcvr (list_entry (el, EP_RCVR, Link), nextRunTime); -+ } -+ kmutex_unlock (&subsys->Lock); -+ -+#if ! 
defined(CONFIG_EP_NO_CHECK_SUM) -+ ep_csum_rxds (subsys); -+#endif -+ nextRunTime = ep_forward_rxds (subsys, nextRunTime); -+ -+ if (ep_kthread_sleep (&subsys->Thread, nextRunTime) < 0) -+ break; -+ } -+ -+ ep_mod_inc_usecount(); -+ -+ ep_kthread_stopped (&subsys->Thread); -+ kernel_thread_exit(); -+} -+ -+int -+ep_comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *rail) -+{ -+ EP_COMMS_SUBSYS *subsys = (EP_COMMS_SUBSYS *) s; -+ EP_COMMS_RAIL *commsRail; -+ struct list_head *el; -+ -+ printk ("%s: vendorid=%x deviceid=%x\n", rail->Name, rail->Devinfo.dev_vendor_id, rail->Devinfo.dev_device_id); -+ -+ switch (rail->Devinfo.dev_device_id) -+ { -+#if defined(CONFIG_ELAN3) || defined(CONFIG_ELAN3_MODULE) -+ case PCI_DEVICE_ID_ELAN3: -+ commsRail = ep3comms_add_rail (s, sys, rail); -+ break; -+#endif -+#if defined(CONFIG_ELAN4) || defined(CONFIG_ELAN4_MODULE) -+ case PCI_DEVICE_ID_ELAN4: -+ commsRail = ep4comms_add_rail (s, sys, rail); -+ break; -+#endif -+ default: -+ return 0; -+ } -+ -+ if (commsRail == NULL) -+ return 1; -+ -+ commsRail->Rail = rail; -+ commsRail->Subsys = subsys; -+ -+ kmutex_lock (&subsys->Lock); -+ list_add_tail (&commsRail->Link, &subsys->Rails); -+ -+ list_for_each (el, &subsys->Receivers) { -+ EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link); -+ -+ EP_RAIL_OP (commsRail, Rcvr.AddRail) (rcvr, commsRail); -+ } -+ -+ list_for_each (el, &subsys->Transmitters) { -+ EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link); -+ -+ EP_RAIL_OP (commsRail, Xmtr.AddRail) (xmtr, commsRail); -+ } -+ -+ kmutex_unlock (&subsys->Lock); -+ -+ return 0; -+} -+ -+void -+ep_comms_del_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *rail) -+{ -+ EP_COMMS_SUBSYS *subsys = (EP_COMMS_SUBSYS *) s; -+ EP_COMMS_RAIL *commsRail = NULL; -+ struct list_head *el; -+ -+ kmutex_lock (&subsys->Lock); -+ /* find out rail entry and remove from system list */ -+ list_for_each (el, &subsys->Rails) { -+ if ((commsRail = list_entry (el, EP_COMMS_RAIL, Link))->Rail == rail) -+ break; -+ } -+ -+ list_del (&commsRail->Link); -+ -+ list_for_each (el, &subsys->Receivers) { -+ EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link); -+ -+ EP_RAIL_OP(commsRail, Rcvr.DelRail) (rcvr, commsRail); -+ } -+ -+ list_for_each (el, &subsys->Transmitters) { -+ EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link); -+ -+ EP_RAIL_OP(commsRail,Xmtr.DelRail) (xmtr, commsRail); -+ } -+ -+ kmutex_unlock (&subsys->Lock); -+ -+ EP_RAIL_OP (commsRail, DelRail) (commsRail); -+} -+ -+void -+ep_comms_fini (EP_SUBSYS *s, EP_SYS *sys) -+{ -+ EP_COMMS_SUBSYS *subsys = (EP_COMMS_SUBSYS *) s; -+ -+ ep_kthread_stop (&subsys->Thread); -+ ep_kthread_destroy (&subsys->Thread); -+ -+ if (subsys->ForwardXmtr) -+ ep_free_xmtr (subsys->ForwardXmtr); -+ -+ spin_lock_destroy (&subsys->ForwardDescLock); -+ -+#if ! defined(CONFIG_EP_NO_CHECK_SUM) -+ spin_lock_destroy (&subsys->CheckSumDescLock); -+#endif -+ -+ kmutex_destroy (&subsys->Lock); -+ -+ KMEM_FREE (subsys, sizeof (EP_COMMS_SUBSYS)); -+} -+ -+int -+ep_comms_init (EP_SYS *sys) -+{ -+ EP_COMMS_SUBSYS *subsys; -+ -+ KMEM_ZALLOC (subsys, EP_COMMS_SUBSYS *, sizeof (EP_COMMS_SUBSYS), 1); -+ -+ if (subsys == NULL) -+ return (ENOMEM); -+ -+ INIT_LIST_HEAD (&subsys->Rails); -+ INIT_LIST_HEAD (&subsys->Receivers); -+ INIT_LIST_HEAD (&subsys->Transmitters); -+ INIT_LIST_HEAD (&subsys->ForwardDescList); -+ -+ kmutex_init (&subsys->Lock); -+ spin_lock_init (&subsys->ForwardDescLock); -+ -+#if ! 
defined(CONFIG_EP_NO_CHECK_SUM) -+ INIT_LIST_HEAD (&subsys->CheckSumDescList); -+ spin_lock_init (&subsys->CheckSumDescLock); -+#endif -+ -+ subsys->Subsys.Sys = sys; -+ subsys->Subsys.Name = "epcomms"; -+ subsys->Subsys.Destroy = ep_comms_fini; -+ subsys->Subsys.AddRail = ep_comms_add_rail; -+ subsys->Subsys.RemoveRail = ep_comms_del_rail; -+ -+ ep_subsys_add (sys, &subsys->Subsys); -+ ep_kthread_init (&subsys->Thread); -+ -+ if ((subsys->ForwardXmtr = ep_alloc_xmtr (subsys->Subsys.Sys)) == NULL) -+ goto failed; -+ -+ if (kernel_thread_create (ep_comms_thread, subsys) == NULL) -+ goto failed; -+ ep_kthread_started (&subsys->Thread); -+ -+ return (0); -+ -+ failed: -+ ep_subsys_del (sys, &subsys->Subsys); -+ ep_comms_fini (&subsys->Subsys, sys); -+ -+ return (ENOMEM); -+} -+ -+void -+ep_comms_display (EP_SYS *sys, char *how) -+{ -+ EP_COMMS_SUBSYS *subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (sys, EPCOMMS_SUBSYS_NAME); -+ struct list_head *el; -+ -+ if (how == NULL || !strncmp (how, "rail", 4)) -+ { -+ kmutex_lock (&subsys->Lock); -+ list_for_each (el, &subsys->Rails) { -+ EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link); -+ -+ EP_RAIL_OP(commsRail, DisplayRail) (commsRail); -+ } -+ kmutex_unlock (&subsys->Lock); -+ } -+ -+ if (how == NULL || !strncmp (how, "xmtr", 4)) -+ list_for_each (el, &subsys->Transmitters) -+ ep_display_xmtr (&di_ep_debug, list_entry (el, EP_XMTR, Link)); -+ -+ if (how == NULL || !strncmp (how, "rcvr", 4)) -+ list_for_each (el, &subsys->Receivers) -+ ep_display_rcvr (&di_ep_debug, list_entry (el, EP_RCVR, Link), (how && how[4] == ',') ? 1 : 0); -+} -+ -+int -+ep_svc_indicator_set (EP_SYS *epsys, int svc_indicator) -+{ -+ EP_COMMS_SUBSYS *subsys; -+ struct list_head *el; -+ -+ EPRINTF1 (DBG_SVC,"ep_svc_indicator_set: %d \n",svc_indicator); -+ -+ if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS) -+ return (EP_EINVAL); -+ -+ if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL) { -+ EPRINTF0 (DBG_SVC,"ep_svc_indicator_set: ep_subsys_find failed\n"); -+ return (EP_EINVAL); -+ } -+ -+ -+ kmutex_lock (&subsys->Lock); /* walking rails list and setting info on Rail */ -+ list_for_each (el, &subsys->Rails) { -+ EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link); -+ -+ cm_svc_indicator_set(commsRail->Rail, svc_indicator); -+ } -+ kmutex_unlock (&subsys->Lock); -+ -+ EPRINTF1 (DBG_SVC,"ep_svc_indicator_set: %d success\n",svc_indicator); -+ return (EP_SUCCESS); -+} -+ -+int -+ep_svc_indicator_clear (EP_SYS *epsys, int svc_indicator) -+{ -+ EP_COMMS_SUBSYS *subsys; -+ struct list_head *el; -+ -+ EPRINTF1 (DBG_SVC,"ep_svc_indicator_clear: %d \n",svc_indicator); -+ -+ if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS) -+ return (EP_EINVAL); -+ -+ if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL) { -+ EPRINTF0 (DBG_SVC,"ep_svc_indicator_clear: ep_subsys_find failed\n"); -+ return (EP_EINVAL); -+ } -+ -+ kmutex_lock (&subsys->Lock); /* walking rails list and setting info on Rail */ -+ list_for_each (el, &subsys->Rails) { -+ EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link); -+ -+ cm_svc_indicator_clear(commsRail->Rail, svc_indicator); -+ } -+ kmutex_unlock (&subsys->Lock); -+ -+ EPRINTF1 (DBG_SVC,"ep_svc_indicator_clear: %d success\n",svc_indicator); -+ return (EP_SUCCESS); -+} -+ -+int -+ep_svc_indicator_is_set (EP_SYS *epsys, int svc_indicator, int nodeId) -+{ -+ EP_COMMS_SUBSYS *subsys; -+ struct list_head *el; -+ int set = 0; -+ -+ EPRINTF2 
(DBG_SVC,"ep_svc_indicator_is_set: svc %d node %d \n", svc_indicator, nodeId); -+ -+ if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL) { -+ EPRINTF0 (DBG_SVC,"ep_svc_indicator_is_set: ep_subsys_find failed\n"); -+ return (0); -+ } -+ -+ kmutex_lock (&subsys->Lock); /* walking rails list and setting info on Rail */ -+ list_for_each (el, &subsys->Rails) { -+ EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link); -+ -+ set |= cm_svc_indicator_is_set(commsRail->Rail, svc_indicator, nodeId); -+ } -+ kmutex_unlock (&subsys->Lock); -+ -+ EPRINTF3 (DBG_SVC,"ep_svc_indicator_is_set: svc %d node %d returning %d\n", svc_indicator, nodeId, set); -+ return set; -+} -+ -+int -+ep_svc_indicator_bitmap (EP_SYS *epsys, int svc_indicator, bitmap_t * bitmap, int low, int nnodes) -+{ -+ EP_COMMS_SUBSYS *subsys; -+ struct list_head *el; -+ -+ EPRINTF1 (DBG_SVC,"ep_svc_indicator_bitmap: svc %d\n", svc_indicator); -+ -+ if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS) -+ return (-1); -+ -+ if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL) { -+ EPRINTF0 (DBG_SVC,"ep_svc_indicator_bitmap: ep_subsys_find failed\n"); -+ return (-2); -+ } -+ -+ /* clear bitmap */ -+ bt_zero (bitmap, nnodes); -+ -+ kmutex_lock (&subsys->Lock); /* walking rails list and setting info on Rail */ -+ list_for_each (el, &subsys->Rails) { -+ EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link); -+ -+ /* this will or in each bit map */ -+ cm_svc_indicator_bitmap (commsRail->Rail, svc_indicator, bitmap, low, nnodes); -+ } -+ kmutex_unlock (&subsys->Lock); -+ -+ return (0); -+} -+ -+int -+ep_xmtr_svc_indicator_bitmap (EP_XMTR *xmtr, int svc_indicator, bitmap_t * bitmap, int low, int nnodes) -+{ -+ int i; -+ -+ EPRINTF1 (DBG_SVC,"ep_xmtr_svc_indicator_bitmap: svc %d\n", svc_indicator); -+ -+ if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS) -+ return (-1); -+ -+ /* clear bitmap */ -+ bt_zero (bitmap, nnodes); -+ -+ for (i = 0; i < EP_MAX_RAILS; i++) -+ { -+ if (xmtr->RailMask & (1 << i) ) -+ { -+ /* this will or in each bit map */ -+ cm_svc_indicator_bitmap (xmtr->Rails[i]->CommsRail->Rail, svc_indicator, bitmap, low, nnodes); -+ } -+ } -+ -+ return (0); -+} -+ -+EP_RAILMASK -+ep_svc_indicator_railmask (EP_SYS *epsys, int svc_indicator, int nodeId) -+{ -+ EP_COMMS_SUBSYS *subsys; -+ struct list_head *el; -+ EP_RAILMASK rmask=0; -+ -+ if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL) -+ return (rmask); -+ -+ kmutex_lock (&subsys->Lock); /* walking rails list and reading info from Rail */ -+ list_for_each (el, &subsys->Rails) { -+ EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link); -+ -+ if ( cm_svc_indicator_is_set(commsRail->Rail, svc_indicator,nodeId)) -+ rmask |= EP_RAIL2RAILMASK(commsRail->Rail->Number); -+ } -+ kmutex_unlock (&subsys->Lock); -+ -+ return (rmask); -+} -+ -+EP_RAILMASK -+ep_xmtr_svc_indicator_railmask (EP_XMTR *xmtr, int svc_indicator, int nodeId) -+{ -+ EP_RAILMASK rmask=0; -+ EP_COMMS_RAIL *commsRail; -+ int i; -+ -+ for (i = 0; i < EP_MAX_RAILS; i++) -+ { -+ if (xmtr->RailMask & (1 << i) ) -+ { -+ commsRail = xmtr->Rails[i]->CommsRail; -+ -+ if ( cm_svc_indicator_is_set(commsRail->Rail, svc_indicator,nodeId)) -+ rmask |= EP_RAIL2RAILMASK(commsRail->Rail->Number); -+ } -+ } -+ -+ EPRINTF3 (DBG_SVC, "ep_xmtr_svc_indicator_railmask: svc %d node %d mask 0x%x\n", svc_indicator, nodeId, rmask); -+ -+ return (rmask); -+} -+ -+EP_RAILMASK -+ep_rcvr_railmask (EP_SYS *epsys, EP_SERVICE 
service) -+{ -+ EP_COMMS_SUBSYS *subsys; -+ EP_RAILMASK rmask=0; -+ struct list_head *el; -+ -+ if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL) -+ return (rmask); -+ -+ kmutex_lock (&subsys->Lock); -+ list_for_each (el, &subsys->Receivers) { -+ EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link); -+ -+ if (rcvr->Service == service) -+ rmask |= rcvr->RailMask; -+ } -+ kmutex_unlock(&subsys->Lock); -+ -+ return (rmask); -+} -+ -+#if ! defined(CONFIG_EP_NO_CHECK_SUM) -+uint32_t -+ep_calc_check_sum (EP_SYS *sys, EP_ENVELOPE *env, EP_NMD *nmd, int nFrags) -+{ -+ EP_NMH *nmh; -+ int i; -+ uint16_t check_data = 0; -+ uint16_t check_env = 0; -+ -+ for (i = 0; i < nFrags; i++) { -+ /* find the nmh for this frag */ -+ nmh = ep_nmh_find (&sys->MappingTable, &nmd[i]); -+ -+ ASSERT( nmh != NULL); -+ -+ /* add the next frag to the check sum */ -+ check_data = nmh->nmh_ops->op_calc_check_sum (sys, nmh, &nmd[i], check_data); -+ } -+ -+ check_env = rolling_check_sum ((char *) env, offsetof(EP_ENVELOPE, CheckSum), 0); -+ -+ return (EP_ENVELOPE_CHECK_SUM | ( (check_env & 0x7FFF) << 16) | (check_data & 0xFFFF)); -+} -+#endif -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/epcomms_elan3.c linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan3.c ---- clean/drivers/net/qsnet/ep/epcomms_elan3.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan3.c 2004-08-03 07:34:34.000000000 -0400 -@@ -0,0 +1,191 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: epcomms_elan3.c,v 1.60 2004/08/03 11:34:34 david Exp $" -+/* $Source: /cvs/master/quadrics/epmod/epcomms_elan3.c,v $ */ -+ -+#include -+ -+#include -+#include -+#include -+ -+#include "kcomm_elan3.h" -+#include "epcomms_elan3.h" -+ -+void -+ep3comms_flush_callback (void *arg, statemap_t *map) -+{ -+ EP_COMMS_RAIL *commsRail = (EP_COMMS_RAIL *) arg; -+ EP_COMMS_SUBSYS *subsys = commsRail->Subsys; -+ struct list_head *el; -+ -+ kmutex_lock (&subsys->Lock); -+ list_for_each (el, &subsys->Transmitters) { -+ EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link); -+ -+ if (xmtr->Rails[commsRail->Rail->Number]) -+ ep3xmtr_flush_callback (xmtr, (EP3_XMTR_RAIL *) xmtr->Rails[commsRail->Rail->Number]); -+ } -+ -+ list_for_each (el, &subsys->Receivers) { -+ EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link); -+ -+ if (rcvr->Rails[commsRail->Rail->Number]) -+ ep3rcvr_flush_callback (rcvr, (EP3_RCVR_RAIL *) rcvr->Rails[commsRail->Rail->Number]); -+ } -+ kmutex_unlock (&subsys->Lock); -+} -+ -+void -+ep3comms_failover_callback (void *arg, statemap_t *map) -+{ -+ EP_COMMS_RAIL *commsRail = (EP_COMMS_RAIL *) arg; -+ EP_COMMS_SUBSYS *subsys = commsRail->Subsys; -+ struct list_head *el; -+ -+ kmutex_lock (&subsys->Lock); -+ list_for_each (el, &subsys->Transmitters) { -+ EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link); -+ -+ if (xmtr->Rails[commsRail->Rail->Number]) -+ ep3xmtr_failover_callback (xmtr, (EP3_XMTR_RAIL *) xmtr->Rails[commsRail->Rail->Number]); -+ } -+ -+ list_for_each (el, &subsys->Receivers) { -+ EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link); -+ -+ if (rcvr->Rails[commsRail->Rail->Number]) -+ ep3rcvr_failover_callback (rcvr, (EP3_RCVR_RAIL *) rcvr->Rails[commsRail->Rail->Number]); -+ } -+ kmutex_unlock (&subsys->Lock); -+} -+ -+void -+ep3comms_disconnect_callback (void *arg, statemap_t *map) 
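-+/* As with the flush and failover callbacks above: walk every transmitter and receiver bound to this rail under subsys->Lock and invoke its per-rail disconnect handler */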
-+{ -+ EP_COMMS_RAIL *commsRail = (EP_COMMS_RAIL *) arg; -+ EP_COMMS_SUBSYS *subsys = commsRail->Subsys; -+ struct list_head *el; -+ -+ kmutex_lock (&subsys->Lock); -+ list_for_each (el, &subsys->Transmitters) { -+ EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link); -+ -+ if (xmtr->Rails[commsRail->Rail->Number]) -+ ep3xmtr_disconnect_callback (xmtr, (EP3_XMTR_RAIL *) xmtr->Rails[commsRail->Rail->Number]); -+ } -+ -+ list_for_each (el, &subsys->Receivers) { -+ EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link); -+ -+ if (rcvr->Rails[commsRail->Rail->Number]) -+ ep3rcvr_disconnect_callback (rcvr, (EP3_RCVR_RAIL *) rcvr->Rails[commsRail->Rail->Number]); -+ } -+ kmutex_unlock (&subsys->Lock); -+} -+ -+EP_COMMS_RAIL * -+ep3comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *r) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ ELAN3_DEV *dev = rail->Device; -+ EP3_COMMS_RAIL *commsRail; -+ EP3_InputQueue qdesc; -+ int i; -+ -+ KMEM_ZALLOC (commsRail, EP3_COMMS_RAIL *, sizeof (EP3_COMMS_RAIL), TRUE); -+ -+ if (commsRail == NULL) -+ return NULL; -+ -+ commsRail->Generic.Ops.DelRail = ep3comms_del_rail; -+ commsRail->Generic.Ops.DisplayRail = ep3comms_display_rail; -+ commsRail->Generic.Ops.Rcvr.AddRail = ep3rcvr_add_rail; -+ commsRail->Generic.Ops.Rcvr.DelRail = ep3rcvr_del_rail; -+ commsRail->Generic.Ops.Rcvr.Check = ep3rcvr_check; -+ commsRail->Generic.Ops.Rcvr.QueueRxd = ep3rcvr_queue_rxd; -+ commsRail->Generic.Ops.Rcvr.RpcPut = ep3rcvr_rpc_put; -+ commsRail->Generic.Ops.Rcvr.RpcGet = ep3rcvr_rpc_get; -+ commsRail->Generic.Ops.Rcvr.RpcComplete = ep3rcvr_rpc_complete; -+ -+ commsRail->Generic.Ops.Rcvr.StealRxd = ep3rcvr_steal_rxd; -+ -+ commsRail->Generic.Ops.Rcvr.FillOutRailStats = ep3rcvr_fillout_rail_stats; -+ -+ commsRail->Generic.Ops.Rcvr.DisplayRcvr = ep3rcvr_display_rcvr; -+ commsRail->Generic.Ops.Rcvr.DisplayRxd = ep3rcvr_display_rxd; -+ -+ commsRail->Generic.Ops.Xmtr.AddRail = ep3xmtr_add_rail; -+ commsRail->Generic.Ops.Xmtr.DelRail = ep3xmtr_del_rail; -+ commsRail->Generic.Ops.Xmtr.Check = ep3xmtr_check; -+ commsRail->Generic.Ops.Xmtr.BindTxd = ep3xmtr_bind_txd; -+ commsRail->Generic.Ops.Xmtr.UnbindTxd = ep3xmtr_unbind_txd; -+ commsRail->Generic.Ops.Xmtr.PollTxd = ep3xmtr_poll_txd; -+ commsRail->Generic.Ops.Xmtr.CheckTxdState = ep3xmtr_check_txd_state; -+ -+ commsRail->Generic.Ops.Xmtr.DisplayXmtr = ep3xmtr_display_xmtr; -+ commsRail->Generic.Ops.Xmtr.DisplayTxd = ep3xmtr_display_txd; -+ -+ commsRail->Generic.Ops.Xmtr.FillOutRailStats = ep3xmtr_fillout_rail_stats; -+ -+ /* Allocate the input queues at their fixed elan address */ -+ if (! 
(commsRail->QueueDescs = ep_alloc_memory_elan (r, EP_EPCOMMS_QUEUE_BASE, roundup (EP_MSG_NSVC * sizeof (EP3_InputQueue), PAGESIZE), EP_PERM_ALL, 0))) -+ { -+ KMEM_FREE (commsRail, sizeof (EP3_COMMS_RAIL)); -+ return NULL; -+ } -+ -+ qdesc.q_state = E3_QUEUE_FULL; -+ qdesc.q_base = 0; -+ qdesc.q_top = 0; -+ qdesc.q_fptr = 0; -+ qdesc.q_bptr = 0; -+ qdesc.q_size = 0; -+ qdesc.q_event.ev_Count = 0; -+ qdesc.q_event.ev_Type = 0; -+ -+ /* Initialise all queue entries to be full */ -+ for (i = 0; i < EP_MSG_NSVC; i++) -+ elan3_sdram_copyl_to_sdram (dev, &qdesc, commsRail->QueueDescs + (i * sizeof (EP3_InputQueue)), sizeof (EP3_InputQueue)); -+ -+ ep_register_callback (r, EP_CB_FLUSH_FILTERING, ep3comms_flush_callback, commsRail); -+ ep_register_callback (r, EP_CB_FLUSH_FLUSHING, ep3comms_flush_callback, commsRail); -+ ep_register_callback (r, EP_CB_FAILOVER, ep3comms_failover_callback, commsRail); -+ ep_register_callback (r, EP_CB_DISCONNECTING, ep3comms_disconnect_callback, commsRail); -+ -+ return (EP_COMMS_RAIL *) commsRail; -+} -+ -+void -+ep3comms_del_rail (EP_COMMS_RAIL *r) -+{ -+ EP3_COMMS_RAIL *commsRail = (EP3_COMMS_RAIL *) r; -+ EP_RAIL *rail = commsRail->Generic.Rail; -+ -+ ep_remove_callback (rail, EP_CB_FLUSH_FILTERING, ep3comms_flush_callback, commsRail); -+ ep_remove_callback (rail, EP_CB_FLUSH_FLUSHING, ep3comms_flush_callback, commsRail); -+ ep_remove_callback (rail, EP_CB_FAILOVER, ep3comms_failover_callback, commsRail); -+ ep_remove_callback (rail, EP_CB_DISCONNECTING, ep3comms_disconnect_callback, commsRail); -+ -+ ep_free_memory_elan (rail, EP_EPCOMMS_QUEUE_BASE); -+ -+ KMEM_FREE (commsRail, sizeof (EP3_COMMS_RAIL)); -+} -+ -+void -+ep3comms_display_rail (EP_COMMS_RAIL *r) -+{ -+ -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/epcomms_elan3.h linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan3.h ---- clean/drivers/net/qsnet/ep/epcomms_elan3.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan3.h 2004-11-12 05:55:03.000000000 -0500 -@@ -0,0 +1,330 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __EPCOMMS_ELAN3_H -+#define __EPCOMMS_ELAN3_H -+ -+#ident "@(#)$Id: epcomms_elan3.h,v 1.28 2004/11/12 10:55:03 mike Exp $" -+/* $Source: /cvs/master/quadrics/epmod/epcomms_elan3.h,v $ */ -+ -+#define EP3_DMAFAILCOUNT 3 -+ -+ -+/* Main/Elan spinlock */ -+typedef struct ep3_spinlock_elan -+{ -+ volatile E3_uint32 sl_lock; /* main wants a lock */ -+ volatile E3_uint32 sl_seq; /* thread owns this word */ -+ /* NOTE: The lock/seq words must be within the same 32-byte Elan cache-line */ -+ E3_uint64 sl_pad[14]; /* pad to 64-bytes */ -+} EP3_SPINLOCK_ELAN; -+ -+/* Declare this as a main memory cache block for efficiency */ -+typedef struct ep3_spinlock_main { -+ volatile E3_uint32 sl_seq; /* copy of seq number updated by Elan */ -+ volatile E3_uint32 sl_pad[15]; /* pad to 64-bytes */ -+} EP3_SPINLOCK_MAIN; -+ -+#if defined (__ELAN3__) -+ -+extern void ep3_spinblock (EP3_SPINLOCK_ELAN *, EP3_SPINLOCK_MAIN *); -+ -+#define EP3_SPINENTER(SLE,SL) \ -+do {\ -+ (SLE)->sl_seq++; \ -+ if ((SLE)->sl_lock) \ -+ ep3_spinblock(SLE, SL);\ -+} while (0) -+ -+#define EP3_SPINEXIT(SLE,SL) \ -+do {\ -+ (SL)->sl_seq = (SLE)->sl_seq;\ -+} while (0) -+ -+#else -+ -+#define EP3_SPINENTER(DEV,SLE,SL) do { \ -+ E3_uint32 seq; \ -+\ -+ mb();\ -+ elan3_sdram_writel (DEV, (SLE) + offsetof (EP3_SPINLOCK_ELAN, sl_lock), 1);\ -+ mb();\ -+ seq = elan3_sdram_readl (DEV, (SLE) + offsetof (EP3_SPINLOCK_ELAN, sl_seq));\ -+ while (seq != (SL)->sl_seq)\ -+ {\ -+ while ((SL)->sl_seq == (seq - 1))\ -+ {\ -+ mb();\ -+\ -+ DELAY (1); \ -+ }\ -+ seq = elan3_sdram_readl (DEV, (SLE) + offsetof (EP3_SPINLOCK_ELAN, sl_seq));\ -+ }\ -+} while (0) -+ -+#define EP3_SPINEXIT(DEV,SLE,SL) do { \ -+ wmb(); \ -+ elan3_sdram_writel (DEV, (SLE) + offsetof (EP3_SPINLOCK_ELAN, sl_lock), 0);\ -+ mmiob(); \ -+} while (0) -+ -+#endif /* ! 
__ELAN3__ */ -+ -+/* per-rail elan memory portion receive descriptor */ -+typedef struct ep3_rxd_rail_elan -+{ -+ E3_DMA Dmas[EP_MAXFRAG+1]; /* Dma's for fetching data/putting data & status blk */ -+ E3_Event ChainEvent[EP_MAXFRAG]; /* Events to chain dmas */ -+ E3_BlockCopyEvent DataEvent; /* message received block event */ -+ E3_BlockCopyEvent DoneEvent; /* RPC status block event */ -+ -+ EP_NMD Data; /* Network mapping handle for receive data */ -+ -+ E3_Addr RxdMain; /* pointer to main memory portion */ -+ -+ E3_Addr Next; /* linked list when on pending list (elan address) */ -+ -+ E3_uint64 MainAddr; /* kernel address of ep_rxd_main */ -+} EP3_RXD_RAIL_ELAN; -+ -+#define EP3_RXD_RAIL_ELAN_SIZE roundup (sizeof (EP3_RXD_RAIL_ELAN), E3_DMA_ALIGN) -+ -+/* per-rail main memory portion of receive descriptor */ -+typedef struct ep3_rxd_rail_main -+{ -+ E3_uint32 DataEvent; /* dest for done event */ -+ E3_uint32 DoneEvent; /* dest for done event */ -+} EP3_RXD_RAIL_MAIN; -+ -+#define EP3_RXD_RAIL_MAIN_SIZE roundup (sizeof(EP3_RXD_RAIL_MAIN), sizeof (E3_uint32)) -+ -+#if !defined(__ELAN3__) -+/* Kernel memory portion of per-rail receive descriptor */ -+typedef struct ep3_rxd_rail -+{ -+ EP_RXD_RAIL Generic; /* generic rxd rail */ -+ -+ EP3_COOKIE DataCookie; /* Event cookie */ -+ EP3_COOKIE DoneCookie; /* Event cookie */ -+ EP3_COOKIE ChainCookie[EP_MAXFRAG]; /* Event cookie */ -+ -+ sdramaddr_t RxdElan; /* per-rail elan receive descriptor */ -+ E3_Addr RxdElanAddr; /* and elan address */ -+ -+ EP3_RXD_RAIL_MAIN *RxdMain; /* per-rail main receive descriptor */ -+ E3_Addr RxdMainAddr; /* and elan address */ -+ -+ EP_BACKOFF Backoff; /* dma backoff */ -+} EP3_RXD_RAIL; -+ -+#define EP3_NUM_RXD_PER_BLOCK 16 -+ -+typedef struct ep3_rxd_rail_block -+{ -+ struct list_head Link; -+ -+ EP3_RXD_RAIL Rxd[EP3_NUM_RXD_PER_BLOCK]; -+} EP3_RXD_RAIL_BLOCK; -+ -+#endif /* ! 
__ELAN3__ */ -+ -+typedef struct ep3_rcvr_rail_elan /* Elan memory service structure */ -+{ -+ EP3_SPINLOCK_ELAN ThreadLock; /* elan memory portion of spin lock */ -+ EP3_SPINLOCK_ELAN PendingLock; /* spin lock for pending rx list */ -+ -+ E3_Addr PendingDescs; /* list of pending receive descriptors */ -+ E3_uint32 ThreadShouldHalt; /* marks that the thread should halt */ -+ -+ E3_uint64 MainAddr; /* kernel address of ep_rcvr (for StallThreadForNoDescs)*/ -+} EP3_RCVR_RAIL_ELAN; -+ -+typedef struct ep3_rcvr_rail_main /* Main memory service strucure */ -+{ -+ EP3_SPINLOCK_MAIN ThreadLock; /* main memory portion of spin lock */ -+ EP3_SPINLOCK_MAIN PendingLock; /* spinlock for pending rx list */ -+ -+ volatile unsigned PendingDescsTailp; /* next pointer of last receive descriptor on pending list */ -+} EP3_RCVR_RAIL_MAIN; -+ -+#if !defined(__ELAN3__) -+ -+typedef struct ep3_rcvr_rail_stats -+{ -+ unsigned long some_stat; -+} EP3_RCVR_RAIL_STATS; -+ -+typedef struct ep3_rcvr_rail -+{ -+ EP_RCVR_RAIL Generic; /* generic portion */ -+ -+ EP3_RCVR_RAIL_MAIN *RcvrMain; -+ E3_Addr RcvrMainAddr; -+ sdramaddr_t RcvrElan; -+ E3_Addr RcvrElanAddr; -+ -+ sdramaddr_t InputQueueBase; /* base of receive queue */ -+ E3_Addr InputQueueAddr; /* elan address of receive queue */ -+ -+ E3_Addr ThreadStack; /* Thread processor stack */ -+ E3_Addr ThreadWaiting; /* Elan thread is waiting as no receive descriptors pending (sp stored here ) */ -+ E3_Addr ThreadHalted; /* Elan thread is waiting as it was requested to halt */ -+ -+ struct list_head FreeDescList; /* freelist of per-rail receive descriptors */ -+ unsigned int FreeDescCount; /* and number on free list */ -+ unsigned int TotalDescCount; /* total number created */ -+ spinlock_t FreeDescLock; /* and lock for free list */ -+ struct list_head DescBlockList; /* list of receive descriptor blocks */ -+ -+ unsigned int FreeDescWaiting; /* waiting for descriptors to be freed */ -+ kcondvar_t FreeDescSleep; /* and sleep here */ -+ -+ unsigned int CleanupWaiting; /* waiting for cleanup */ -+ kcondvar_t CleanupSleep; /* and sleep here */ -+ -+ EP3_RCVR_RAIL_STATS stats; /* elan3 specific rcvr_rail stats */ -+} EP3_RCVR_RAIL; -+ -+#endif /* ! 
__ELAN3__ */ -+ -+/* per-rail portion of transmit descriptor */ -+typedef struct ep3_txd_rail_elan -+{ -+ EP_ENVELOPE Envelope; /* message envelope */ -+ EP_PAYLOAD Payload; /* message payload */ -+ -+ E3_BlockCopyEvent EnveEvent; /* envelope event */ -+ E3_BlockCopyEvent DataEvent; /* data transfer event */ -+ E3_BlockCopyEvent DoneEvent; /* rpc done event */ -+} EP3_TXD_RAIL_ELAN; -+ -+#define EP3_TXD_RAIL_ELAN_SIZE roundup (sizeof (EP3_TXD_RAIL_ELAN), E3_BLK_ALIGN) -+ -+typedef struct ep3_txd_rail_main -+{ -+ E3_uint32 EnveEvent; /* dest for envelope event */ -+ E3_uint32 DataEvent; /* dest for data transfer event */ -+ E3_uint32 DoneEvent; /* dest for rpc done event */ -+} EP3_TXD_RAIL_MAIN; -+ -+#define EP3_TXD_RAIL_MAIN_SIZE roundup (sizeof(EP3_TXD_RAIL_MAIN), E3_BLK_ALIGN) -+ -+#if !defined(__ELAN3__) -+ -+typedef struct ep3_txd_rail -+{ -+ EP_TXD_RAIL Generic; /* generic txd rail */ -+ -+ EP3_COOKIE EnveCookie; /* Event cookies */ -+ EP3_COOKIE DataCookie; -+ EP3_COOKIE DoneCookie; -+ -+ sdramaddr_t TxdElan; /* Elan TX descriptor */ -+ E3_Addr TxdElanAddr; /* and elan address */ -+ -+ EP3_TXD_RAIL_MAIN *TxdMain; /* Elan Main memory tx descriptor */ -+ E3_Addr TxdMainAddr; /* and elan address */ -+ -+ EP_BACKOFF Backoff; /* dma backoff */ -+} EP3_TXD_RAIL; -+ -+ -+#define EP3_NUM_TXD_PER_BLOCK 16 -+ -+typedef struct ep3_txd_rail_block -+{ -+ struct list_head Link; -+ -+ EP3_TXD_RAIL Txd[EP3_NUM_TXD_PER_BLOCK]; -+} EP3_TXD_RAIL_BLOCK; -+ -+typedef struct ep3_xmtr_rail_stats -+{ -+ unsigned long some_stat; -+} EP3_XMTR_RAIL_STATS; -+ -+typedef struct ep3_xmtr_rail -+{ -+ EP_XMTR_RAIL Generic; /* generic portion */ -+ -+ struct list_head FreeDescList; /* freelist of per-rail receive descriptors */ -+ unsigned int FreeDescCount; /* and number on free list */ -+ unsigned int TotalDescCount; -+ spinlock_t FreeDescLock; /* and lock for free list */ -+ struct list_head DescBlockList; /* list of receive descriptor blocks */ -+ -+ unsigned int FreeDescWaiting; /* waiting for descriptors to be freed */ -+ kcondvar_t FreeDescSleep; /* and sleep here */ -+ -+ EP3_XMTR_RAIL_STATS stats; /* elan3 specific xmtr rail stats */ -+} EP3_XMTR_RAIL; -+ -+typedef struct ep3_comms_rail -+{ -+ EP_COMMS_RAIL Generic; /* generic comms rail */ -+ sdramaddr_t QueueDescs; /* input queue descriptors */ -+} EP3_COMMS_RAIL; -+ -+/* epcommxTx_elan3.c */ -+extern void ep3xmtr_flush_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail); -+extern void ep3xmtr_failover_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail); -+extern void ep3xmtr_disconnect_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail); -+ -+/* epcommsRx_elan3.c */ -+extern void CompleteEnvelope (EP3_RAIL *rail, E3_Addr rxdMainAddr, E3_uint32 PAckVal); -+extern void StallThreadForNoDescs (EP3_RAIL *rail, E3_Addr rcvrElanAddr, E3_Addr sp); -+extern void StallThreadForHalted (EP3_RAIL *rail, E3_Addr rcvrElanAddr, E3_Addr sp); -+ -+extern void ep3rcvr_flush_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail); -+extern void ep3rcvr_failover_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail); -+extern void ep3rcvr_disconnect_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail); -+ -+/* epcomms_elan3.c */ -+extern EP_COMMS_RAIL *ep3comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *r); -+extern void ep3comms_del_rail (EP_COMMS_RAIL *r); -+extern void ep3comms_display_rail (EP_COMMS_RAIL *r); -+ -+/* epcommsTx_elan3.c */ -+extern int ep3xmtr_bind_txd (EP_TXD *txd, EP_XMTR_RAIL *xmtrRail, unsigned int phase); -+extern void ep3xmtr_unbind_txd (EP_TXD *txd, unsigned int 
phase); -+extern int ep3xmtr_poll_txd (EP_XMTR_RAIL *xmtrRail, EP_TXD_RAIL *txdRail, int how); -+extern long ep3xmtr_check (EP_XMTR_RAIL *xmtrRail, long nextRunTime); -+extern void ep3xmtr_add_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail); -+extern void ep3xmtr_del_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail); -+extern int ep3xmtr_check_txd_state(EP_TXD *txd); -+ -+extern void ep3xmtr_display_xmtr (DisplayInfo *di, EP_XMTR_RAIL *xmtrRail); -+extern void ep3xmtr_display_txd (DisplayInfo *di, EP_TXD_RAIL *txdRail); -+ -+extern void ep3xmtr_fillout_rail_stats (EP_XMTR_RAIL *xmtr_rail, char *str); -+ -+/* epcommsRx_elan3.c */ -+extern int ep3rcvr_queue_rxd (EP_RXD *rxd, EP_RCVR_RAIL *rcvrRail); -+extern void ep3rcvr_rpc_put (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags); -+extern void ep3rcvr_rpc_get (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags); -+extern void ep3rcvr_rpc_complete (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags); -+ -+extern EP_RXD *ep3rcvr_steal_rxd (EP_RCVR_RAIL *rcvrRail); -+ -+extern long ep3rcvr_check (EP_RCVR_RAIL *rcvrRail, long nextRunTime); -+extern void ep3rcvr_add_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *rail); -+extern void ep3rcvr_del_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *rail); -+ -+extern void ep3rcvr_display_rcvr (DisplayInfo *di, EP_RCVR_RAIL *rcvrRail); -+extern void ep3rcvr_display_rxd (DisplayInfo *di, EP_RXD_RAIL *rxdRail); -+ -+extern void ep3rcvr_fillout_rail_stats (EP_RCVR_RAIL *rcvr_rail, char *str); -+ -+#endif /* !defined(__ELAN3__) */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -+#endif /* __EPCOMMS_ELAN3_H */ -diff -urN clean/drivers/net/qsnet/ep/epcomms_elan3_thread.c linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan3_thread.c ---- clean/drivers/net/qsnet/ep/epcomms_elan3_thread.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan3_thread.c 2004-01-20 06:03:15.000000000 -0500 -@@ -0,0 +1,296 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: epcomms_elan3_thread.c,v 1.4 2004/01/20 11:03:15 david Exp $" -+/* $Source: /cvs/master/quadrics/epmod/epcomms_elan3_thread.c,v $ */ -+ -+//#include -+ -+typedef char int8_t; -+typedef unsigned char uint8_t; -+typedef short int16_t; -+typedef unsigned short uint16_t; -+typedef int int32_t; -+typedef unsigned int uint32_t; -+typedef long long int64_t; -+typedef unsigned long long uint64_t; -+ -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+#include "kcomm_vp.h" -+#include "kcomm_elan3.h" -+#include "epcomms_elan3.h" -+ -+#ifndef offsetof -+#define offsetof(s, m) (unsigned long)(&(((s *)0)->m)) -+#endif -+ -+EP3_RAIL_ELAN *rail; -+EP3_RCVR_RAIL_ELAN *r; -+EP3_RCVR_RAIL_MAIN *rm; -+ -+void -+ep3comms_rcvr (EP3_RAIL_ELAN *rail, EP3_RCVR_RAIL_ELAN *rcvrElan, EP3_RCVR_RAIL_MAIN *rcvrMain, -+ EP3_InputQueue *q, unsigned int *cookies) -+{ -+ int count = 1; -+ E3_Addr nfptr = q->q_fptr + q->q_size; -+ E3_uint32 tmp; -+ int i; -+ E3_Addr buffer; -+ int len; -+ E3_DMA *dma; -+ E3_Event *event; -+ -+ /* clear the queue state to allow envelopes to arrive */ -+ q->q_state = 0; -+ -+ for (;;) -+ { -+ if (! 
rcvrElan->ThreadShouldHalt) -+ c_waitevent ((E3_Event *) &q->q_event, count); /* HALT POINT */ -+ -+ if (rcvrElan->ThreadShouldHalt && nfptr == q->q_bptr) -+ { -+ asm volatile ("mov %0, %%g1" : /* no outputs */ : "r" (rcvrElan)); -+ asm volatile ("ta %0" : /* no outputs */ : "i" (EP3_UNIMP_THREAD_HALTED)); /* HALT POINT */ -+ continue; -+ } -+ -+ count = 0; -+ do { -+ /* Process the message at nfptr */ -+ EP_ENVELOPE *env = (EP_ENVELOPE *) nfptr; -+ EP3_RXD_RAIL_ELAN *rxd; -+ int ack; -+ -+ EP3_SPINENTER(&rcvrElan->ThreadLock, &rcvrMain->ThreadLock); /* HALT POINT */ -+ -+ while ((rxd = (EP3_RXD_RAIL_ELAN *)rcvrElan->PendingDescs) == 0) -+ { -+ /* no receive descriptors, so trap to the kernel to wait -+ * for receive descriptor to be queued, we pass the rcvr -+ * in %g1, so that the trap handler can restart us. */ -+ EP3_SPINEXIT(&rcvrElan->ThreadLock, &rcvrMain->ThreadLock); -+ asm volatile ("mov %0, %%g1" : /* no outputs */ : "r" (rcvrElan)); -+ asm volatile ("ta %0" : /* no outputs */ : "i" (EP3_UNIMP_TRAP_NO_DESCS)); /* HALT POINT */ -+ EP3_SPINENTER(&rcvrElan->ThreadLock, &rcvrMain->ThreadLock); /* HALT POINT */ -+ } -+ -+ if (env->Version != EP_ENVELOPE_VERSION) -+ { -+ /* This envelope has been cancelled - so just consume it */ -+ EP3_SPINEXIT(&rcvrElan->ThreadLock, &rcvrMain->ThreadLock); -+ goto consume_envelope; -+ } -+ -+ dma = rxd->Dmas; -+ event = rxd->ChainEvent; -+ -+ if (EP_IS_MULTICAST(env->Attr)) -+ { -+ dma->dma_type = E3_DMA_TYPE (DMA_BYTE, DMA_READ, DMA_NORMAL, EP3_DMAFAILCOUNT); -+ dma->dma_size = BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t); -+ dma->dma_source = env->TxdMain.nmd_addr + offsetof (EP_TXD_MAIN, Bitmap); -+ dma->dma_dest = (E3_Addr) &((EP_RXD_MAIN *) rxd->RxdMain)->Bitmap; -+ dma->dma_destEvent = (E3_Addr) event; -+ dma->dma_destCookieVProc = DMA_COOKIE_THREAD | DMA_COOKIE (cookies[env->NodeId], EP_VP_DATA (rail->NodeId)); -+ dma->dma_srcEvent = env->TxdRail + offsetof (EP3_TXD_RAIL_ELAN, DataEvent); -+ dma->dma_srcCookieVProc = DMA_COOKIE_THREAD | DMA_REMOTE_COOKIE (cookies[env->NodeId], EP_VP_DATA (env->NodeId)); -+ -+ event->ev_Count = 1; -+ -+ dma++; event++; -+ } -+ -+ if (env->nFrags == 0) -+ { -+ /* Generate a "get" DMA to accept the envelope and fire the rx handler */ -+ dma->dma_type = E3_DMA_TYPE(DMA_BYTE, DMA_READ, DMA_NORMAL, EP3_DMAFAILCOUNT); -+ dma->dma_size = 0; -+ dma->dma_destEvent = (E3_Addr) &rxd->DataEvent; -+ dma->dma_destCookieVProc = DMA_COOKIE_THREAD | DMA_COOKIE (cookies[env->NodeId], EP_VP_DATA (rail->NodeId)); -+ dma->dma_srcEvent = env->TxdRail + offsetof (EP3_TXD_RAIL_ELAN, DataEvent); -+ dma->dma_srcCookieVProc = DMA_COOKIE_THREAD | DMA_REMOTE_COOKIE (cookies[env->NodeId], EP_VP_DATA (env->NodeId)); -+ len = 0; -+ } -+ else -+ { -+ /* Generate the DMA chain to fetch the data */ -+ for (i = 0, buffer = rxd->Data.nmd_addr, len = 0; i < env->nFrags; i++, dma++, event++) -+ { -+ dma->dma_type = E3_DMA_TYPE(DMA_BYTE, DMA_READ, DMA_NORMAL, EP3_DMAFAILCOUNT); -+ dma->dma_size = env->Frags[i].nmd_len; -+ dma->dma_source = env->Frags[i].nmd_addr; -+ dma->dma_dest = buffer; -+ dma->dma_destEvent = (E3_Addr) event; -+ dma->dma_destCookieVProc = DMA_COOKIE_THREAD | DMA_COOKIE (cookies[env->NodeId], EP_VP_DATA (rail->NodeId)); -+ dma->dma_srcEvent = env->TxdRail + offsetof (EP3_TXD_RAIL_ELAN, DataEvent); -+ dma->dma_srcCookieVProc = DMA_COOKIE_THREAD | DMA_REMOTE_COOKIE (cookies[env->NodeId], EP_VP_DATA (env->NodeId)); -+ -+ event->ev_Count = 1; -+ -+ buffer += dma->dma_size; -+ len += dma->dma_size; -+ } -+ -+ /* Point the last dma at 
the done event */ -+ (--dma)->dma_destEvent = (E3_Addr) &rxd->DataEvent; -+ -+ if (rxd->Data.nmd_len < len) -+ { -+ /* The receive descriptor was too small for the message */ -+ /* complete the message anyway, but don't transfer any */ -+ /* data, we set the length to EP_MSG_TOO_BIG */ -+ for (i = 0, dma = rxd->Dmas; i < env->nFrags; i++, dma++) -+ dma->dma_size = 0; -+ -+ len = EP_MSG_TOO_BIG; -+ } -+ } -+ -+ /* Store the received message length in the rxdElan for CompleteEnvelope */ -+ rxd->Data.nmd_len = len; -+ -+ /* Initialise %g1 with the "rxd" so the trap handler can -+ * complete the envelope processing if we trap while sending the -+ * packet */ -+ asm volatile ("mov %0, %%g1" : /* no outputs */ : "r" (rxd)); -+ -+ /* Generate a packet to start the data transfer */ -+ c_open (EP_VP_DATA (env->NodeId)); -+ c_sendtrans2 (TR_THREADIDENTIFY, rxd->Dmas->dma_destCookieVProc, 0, 0); -+ c_sendmem (TR_SENDACK | TR_REMOTEDMA, 0, rxd->Dmas); -+ ack = c_close(); -+ -+ /* -+ * If we trapped for an output timeout, then the trap handler will have -+ * completed processing this envelope and cleared the spinlock, so we just -+ * need to update the queue descriptor. -+ */ -+ if (ack == EP3_PAckStolen) -+ goto consume_envelope; -+ -+ if (ack != E3_PAckOk) -+ { -+ /* our packet got nacked, so trap into the kernel so that -+ * it can complete processing of this envelope. -+ */ -+ asm volatile ("ta %0" : /* no outputs */ : "i" (EP3_UNIMP_TRAP_PACKET_NACKED)); /* HALT POINT */ -+ goto consume_envelope; -+ } -+ -+ /* remove the RXD from the pending list */ -+ EP3_SPINENTER (&rcvrElan->PendingLock, &rcvrMain->PendingLock); -+ if ((rcvrElan->PendingDescs = rxd->Next) == 0) -+ rcvrMain->PendingDescsTailp = 0; -+ EP3_SPINEXIT (&rcvrElan->PendingLock, &rcvrMain->PendingLock); -+ -+ /* Copy the envelope information - as 5 64 byte chunks. -+ * We force the parameters in g5, g6 so that they aren't -+ * trashed by the loadblk32 into the locals/ins -+ */ -+ if (EP_HAS_PAYLOAD(env->Attr)) -+ { -+ register void *src asm ("g5") = (void *) env; -+ register void *dst asm ("g6") = (void *) &((EP_RXD_MAIN *) rxd->RxdMain)->Envelope; -+ -+ asm volatile ( -+ "and %%sp,63,%%g7 ! Calculate stack alignment\n" -+ "add %%g7,64,%%g7 ! Space to save the registers\n" -+ "sub %%sp,%%g7,%%sp ! align stack\n" -+ "stblock64 %%l0,[%%sp] ! save the locals and ins\n" -+ -+ "ldblock64 [%0 + 0],%%l0 ! load 64-byte block into locals/ins\n" /* copy envelope */ -+ "stblock64 %%l0,[%1 + 0] ! store 64-byte block from local/ins\n" -+ "ldblock64 [%0 + 64],%%l0 ! load 64-byte block into locals/ins\n" -+ "stblock64 %%l0,[%1 + 64] ! store 64-byte block from local/ins\n" -+ -+ "ldblock64 [%0 + 128],%%l0 ! load 64-byte block into locals/ins\n" /* copy payload */ -+ "stblock64 %%l0,[%1 + 128] ! store 64-byte block from local/ins\n" -+ "ldblock64 [%0 + 192],%%l0 ! load 64-byte block into locals/ins\n" -+ "stblock64 %%l0,[%1 + 192] ! store 64-byte block from local/ins\n" -+ -+ "ldblock64 [%%sp],%%l0 ! restore locals and ins\n" -+ "add %%sp,%%g7,%%sp ! restore stack pointer\n" -+ : /* outputs */ -+ : /* inputs */ "r" (src), "r" (dst) -+ : /* clobbered */ "g5", "g6", "g7" ); -+ } -+ else -+ { -+ register void *src asm ("g5") = (void *) env; -+ register void *dst asm ("g6") = (void *) &((EP_RXD_MAIN *) rxd->RxdMain)->Envelope; -+ -+ asm volatile ( -+ "and %%sp,63,%%g7 ! Calculate stack alignment\n" -+ "add %%g7,64,%%g7 ! Space to save the registers\n" -+ "sub %%sp,%%g7,%%sp ! align stack\n" -+ "stblock64 %%l0,[%%sp] ! 
save the locals and ins\n" -+ -+ "ldblock64 [%0 + 0],%%l0 ! load 64-byte block into locals/ins\n" -+ "stblock64 %%l0,[%1 + 0] ! store 64-byte block from local/ins\n" -+ "ldblock64 [%0 + 64],%%l0 ! load 64-byte block into locals/ins\n" -+ "stblock64 %%l0,[%1 + 64] ! store 64-byte block from local/ins\n" -+ -+ "ldblock64 [%%sp],%%l0 ! restore locals and ins\n" -+ "add %%sp,%%g7,%%sp ! restore stack pointer\n" -+ : /* outputs */ -+ : /* inputs */ "r" (src), "r" (dst) -+ : /* clobbered */ "g5", "g6", "g7" ); -+ } -+ -+ /* Store the message length to indicate that I've finished */ -+ ((EP_RXD_MAIN *) rxd->RxdMain)->Len = rxd->Data.nmd_len; /* PCI write */ -+ -+ EP3_SPINEXIT(&rcvrElan->ThreadLock, &rcvrMain->ThreadLock); -+ -+ consume_envelope: -+ /* Sample the queue full bit *BEFORE* moving the fptr. -+ * Then only clear it if it was full before, otherwise, -+ * as soon as the fptr is moved on the queue could fill -+ * up, and so clearing it could mark a full queue as -+ * empty. -+ * -+ * While the full bit is set, the queue is in a 'steady -+ * state', so it is safe to set the q_state -+ * -+ */ -+ if (((tmp = q->q_state) & E3_QUEUE_FULL) == 0) -+ q->q_fptr = nfptr; /* update queue */ -+ else -+ { -+ q->q_fptr = nfptr; /* update queue */ -+ q->q_state = tmp &~E3_QUEUE_FULL; /* and clear full flag */ -+ } -+ -+ count++; /* bump message count */ -+ if (nfptr == q->q_top) /* queue wrap */ -+ nfptr = q->q_base; -+ else -+ nfptr += q->q_size; -+ -+ c_break_busywait(); /* be nice HALT POINT */ -+ -+ } while (nfptr != q->q_bptr); /* loop until Fptr == Bptr */ -+ } -+} -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/epcomms_elan4.c linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan4.c ---- clean/drivers/net/qsnet/ep/epcomms_elan4.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan4.c 2005-08-09 05:57:14.000000000 -0400 -@@ -0,0 +1,393 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: epcomms_elan4.c,v 1.12.2.1 2005/08/09 09:57:14 mike Exp $" -+/* $Source: /cvs/master/quadrics/epmod/epcomms_elan4.c,v $ */ -+ -+#include -+ -+#include -+#include -+#include -+ -+#include "debug.h" -+#include "kcomm_elan4.h" -+#include "epcomms_elan4.h" -+ -+static void -+ep4comms_flush_interrupt (EP4_RAIL *rail, void *arg) -+{ -+ EP4_COMMS_RAIL *commsRail = (EP4_COMMS_RAIL *) arg; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&commsRail->r_flush_lock, flags); -+ commsRail->r_flush_count = 0; -+ kcondvar_wakeupall (&commsRail->r_flush_sleep, &commsRail->r_flush_lock); -+ spin_unlock_irqrestore (&commsRail->r_flush_lock, flags); -+} -+ -+void -+ep4comms_flush_start (EP4_COMMS_RAIL *commsRail) -+{ -+ kmutex_lock (&commsRail->r_flush_mutex); -+} -+ -+void -+ep4comms_flush_wait (EP4_COMMS_RAIL *commsRail) -+{ -+ unsigned long flags; -+ -+ ep4_wait_event_cmd (commsRail->r_flush_mcq, -+ commsRail->r_elan_addr + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event), -+ E4_EVENT_INIT_VALUE (-32 * commsRail->r_flush_count, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0), -+ commsRail->r_flush_ecq->ecq_addr, -+ INTERRUPT_CMD | (commsRail->r_flush_intcookie.int_val << E4_MAIN_INT_SHIFT)); -+ -+ spin_lock_irqsave (&commsRail->r_flush_lock, flags); -+ while (commsRail->r_flush_count != 0) -+ if (kcondvar_timedwait (&commsRail->r_flush_sleep, &commsRail->r_flush_lock, &flags, (lbolt + (HZ*10))) == -1) -+ elan4_hardware_lock_check(((EP4_RAIL *)(commsRail->r_generic.Rail))->r_ctxt.ctxt_dev, "flush_wait"); -+ spin_unlock_irqrestore (&commsRail->r_flush_lock, flags); -+ -+ kmutex_unlock (&commsRail->r_flush_mutex); -+} -+ -+void -+ep4comms_flush_setevent (EP4_COMMS_RAIL *commsRail, ELAN4_CQ *cq) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&commsRail->r_flush_lock, flags); -+ -+ elan4_set_event_cmd (cq, commsRail->r_elan_addr + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event)); -+ -+ commsRail->r_flush_count++; -+ -+ spin_unlock_irqrestore (&commsRail->r_flush_lock, flags); -+} -+ -+void -+ep4comms_flush_callback (void *arg, statemap_t *map) -+{ -+ EP4_COMMS_RAIL *commsRail = (EP4_COMMS_RAIL *) arg; -+ EP_COMMS_SUBSYS *subsys = commsRail->r_generic.Subsys; -+ EP4_RAIL *rail = (EP4_RAIL *) commsRail->r_generic.Rail; -+ unsigned int rnum = rail->r_generic.Number; -+ struct list_head *el; -+ -+ /* -+ * We stall the retry thread from CB_FLUSH_FILTERING until -+ * we've finished CB_FLUSH_FLUSHING to ensure that sten -+ * packets cannot be retried while we flush them through. 
-+ */ -+ switch (rail->r_generic.CallbackStep) -+ { -+ case EP_CB_FLUSH_FILTERING: -+ ep_kthread_stall (&rail->r_retry_thread); -+ -+ ep4comms_flush_start (commsRail); -+ break; -+ -+ case EP_CB_FLUSH_FLUSHING: -+ break; -+ } -+ -+ kmutex_lock (&subsys->Lock); -+ list_for_each (el, &subsys->Transmitters) { -+ EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link); -+ -+ if (xmtr->Rails[rnum]) -+ ep4xmtr_flush_callback (xmtr, (EP4_XMTR_RAIL *) xmtr->Rails[rnum]); -+ } -+ -+ list_for_each (el, &subsys->Receivers) { -+ EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link); -+ -+ if (rcvr->Rails[rnum]) -+ ep4rcvr_flush_callback (rcvr, (EP4_RCVR_RAIL *) rcvr->Rails[rnum]); -+ } -+ kmutex_unlock (&subsys->Lock); -+ -+ switch (rail->r_generic.CallbackStep) -+ { -+ case EP_CB_FLUSH_FILTERING: -+ ep4comms_flush_wait (commsRail); -+ break; -+ -+ case EP_CB_FLUSH_FLUSHING: -+ ep_kthread_resume (&rail->r_retry_thread); -+ break; -+ } -+} -+ -+void -+ep4comms_failover_callback (void *arg, statemap_t *map) -+{ -+ EP_COMMS_RAIL *commsRail = (EP_COMMS_RAIL *) arg; -+ EP_COMMS_SUBSYS *subsys = commsRail->Subsys; -+ unsigned int rnum = commsRail->Rail->Number; -+ struct list_head *el; -+ -+ kmutex_lock (&subsys->Lock); -+ list_for_each (el, &subsys->Transmitters) { -+ EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link); -+ -+ if (xmtr->Rails[rnum]) -+ ep4xmtr_failover_callback (xmtr, (EP4_XMTR_RAIL *) xmtr->Rails[rnum]); -+ } -+ -+ list_for_each (el, &subsys->Receivers) { -+ EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link); -+ -+ if (rcvr->Rails[rnum]) -+ ep4rcvr_failover_callback (rcvr, (EP4_RCVR_RAIL *) rcvr->Rails[rnum]); -+ } -+ kmutex_unlock (&subsys->Lock); -+} -+ -+void -+ep4comms_disconnect_callback (void *arg, statemap_t *map) -+{ -+ EP_COMMS_RAIL *commsRail = (EP_COMMS_RAIL *) arg; -+ EP_COMMS_SUBSYS *subsys = commsRail->Subsys; -+ unsigned int rnum = commsRail->Rail->Number; -+ struct list_head *el; -+ -+ kmutex_lock (&subsys->Lock); -+ list_for_each (el, &subsys->Transmitters) { -+ EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link); -+ -+ if (xmtr->Rails[rnum]) -+ ep4xmtr_disconnect_callback (xmtr, (EP4_XMTR_RAIL *) xmtr->Rails[rnum]); -+ } -+ -+ list_for_each (el, &subsys->Receivers) { -+ EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link); -+ -+ if (rcvr->Rails[rnum]) -+ ep4rcvr_disconnect_callback (rcvr, (EP4_RCVR_RAIL *) rcvr->Rails[rnum]); -+ } -+ kmutex_unlock (&subsys->Lock); -+} -+ -+void -+ep4comms_neterr_callback (EP4_RAIL *rail, void *arg, unsigned int nodeId, EP_NETERR_COOKIE *cookies) -+{ -+ EP_COMMS_RAIL *commsRail = (EP_COMMS_RAIL *) arg; -+ EP_COMMS_SUBSYS *subsys = commsRail->Subsys; -+ unsigned int rnum = commsRail->Rail->Number; -+ struct list_head *el; -+ -+ /* First - stall the retry thread, so that it will no longer restart -+ * any sten packets from the retry lists */ -+ ep_kthread_stall (&rail->r_retry_thread); -+ -+ ep4comms_flush_start ((EP4_COMMS_RAIL *) commsRail); -+ -+ /* Second - flush through all command queues for xmtrs and rcvrs */ -+ kmutex_lock (&subsys->Lock); -+ list_for_each (el, &subsys->Transmitters) { -+ EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link); -+ -+ if (xmtr->Rails[rnum]) -+ ep4xmtr_neterr_flush (xmtr, (EP4_XMTR_RAIL *) xmtr->Rails[rnum], nodeId, cookies); -+ } -+ -+ list_for_each (el, &subsys->Receivers) { -+ EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link); -+ -+ if (rcvr->Rails[rnum]) -+ ep4rcvr_neterr_flush (rcvr, (EP4_RCVR_RAIL *) rcvr->Rails[rnum], nodeId, cookies); -+ } -+ kmutex_unlock (&subsys->Lock); -+ -+ /* Third - wait for flush to complete */ -+ ep4comms_flush_wait 
((EP4_COMMS_RAIL *) commsRail); -+ -+ /* Fourth - flush through all command queues */ -+ ep4_flush_ecqs (rail); -+ -+ /* Fifth - search all the retry lists for the network error cookies */ -+ kmutex_lock (&subsys->Lock); -+ list_for_each (el, &subsys->Transmitters) { -+ EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link); -+ -+ if (xmtr->Rails[rnum]) -+ ep4xmtr_neterr_check (xmtr, (EP4_XMTR_RAIL *) xmtr->Rails[rnum], nodeId, cookies); -+ } -+ -+ list_for_each (el, &subsys->Receivers) { -+ EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link); -+ -+ if (rcvr->Rails[rnum]) -+ ep4rcvr_neterr_check (rcvr, (EP4_RCVR_RAIL *) rcvr->Rails[rnum], nodeId, cookies); -+ } -+ kmutex_unlock (&subsys->Lock); -+ -+ ep_kthread_resume (&rail->r_retry_thread); -+} -+ -+ -+EP_COMMS_RAIL * -+ep4comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *r) -+{ -+ EP4_RAIL *rail = (EP4_RAIL *)r; -+ ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev; -+ EP4_COMMS_RAIL *commsRail; -+ E4_InputQueue qdesc; -+ int i; -+ -+ KMEM_ZALLOC (commsRail, EP4_COMMS_RAIL *,sizeof (EP4_COMMS_RAIL), 1); -+ -+ if (commsRail == NULL) -+ return NULL; -+ -+ commsRail->r_generic.Ops.DelRail = ep4comms_del_rail; -+ commsRail->r_generic.Ops.DisplayRail = ep4comms_display_rail; -+ commsRail->r_generic.Ops.Rcvr.AddRail = ep4rcvr_add_rail; -+ commsRail->r_generic.Ops.Rcvr.DelRail = ep4rcvr_del_rail; -+ commsRail->r_generic.Ops.Rcvr.Check = ep4rcvr_check; -+ commsRail->r_generic.Ops.Rcvr.QueueRxd = ep4rcvr_queue_rxd; -+ commsRail->r_generic.Ops.Rcvr.RpcPut = ep4rcvr_rpc_put; -+ commsRail->r_generic.Ops.Rcvr.RpcGet = ep4rcvr_rpc_get; -+ commsRail->r_generic.Ops.Rcvr.RpcComplete = ep4rcvr_rpc_complete; -+ -+ commsRail->r_generic.Ops.Rcvr.StealRxd = ep4rcvr_steal_rxd; -+ -+ commsRail->r_generic.Ops.Rcvr.DisplayRcvr = ep4rcvr_display_rcvr; -+ commsRail->r_generic.Ops.Rcvr.DisplayRxd = ep4rcvr_display_rxd; -+ -+ commsRail->r_generic.Ops.Rcvr.FillOutRailStats = ep4rcvr_fillout_rail_stats; -+ -+ commsRail->r_generic.Ops.Xmtr.AddRail = ep4xmtr_add_rail; -+ commsRail->r_generic.Ops.Xmtr.DelRail = ep4xmtr_del_rail; -+ commsRail->r_generic.Ops.Xmtr.Check = ep4xmtr_check; -+ commsRail->r_generic.Ops.Xmtr.BindTxd = ep4xmtr_bind_txd; -+ commsRail->r_generic.Ops.Xmtr.UnbindTxd = ep4xmtr_unbind_txd; -+ commsRail->r_generic.Ops.Xmtr.PollTxd = ep4xmtr_poll_txd; -+ commsRail->r_generic.Ops.Xmtr.CheckTxdState = ep4xmtr_check_txd_state; -+ -+ commsRail->r_generic.Ops.Xmtr.DisplayXmtr = ep4xmtr_display_xmtr; -+ commsRail->r_generic.Ops.Xmtr.DisplayTxd = ep4xmtr_display_txd; -+ -+ commsRail->r_generic.Ops.Xmtr.FillOutRailStats = ep4xmtr_fillout_rail_stats; -+ -+ /* Allocate command queue space for flushing (1 dword for interrupt + 4 dwords for waitevent) */ -+ if ((commsRail->r_flush_ecq = ep4_get_ecq (rail, EP4_ECQ_EVENT, 1)) == NULL) -+ { -+ KMEM_FREE (commsRail, sizeof (EP4_COMMS_RAIL)); -+ return NULL; -+ } -+ -+ if ((commsRail->r_flush_mcq = ep4_get_ecq (rail, EP4_ECQ_MAIN, 4)) == NULL) -+ { -+ ep4_put_ecq (rail, commsRail->r_flush_ecq, 1); -+ KMEM_FREE (commsRail, sizeof (EP4_COMMS_RAIL)); -+ return NULL; -+ } -+ -+ /* Allocate and initialise the elan memory part */ -+ if ((commsRail->r_elan = ep_alloc_elan (r, EP4_COMMS_RAIL_ELAN_SIZE, 0, &commsRail->r_elan_addr)) == (sdramaddr_t) 0) -+ { -+ ep4_put_ecq (rail, commsRail->r_flush_mcq, 4); -+ ep4_put_ecq (rail, commsRail->r_flush_ecq, 1); -+ KMEM_FREE (commsRail, sizeof (EP4_COMMS_RAIL)); -+ return NULL; -+ } -+ -+ ep4_register_intcookie (rail, &commsRail->r_flush_intcookie, commsRail->r_elan_addr + offsetof (EP4_COMMS_RAIL_ELAN, 
r_flush_event), -+ ep4comms_flush_interrupt, commsRail); -+ -+ elan4_sdram_writeq (dev, commsRail->r_elan + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event.ev_CountAndType), -+ E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0)); -+ -+ -+ /* Allocate and initialise all the queue descriptors as "full" with no event */ -+ if ((commsRail->r_descs = ep_alloc_memory_elan (r, EP_EPCOMMS_QUEUE_BASE, roundup (EP_MSG_NSVC * EP_QUEUE_DESC_SIZE, SDRAM_PAGE_SIZE), EP_PERM_ALL, 0)) == (sdramaddr_t) 0) -+ { -+ ep_free_elan (r, commsRail->r_elan_addr, EP4_COMMS_RAIL_ELAN_SIZE); -+ ep4_put_ecq (rail, commsRail->r_flush_mcq, 4); -+ ep4_put_ecq (rail, commsRail->r_flush_ecq, 1); -+ KMEM_FREE (commsRail, sizeof (EP4_COMMS_RAIL)); -+ return NULL; -+ } -+ -+ qdesc.q_bptr = 0; -+ qdesc.q_fptr = 8; -+ qdesc.q_control = E4_InputQueueControl (qdesc.q_bptr,qdesc.q_fptr, 8); -+ qdesc.q_event = 0; -+ -+ for (i = 0; i < EP_MSG_NSVC; i++) -+ elan4_sdram_copyq_to_sdram (rail->r_ctxt.ctxt_dev, &qdesc, commsRail->r_descs + (i * EP_QUEUE_DESC_SIZE), -+ sizeof (E4_InputQueue)); -+ -+ kmutex_init (&commsRail->r_flush_mutex); -+ spin_lock_init (&commsRail->r_flush_lock); -+ kcondvar_init (&commsRail->r_flush_sleep); -+ -+ ep_register_callback (r, EP_CB_FLUSH_FILTERING, ep4comms_flush_callback, commsRail); -+ ep_register_callback (r, EP_CB_FLUSH_FLUSHING, ep4comms_flush_callback, commsRail); -+ ep_register_callback (r, EP_CB_FAILOVER, ep4comms_failover_callback, commsRail); -+ ep_register_callback (r, EP_CB_DISCONNECTING, ep4comms_disconnect_callback, commsRail); -+ -+ commsRail->r_neterr_ops.op_func = ep4comms_neterr_callback; -+ commsRail->r_neterr_ops.op_arg = commsRail; -+ -+ ep4_add_neterr_ops (rail, &commsRail->r_neterr_ops); -+ -+ return (EP_COMMS_RAIL *) commsRail; -+} -+ -+void -+ep4comms_del_rail (EP_COMMS_RAIL *r) -+{ -+ EP4_COMMS_RAIL *commsRail = (EP4_COMMS_RAIL *) r; -+ EP4_RAIL *rail = (EP4_RAIL *) commsRail->r_generic.Rail; -+ -+ ep_remove_callback (&rail->r_generic, EP_CB_FLUSH_FILTERING, ep4comms_flush_callback, commsRail); -+ ep_remove_callback (&rail->r_generic, EP_CB_FLUSH_FLUSHING, ep4comms_flush_callback, commsRail); -+ ep_remove_callback (&rail->r_generic, EP_CB_FAILOVER, ep4comms_failover_callback, commsRail); -+ ep_remove_callback (&rail->r_generic, EP_CB_DISCONNECTING, ep4comms_disconnect_callback, commsRail); -+ -+ kcondvar_destroy (&commsRail->r_flush_sleep); -+ spin_lock_destroy (&commsRail->r_flush_lock); -+ kmutex_destroy (&commsRail->r_flush_mutex); -+ -+ ep_free_memory_elan (&rail->r_generic, EP_EPCOMMS_QUEUE_BASE); -+ ep_free_elan (&rail->r_generic, commsRail->r_elan_addr, EP4_COMMS_RAIL_ELAN_SIZE); -+ -+ ep4_deregister_intcookie (rail, &commsRail->r_flush_intcookie); -+ -+ ep4_put_ecq (rail, commsRail->r_flush_mcq, 4); -+ ep4_put_ecq (rail, commsRail->r_flush_ecq, 1); -+ -+ KMEM_FREE (commsRail, sizeof (EP4_COMMS_RAIL)); -+} -+ -+void -+ep4comms_display_rail (EP_COMMS_RAIL *r) -+{ -+ EP4_COMMS_RAIL *commsRail = (EP4_COMMS_RAIL *) r; -+ EP4_RAIL *rail = (EP4_RAIL *) commsRail->r_generic.Rail; -+ ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev; -+ -+ ep4_display_rail (rail); -+ -+ ep_debugf (DBG_DEBUG, " flush count=%d mcq=%p ecq=%p event %llx.%llx.%llx\n", -+ commsRail->r_flush_count, commsRail->r_flush_mcq, commsRail->r_flush_ecq, -+ elan4_sdram_readq (dev, commsRail->r_elan + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event.ev_CountAndType)), -+ elan4_sdram_readq (dev, commsRail->r_elan + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event.ev_WritePtr)), -+ elan4_sdram_readq (dev, commsRail->r_elan + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event.ev_WriteValue))); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */
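Taken together, ep4comms_flush_start(), ep4comms_flush_setevent() and ep4comms_flush_wait() above implement a counted flush of elan4 command queues: each queue that may still hold commands contributes one set-event on r_flush_event, and the wait-event issued by flush_wait() only fires the interrupt cookie (which zeroes r_flush_count and wakes the sleeper) once every contributed set-event has arrived. A sketch of a caller, following the same sequence ep4comms_neterr_callback() uses, where "cq" stands for whichever command queue needs flushing through:

    ep4comms_flush_start (commsRail);          /* serialise users of the flush event     */
    ep4comms_flush_setevent (commsRail, cq);   /* r_flush_count++, setevent issued by cq */
    ep4comms_flush_wait (commsRail);           /* sleep until the flush interrupt clears */
                                               /* r_flush_count back to zero             */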
-diff -urN clean/drivers/net/qsnet/ep/epcomms_elan4.h linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan4.h ---- clean/drivers/net/qsnet/ep/epcomms_elan4.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan4.h 2005-03-22 11:47:36.000000000 -0500 -@@ -0,0 +1,471 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __EPCOMMS_ELAN4_H -+#define __EPCOMMS_ELAN4_H -+ -+#ident "@(#)$Id: epcomms_elan4.h,v 1.15 2005/03/22 16:47:36 david Exp $" -+/* $Source: /cvs/master/quadrics/epmod/epcomms_elan4.h,v $ */ -+ -+ -+#include -+ -+/* -+ * Elan4 spinlocks are a pair of 64 bit words, one in elan sdram and one in main memory; -+ * the sdram word holds the thread sequence number in the bottom 32 bits and the main -+ * lock in the top 32 bits. The main memory word holds the sequence number only in -+ * its bottom 32 bits */ -+ -+typedef volatile E4_uint64 EP4_SPINLOCK_MAIN; -+typedef volatile E4_uint64 EP4_SPINLOCK_ELAN; -+ -+#define EP4_SPINLOCK_SEQ 0 -+#define EP4_SPINLOCK_MLOCK 4 -+ -+#if defined(__elan4__) -+ -+#define EP4_SPINENTER(CPORT,SLE,SLM) \ -+do { \ -+ register long tmp; \ -+\ -+ asm volatile ("ld4 [%1], %0\n" \ -+ "inc %0\n" \ -+ "st4 %0, [%1]\n" \ -+ "ld4 [%1 + 4], %0\n" \ -+ "srl8,byte %0, 4, %0\n" \ -+ : /* outputs */ "=r" (tmp) \ -+ : /* inputs */ "r" (SLE), "r" (SLM)); \ -+\ -+ if (tmp) \ -+ ep4_spinblock (CPORT,SLE, SLM); \ -+} while (0) -+ -+extern void ep4_spinblock(E4_uint64 *cport, EP4_SPINLOCK_ELAN *sle, EP4_SPINLOCK_MAIN *slm); -+ -+#define EP4_SPINEXIT(CPORT,SLE,SLM) \ -+do { \ -+ register long tmp; \ -+\ -+ asm volatile ("ld4 [%1], %0\n" \ -+ "st4 %0, [%2]\n" \ -+ : /* outputs */ "=r" (tmp) \ -+ : /* inputs */ "r" (SLE), "r" (SLM)); \ -+} while (0) -+ -+#else -+ -+#define EP4_SPINENTER(DEV,SLE,SLM) \ -+do { \ -+ uint32_t seq; \ -+\ -+ mb(); \ -+ elan4_sdram_writel (DEV, (SLE) + EP4_SPINLOCK_MLOCK, 1); \ -+ mb(); \ -+ while ((seq = elan4_sdram_readl (DEV, (SLE) + EP4_SPINLOCK_SEQ)) != *((uint32_t *) (SLM))) \ -+ { \ -+ while (*((uint32_t *) (SLM)) == (seq - 1)) \ -+ { \ -+ mb(); \ -+ DELAY(1); \ -+ } \ -+ } \ -+} while (0) -+ -+#define EP4_SPINEXIT(DEV,SLE,SLM) \ -+do { \ -+ wmb(); \ -+ elan4_sdram_writel (DEV, (SLE) + EP4_SPINLOCK_MLOCK, 0); \ -+} while (0) -+ -+#endif /* !defined(__elan4__) */ -+ -+#define EP4_TXD_STEN_RETRYCOUNT 16 -+#define EP4_RXD_STEN_RETRYCOUNT 1 -+#define EP4_DMA_RETRYCOUNT 16 -+ -+typedef struct ep4_intr_cmd -+{ -+ E4_uint64 c_write_cmd; -+ E4_uint64 c_write_value; -+ E4_uint64 c_intr_cmd; -+} EP4_INTR_CMD; -+ -+#define EP4_INTR_CMD_NDWORDS (sizeof (EP4_INTR_CMD) / 8) -+ -+typedef struct ep4_rxd_sten_cmd -+{ -+ E4_uint64 c_open; -+ -+ E4_uint64 c_trans; -+ E4_uint64 c_cookie; -+ E4_uint64 c_dma_typeSize; -+ E4_uint64 c_dma_cookie; -+ E4_uint64 c_dma_vproc; -+ E4_uint64 c_dma_srcAddr; -+ E4_uint64 c_dma_dstAddr; -+ E4_uint64 c_dma_srcEvent; -+ E4_uint64 c_dma_dstEvent; -+ -+ E4_uint64 c_ok_guard; -+ E4_uint64 c_ok_write_cmd; -+ E4_uint64 c_ok_write_value; -+ -+ E4_uint64 c_fail_guard; -+ E4_uint64 c_fail_setevent; -+ -+ E4_uint64 c_nop_cmd; -+} EP4_RXD_STEN_CMD; -+ -+#define EP4_RXD_STEN_CMD_NDWORDS (sizeof (EP4_RXD_STEN_CMD) / 8) -+
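On the main-CPU side the EP4_SPINENTER()/EP4_SPINEXIT() pair above brackets accesses to state shared with the elan thread, for example around the receiver's pending-descriptor list. A hedged usage sketch, assuming "dev" is the ELAN4_DEV and "rcvrRail" an EP4_RCVR_RAIL as declared later in this header (the real callers are in epcommsRx_elan4.c):

    EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock),
                   &rcvrRail->rcvr_main->rcvr_thread_lock);

    /* ... inspect or modify state shared with the elan thread ... */

    EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock),
                  &rcvrRail->rcvr_main->rcvr_thread_lock);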
-+typedef struct ep4_rxd_dma_cmd -+{ -+ E4_uint64 c_dma_typeSize; -+ E4_uint64 c_dma_cookie; -+ E4_uint64 c_dma_vproc; -+ E4_uint64 c_dma_srcAddr; -+ E4_uint64 c_dma_dstAddr; -+ E4_uint64 c_dma_srcEvent; -+ E4_uint64 c_dma_dstEvent; -+ E4_uint64 c_nop_cmd; -+} EP4_RXD_DMA_CMD; -+ -+#define EP4_RXD_DMA_CMD_NDWORDS (sizeof (EP4_RXD_DMA_CMD) / 8) -+#define EP4_RXD_START_CMD_NDWORDS (sizeof (E4_ThreadRegs) / 8) -+ -+typedef struct ep4_rxd_rail_elan -+{ -+ EP4_RXD_STEN_CMD rxd_sten[EP_MAXFRAG+1]; -+ -+ EP4_INTR_CMD rxd_done_cmd; /* command stream issued by done event (aligned to 64 bytes) */ -+ E4_Addr rxd_next; /* linked list when on pending list (pad to 32 bytes)*/ -+ E4_Event32 rxd_failed; /* event set when sten packet fails */ -+ -+ EP4_INTR_CMD rxd_failed_cmd; /* command stream issued by fail event (aligned to 64 bytes) */ -+ E4_uint64 rxd_queued; /* rxd queuing thread has executed (pad to 32 bytes)*/ -+ -+ E4_Event32 rxd_start; /* event to set to fire off an event chain (used as chain[0]) */ -+ E4_Event32 rxd_chain[EP_MAXFRAG]; /* chained events (aligned to 32 bytes) */ -+ E4_Event32 rxd_done; /* event to fire done command stream causing interrupt (used as chain[EP_MAXFRAG]) */ -+ -+ E4_Addr rxd_rxd; /* elan address of EP4_RXD_MAIN */ -+ E4_Addr rxd_main; /* elan address of EP4_RXD_RAIL_MAIN */ -+ E4_uint64 rxd_debug; /* thread debug value */ -+ -+ EP_NMD rxd_buffer; /* Network mapping descriptor for receive data */ -+} EP4_RXD_RAIL_ELAN; -+ -+#define EP4_RXD_RAIL_ELAN_SIZE roundup(sizeof (EP4_RXD_RAIL_ELAN), 64) -+ -+typedef struct ep4_rxd_rail_main -+{ -+ E4_uint64 rxd_sent[EP_MAXFRAG+1]; /* sten packet sent */ -+ E4_uint64 rxd_failed; /* sten packet failed */ -+ E4_uint64 rxd_done; /* operation complete */ -+ -+ E4_Addr rxd_scq; /* command port for scq */ -+} EP4_RXD_RAIL_MAIN; -+ -+#define EP4_RXD_RAIL_MAIN_SIZE roundup(sizeof (EP4_RXD_RAIL_MAIN), 8) -+ -+#if !defined(__elan4__) -+typedef struct ep4_rxd_rail -+{ -+ EP_RXD_RAIL rxd_generic; -+ -+ struct list_head rxd_retry_link; -+ unsigned long rxd_retry_time; -+ -+ EP4_INTCOOKIE rxd_intcookie; -+ -+ sdramaddr_t rxd_elan; -+ EP_ADDR rxd_elan_addr; -+ -+ EP4_RXD_RAIL_MAIN *rxd_main; -+ EP_ADDR rxd_main_addr; -+ -+ EP4_ECQ *rxd_ecq; /* cq with 128 bytes targeted by event */ -+ EP4_ECQ *rxd_scq; /* cq with 8 bytes targeted by main/thread store */ -+} EP4_RXD_RAIL; -+ -+#define EP4_NUM_RXD_PER_BLOCK 16 -+ -+typedef struct ep4_rxd_rail_block -+{ -+ struct list_head blk_link; -+ EP4_RXD_RAIL blk_rxds[EP4_NUM_RXD_PER_BLOCK]; -+} EP4_RXD_RAIL_BLOCK; -+ -+#endif /* !defined(__elan4__) */ -+ -+typedef struct ep4_rcvr_rail_elan -+{ -+ E4_uint64 rcvr_thread_stall[8]; /* place for thread to stall */ -+ E4_Event32 rcvr_qevent; /* Input queue event */ -+ E4_Event32 rcvr_thread_halt; /* place for thread to halt */ -+ -+ volatile E4_Addr rcvr_pending_tailp; /* list of pending rxd's (elan addr) */ -+ volatile E4_Addr rcvr_pending_head; /* -- this pair aligned to 16 bytes */ -+ -+ EP4_SPINLOCK_ELAN rcvr_thread_lock; /* spinlock for thread processing loop */ -+ -+ E4_uint64 rcvr_stall_intcookie; /* interrupt cookie to use when requested to halt */ -+ -+ E4_uint64 rcvr_qbase; /* base of input queue */ -+ E4_uint64 rcvr_qlast; /* last item in input queue */ -+ -+ E4_uint64 rcvr_debug; /* thread debug value */ -+} EP4_RCVR_RAIL_ELAN; -+ -+typedef struct ep4_rcvr_rail_main -+{ -+ EP4_SPINLOCK_MAIN rcvr_thread_lock; /* spinlock for thread processing loop */ -+} EP4_RCVR_RAIL_MAIN; -+ -+#if !defined(__elan4__) -+ -+typedef struct ep4_rcvr_rail_stats -+{ -+ unsigned long some_stat; -+} EP4_RCVR_RAIL_STATS; -+
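rcvr_pending_head/rcvr_pending_tailp in EP4_RCVR_RAIL_ELAN above form a singly linked, tail-pointed list of receive descriptors shared between the main CPU and the elan thread: the thread dequeues at the head (see epcomms_elan4_thread.c below) while the main CPU appends at the tail under the thread lock. A sketch of the append half, eliding the elan-to-sdram address translation the driver really performs; "dev", "rcvrElan", "rxdElan" and "rxdElanAddr" are assumed locals and tailp_to_sdram() is a stand-in helper, with the real code being ep4rcvr_queue_rxd() in epcommsRx_elan4.c:

    /* Sketch: append one rxd to the pending list from the main CPU,
     * with the receiver thread lock held (EP4_SPINENTER above).
     * rcvr_pending_tailp holds the elan address of the last rxd_next
     * field (or of rcvr_pending_head when the list is empty). */
    elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_next), 0);

    elan4_sdram_writeq (dev, tailp_to_sdram (rcvrRail, elan4_sdram_readq (dev,
                        rcvrElan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_pending_tailp))),
                        rxdElanAddr);

    elan4_sdram_writeq (dev, rcvrElan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_pending_tailp),
                        rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_next));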
-+typedef struct ep4_rcvr_rail -+{ -+ EP_RCVR_RAIL rcvr_generic; /* generic portion */ -+ -+ sdramaddr_t rcvr_elan; -+ EP_ADDR rcvr_elan_addr; -+ -+ EP4_RCVR_RAIL_MAIN *rcvr_main; -+ EP_ADDR rcvr_main_addr; -+ -+ sdramaddr_t rcvr_slots; /* input queue slots */ -+ EP_ADDR rcvr_slots_addr; /* and elan address */ -+ -+ EP_ADDR rcvr_stack; /* stack for thread */ -+ -+ EP4_ECQ *rcvr_ecq; /* command queue space for thread STEN packets */ -+ EP4_ECQ *rcvr_resched; /* command queue space to reschedule the thread */ -+ -+ struct list_head rcvr_freelist; /* freelist of per-rail receive descriptors */ -+ unsigned int rcvr_freecount; /* and number on free list */ -+ unsigned int rcvr_totalcount; /* total number created */ -+ spinlock_t rcvr_freelock; /* and lock for free list */ -+ struct list_head rcvr_blocklist; /* list of receive descriptor blocks */ -+ -+ unsigned int rcvr_freewaiting; /* waiting for descriptors to be freed */ -+ kcondvar_t rcvr_freesleep; /* and sleep here */ -+ -+ EP4_INTCOOKIE rcvr_stall_intcookie; /* interrupt cookie for thread halt */ -+ unsigned char rcvr_thread_halted; /* thread has been halted */ -+ unsigned char rcvr_cleanup_waiting; /* waiting for cleanup */ -+ kcondvar_t rcvr_cleanup_sleep; /* and sleep here */ -+ -+ EP4_RETRY_OPS rcvr_retryops; -+ -+ struct list_head rcvr_retrylist; /* list of rxd's to retry envelopes for */ -+ struct list_head rcvr_polllist; /* list of rxd's to poll for completion */ -+ spinlock_t rcvr_retrylock; -+ -+ EP4_RCVR_RAIL_STATS rcvr_stats; /* elan4 specific rcvr_rail stats */ -+ -+} EP4_RCVR_RAIL; -+ -+#endif /* !defined(__elan4__) */ -+ -+typedef struct ep4_txd_rail_elan -+{ -+ EP4_INTR_CMD txd_env_cmd; /* command stream for envelope event (64 byte aligned) */ -+ E4_uint64 txd_pad0; /* pad to 32 bytes */ -+ E4_Event32 txd_env; /* event set when STEN packet fails */ -+ -+ EP4_INTR_CMD txd_done_cmd; /* command stream for done event (64 byte aligned) */ -+ E4_uint64 txd_pad1; /* pad to 32 bytes */ -+ E4_Event32 txd_done; /* event set when transmit complete */ -+ -+ E4_Event32 txd_data; /* event set when xmit completes (=> phase becomes passive) */ -+} EP4_TXD_RAIL_ELAN; -+ -+#define EP4_TXD_RAIL_ELAN_SIZE roundup(sizeof(EP4_TXD_RAIL_ELAN), 64) -+ -+typedef struct ep4_txd_rail_main -+{ -+ E4_uint64 txd_env; -+ E4_uint64 txd_data; -+ E4_uint64 txd_done; -+} EP4_TXD_RAIL_MAIN; -+ -+#define EP4_TXD_RAIL_MAIN_SIZE roundup(sizeof(EP4_TXD_RAIL_MAIN), 8) -+ -+#if !defined (__elan4__) -+typedef struct ep4_txd_rail -+{ -+ EP_TXD_RAIL txd_generic; -+ -+ struct list_head txd_retry_link; -+ unsigned long txd_retry_time; -+ -+ EP4_INTCOOKIE txd_intcookie; -+ -+ sdramaddr_t txd_elan; -+ EP_ADDR txd_elan_addr; -+ -+ EP4_TXD_RAIL_MAIN *txd_main; -+ EP_ADDR txd_main_addr; -+ -+ EP4_ECQ *txd_ecq; -+ -+ E4_uint64 txd_cookie; -+} EP4_TXD_RAIL; -+ -+#define EP4_NUM_TXD_PER_BLOCK 21 -+ -+typedef struct ep4_txd_rail_block -+{ -+ struct list_head blk_link; -+ EP4_TXD_RAIL blk_txds[EP4_NUM_TXD_PER_BLOCK]; -+} EP4_TXD_RAIL_BLOCK; -+ -+typedef struct ep4_xmtr_rail_main -+{ -+ E4_int64 xmtr_flowcnt; -+} EP4_XMTR_RAIL_MAIN; -+ -+typedef struct ep4_xmtr_rail_stats -+{ -+ unsigned long some_stat; -+} EP4_XMTR_RAIL_STATS; -+ -+#define EP4_TXD_LIST_POLL 0 -+#define EP4_TXD_LIST_STALLED 1 -+#define EP4_TXD_LIST_RETRY 2 -+#define EP4_TXD_NUM_LISTS 3
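A transmit descriptor sits on exactly one of the xmtr_retrylist[] lists indexed by the EP4_TXD_LIST_* values above: POLL for descriptors whose completion events are simply being polled, STALLED for those whose STEN packet was nacked, and RETRY for those due to be re-issued by the retry thread. Command-queue flow control uses the two flow counts declared below: the rail's xmtr_flowcnt counts STEN packets issued, and the elan writes the completed count back through EP4_XMTR_RAIL_MAIN. A hedged sketch of the space check a transmitter would make before stuffing another STEN packet (the real test lives in epcommsTx_elan4.c):

    /* Sketch: is there room in the transmit command queue for another
     * STEN packet?  EP4_XMTR_FLOWCNT (defined below) is how many such
     * packets the command queue can hold at once. */
    static int ep4xmtr_cq_has_space_sketch (EP4_XMTR_RAIL *xmtrRail)
    {
        return (xmtrRail->xmtr_flowcnt - xmtrRail->xmtr_main->xmtr_flowcnt) < EP4_XMTR_FLOWCNT;
    }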
-+typedef struct ep4_xmtr_rail -+{ -+ EP_XMTR_RAIL xmtr_generic; -+ -+ EP4_XMTR_RAIL_MAIN *xmtr_main; -+ EP_ADDR xmtr_main_addr; -+ -+ struct list_head xmtr_freelist; -+ unsigned int xmtr_freecount; -+ unsigned int xmtr_totalcount; -+ spinlock_t xmtr_freelock; -+ struct list_head xmtr_blocklist; -+ unsigned int xmtr_freewaiting; -+ kcondvar_t xmtr_freesleep; -+ -+ EP4_INTCOOKIE xmtr_intcookie; /* interrupt cookie for "polled" descriptors */ -+ -+ ELAN4_CQ *xmtr_cq; -+ E4_int64 xmtr_flowcnt; -+ -+ EP4_RETRY_OPS xmtr_retryops; -+ -+ struct list_head xmtr_retrylist[EP4_TXD_NUM_LISTS]; /* list of txd's to retry envelopes for */ -+ struct list_head xmtr_polllist; /* list of txd's to poll for completion */ -+ spinlock_t xmtr_retrylock; -+ -+ EP4_XMTR_RAIL_STATS stats; /* elan4 specific xmtr rail stats */ -+} EP4_XMTR_RAIL; -+ -+#define EP4_XMTR_CQSIZE CQ_Size64K /* size of command queue for xmtr */ -+#define EP4_XMTR_FLOWCNT (CQ_Size(EP4_XMTR_CQSIZE) / 512) /* # of STEN packets which can fit in it */ -+ -+typedef struct ep4_comms_rail_elan -+{ -+ E4_Event32 r_flush_event; -+} EP4_COMMS_RAIL_ELAN; -+ -+#define EP4_COMMS_RAIL_ELAN_SIZE roundup(sizeof (EP4_COMMS_RAIL_ELAN), 32) -+ -+typedef struct ep4_comms_rail -+{ -+ EP_COMMS_RAIL r_generic; /* generic comms rail */ -+ sdramaddr_t r_descs; /* input queue descriptors */ -+ -+ sdramaddr_t r_elan; /* elan portion */ -+ EP_ADDR r_elan_addr; -+ -+ kmutex_t r_flush_mutex; /* sequentialise flush usage */ -+ EP4_INTCOOKIE r_flush_intcookie; /* interrupt cookie to generate */ -+ -+ kcondvar_t r_flush_sleep; /* place to sleep waiting */ -+ spinlock_t r_flush_lock; /* and spinlock to use */ -+ -+ unsigned int r_flush_count; /* # setevents issued */ -+ EP4_ECQ *r_flush_ecq; /* command queue for interrupt */ -+ EP4_ECQ *r_flush_mcq; /* command queue to issue waitevent */ -+ -+ EP4_NETERR_OPS r_neterr_ops; /* network error fixup ops */ -+} EP4_COMMS_RAIL; -+ -+/* epcommsTx_elan4.c */ -+extern void ep4xmtr_flush_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail); -+extern void ep4xmtr_failover_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail); -+extern void ep4xmtr_disconnect_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail); -+ -+extern void ep4xmtr_neterr_flush (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies); -+extern void ep4xmtr_neterr_check (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies); -+ -+/* epcommsRx_elan4.c */ -+extern void ep4rcvr_flush_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail); -+extern void ep4rcvr_failover_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail); -+extern void ep4rcvr_disconnect_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail); -+ -+extern void ep4rcvr_neterr_flush (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies); -+extern void ep4rcvr_neterr_check (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies); -+ -+/* epcomms_elan4.c */ -+extern void ep4comms_flush_start (EP4_COMMS_RAIL *commsRail); -+extern void ep4comms_flush_wait (EP4_COMMS_RAIL *commsRail); -+extern void ep4comms_flush_setevent (EP4_COMMS_RAIL *commsRail, ELAN4_CQ *cq); -+ -+extern EP_COMMS_RAIL *ep4comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *r); -+extern void ep4comms_del_rail (EP_COMMS_RAIL *r); -+extern void ep4comms_display_rail (EP_COMMS_RAIL *r); -+ -+/* epcommsTx_elan4.c */ -+extern int ep4xmtr_bind_txd (EP_TXD *txd, EP_XMTR_RAIL *xmtrRail, unsigned int phase); -+extern void ep4xmtr_unbind_txd (EP_TXD *txd, unsigned int phase); -+extern int ep4xmtr_poll_txd (EP_XMTR_RAIL *xmtrRail, EP_TXD_RAIL *txdRail, int how); -+extern long ep4xmtr_check (EP_XMTR_RAIL *xmtrRail, long nextRunTime); -+extern void ep4xmtr_add_rail (EP_XMTR *xmtr, 
EP_COMMS_RAIL *commsRail); -+extern void ep4xmtr_del_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail); -+extern int ep4xmtr_check_txd_state(EP_TXD *txd); -+ -+extern void ep4xmtr_display_xmtr (DisplayInfo *di, EP_XMTR_RAIL *xmtrRail); -+extern void ep4xmtr_display_txd (DisplayInfo *di, EP_TXD_RAIL *txdRail); -+ -+extern void ep4xmtr_fillout_rail_stats (EP_XMTR_RAIL *xmtr_rail, char *str); -+ -+/* epcommsRx_elan4.c */ -+extern int ep4rcvr_queue_rxd (EP_RXD *rxd, EP_RCVR_RAIL *rcvrRail); -+extern void ep4rcvr_rpc_put (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags); -+extern void ep4rcvr_rpc_get (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags); -+extern void ep4rcvr_rpc_complete (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags); -+ -+extern EP_RXD *ep4rcvr_steal_rxd (EP_RCVR_RAIL *rcvrRail); -+ -+extern long ep4rcvr_check (EP_RCVR_RAIL *rcvrRail, long nextRunTime); -+extern void ep4rcvr_add_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *rail); -+extern void ep4rcvr_del_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *rail); -+ -+extern void ep4rcvr_display_rcvr (DisplayInfo *di, EP_RCVR_RAIL *rcvrRail); -+extern void ep4rcvr_display_rxd (DisplayInfo *di, EP_RXD_RAIL *rxdRail); -+ -+extern void ep4rcvr_fillout_rail_stats (EP_RCVR_RAIL *rcvr_rail, char *str); -+ -+#endif /* !defined(__elan4__) */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -+#endif /* __EPCOMMS_ELAN4_H */ -diff -urN clean/drivers/net/qsnet/ep/epcomms_elan4_thread.c linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan4_thread.c ---- clean/drivers/net/qsnet/ep/epcomms_elan4_thread.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan4_thread.c 2005-03-22 09:41:55.000000000 -0500 -@@ -0,0 +1,347 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: epcomms_elan4_thread.c,v 1.13 2005/03/22 14:41:55 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $" -+/* $Source: /cvs/master/quadrics/epmod/epcomms_elan4_thread.c,v $*/ -+ -+//#include -+ -+typedef char int8_t; -+typedef unsigned char uint8_t; -+typedef short int16_t; -+typedef unsigned short uint16_t; -+typedef int int32_t; -+typedef unsigned int uint32_t; -+typedef long int64_t; -+typedef unsigned long uint64_t; -+ -+#include -+#include -+#include -+ -+#include -+ -+#include "kcomm_vp.h" -+#include "kcomm_elan4.h" -+#include "epcomms_elan4.h" -+ -+#include -+ -+/* assembler in epcomms_asm_elan4_thread.S */ -+extern void c_waitevent_interrupt (E4_uint64 *cport, E4_Event32 *event, E4_uint64 count, E4_uint64 intcookie); -+extern EP4_RXD_RAIL_ELAN *c_stall_thread (EP4_RCVR_RAIL_ELAN *rcvrRail); -+ -+#define R32_to_R47 "%r32", "%r33", "%r34", "%r35", "%r36", "%r37", "%r38", "%r39", \ -+ "%r40", "%r41", "%r42", "%r43", "%r44", "%r45", "%r46", "%r47" -+#define R48_to_R63 "%r48", "%r49", "%r50", "%r51", "%r52", "%r53", "%r54", "%r55", \ -+ "%r56", "%r57", "%r58", "%r59", "%r60", "%r61", "%r62", "%r63" -+ -+/* proto types for code in asm_elan4_thread.S */ -+extern void c_waitevent (E4_uint64 *commandport, E4_Addr event, E4_uint64 count); -+extern void c_reschedule(E4_uint64 *commandport); -+ -+static inline unsigned long -+c_load_u16(unsigned short *ptr) -+{ -+ unsigned long value; -+ -+ asm volatile ("ld2 [%1], %%r2\n" -+ "srl8,byte %%r2, %1, %0\n" -+ "sll8 %0, 48, %0\n" -+ "srl8 %0, 48, %0\n" -+ : /* outputs */ "=r" (value) -+ : /* inputs */ "r" (ptr) -+ : /* clobbered */ "%r2"); -+ return value; -+} -+ -+static inline unsigned long -+c_load_u32(unsigned int *ptr) -+{ -+ unsigned long value; -+ -+ asm volatile ("ld4 [%1], %%r2\n" -+ "srl8,byte %%r2, %1, %0\n" -+ "sll8 %0, 32, %0\n" -+ "srl8 %0, 32, %0\n" -+ : /* outputs */ "=r" (value) -+ : /* inputs */ "r" (ptr) -+ : /* clobbered */ "%r2"); -+ return value; -+} -+ -+static inline void -+c_store_u32(unsigned int *ptr, unsigned long value) -+{ -+ asm volatile ("sll8,byte %0, %1, %%r2\n" -+ "st4 %%r2, [%1]\n" -+ : /* no outputs */ -+ : /* inputs */ "r" (value), "r" (ptr) -+ : /* clobbered */ "%r2"); -+} -+ -+/* Reschedule the current Elan thread to the back of the run queue -+ * if there is another one ready to run */ -+static inline void -+c_yield (E4_uint64 *commandport) -+{ -+ unsigned long rval; -+ -+ asm volatile ("breaktest %0" : /* outputs */ "=r" (rval) : /* inputs */); -+ -+ if (rval & ICC_SIGNED_BIT) -+ c_reschedule(commandport); -+} -+ -+/* Reschedule the current thread if we're in danger of exceeding the -+ * thread instruction count */ -+static inline void -+c_insn_check(E4_uint64 *commandport) -+{ -+ unsigned long rval; -+ -+ asm volatile ("breaktest %0" : /* outputs */ "=r" (rval) : /* inputs */); -+ -+ if (rval & ICC_ZERO_BIT) -+ c_reschedule(commandport); -+} -+ -+void -+ep4_spinblock (E4_uint64 *cport, EP4_SPINLOCK_ELAN *sle, EP4_SPINLOCK_MAIN *slm) -+{ -+ do { -+ unsigned long val = *sle & 0xfffffffff; -+ -+ *slm = val; /* Release my lock */ -+ -+ while (*sle >> 32) /* Wait until the main */ -+ c_yield(cport); /* releases the lock */ -+ -+ c_store_u32 ((unsigned int *) sle, val + 1); /* and try and relock */ -+ } while (*sle >> 32); -+} -+ -+#define RESCHED_AFTER_PKTS ((CQ_Size(CQ_Size64K) / 128) - 1) -+ -+void -+ep4comms_rcvr (EP4_RAIL_ELAN *rail, EP4_RCVR_RAIL_ELAN *rcvrElan, EP4_RCVR_RAIL_MAIN *rcvrMain, -+ 
E4_InputQueue *inputq, E4_uint64 *cport, E4_uint64 *resched) -+{ -+ long count = 1; -+ long fptr = inputq->q_fptr; -+ -+ for (;;) -+ { -+ c_waitevent (cport, inputq->q_event, -count << 5); -+ -+ count = 0; -+ -+ while (fptr != inputq->q_bptr) -+ { -+ EP_ENVELOPE *env = (EP_ENVELOPE *) fptr; -+ unsigned long nodeid = c_load_u32 (&env->NodeId); -+ unsigned long opencmd = OPEN_STEN_PKT_CMD | OPEN_PACKET(0, PACK_OK | RESTART_COUNT_ZERO, EP_VP_DATA(nodeid)); -+ unsigned long vproc = EP_VP_DATA(rail->r_nodeid); -+ EP_ATTRIBUTE attr = c_load_u32 (&env->Attr); -+ unsigned long txdRail = c_load_u32 (&env->TxdRail); -+ unsigned long nFrags = c_load_u32 (&env->nFrags); -+ unsigned long srcevent = (EP_IS_RPC(attr) ? txdRail + offsetof (EP4_TXD_RAIL_ELAN, txd_data) : -+ txdRail + offsetof (EP4_TXD_RAIL_ELAN, txd_done)); -+ E4_uint64 cookie; -+ EP4_RXD_RAIL_ELAN *rxdElan; -+ EP4_RXD_RAIL_MAIN *rxdMain; -+ EP_RXD_MAIN *rxd; -+ EP4_RXD_STEN_CMD *sten; -+ E4_Event32 *event; -+ unsigned long first; -+ unsigned long buffer; -+ unsigned long len; -+ unsigned long i; -+ -+ EP4_SPINENTER(resched, &rcvrElan->rcvr_thread_lock, &rcvrMain->rcvr_thread_lock); -+ -+ if ((rxdElan = (EP4_RXD_RAIL_ELAN *) rcvrElan->rcvr_pending_head) == 0) -+ { -+ EP4_SPINEXIT (resched, &rcvrElan->rcvr_thread_lock, &rcvrMain->rcvr_thread_lock); -+ -+ rxdElan = c_stall_thread (rcvrElan); -+ -+ EP4_SPINENTER(resched, &rcvrElan->rcvr_thread_lock, &rcvrMain->rcvr_thread_lock); -+ } -+ -+ if (c_load_u32 (&env->Version) != EP_ENVELOPE_VERSION) /* envelope has been cancelled */ -+ { -+ EP4_SPINEXIT (resched, &rcvrElan->rcvr_thread_lock, &rcvrMain->rcvr_thread_lock); -+ goto consume_envelope; -+ } -+ -+ rxd = (EP_RXD_MAIN *) rxdElan->rxd_rxd; -+ rxdMain = (EP4_RXD_RAIL_MAIN *) rxdElan->rxd_main; -+ first = (EP_MAXFRAG+1) - (( EP_IS_MULTICAST(attr) ? 1 : 0) + (nFrags == 0 ? 
1 : nFrags)); -+ sten = &rxdElan->rxd_sten[first]; -+ event = &rxdElan->rxd_chain[first]; -+ cookie = rail->r_cookies[nodeid]; -+ -+ if (EP_IS_MULTICAST(attr)) /* need to fetch broadcast bitmap */ -+ { -+ sten->c_open = opencmd; -+ sten->c_trans = SEND_TRANS_CMD | ((TR_REMOTEDMA | TR_WAIT_FOR_EOP) << 16); -+ sten->c_cookie = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_STEN; -+ sten->c_dma_typeSize = E4_DMA_TYPE_SIZE(BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t), DMA_DataTypeWord, 0, EP4_DMA_RETRYCOUNT); -+ sten->c_dma_cookie = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_REMOTE | EP4_COOKIE_DMA | EP4_COOKIE_INC; -+ sten->c_dma_vproc = vproc; -+ sten->c_dma_srcAddr = c_load_u32 (&env->TxdMain.nmd_addr) + offsetof(EP_TXD_MAIN, Bitmap); -+ sten->c_dma_dstAddr = (E4_Addr) &rxd->Bitmap; -+ sten->c_dma_srcEvent = srcevent; -+ sten->c_dma_dstEvent = (E4_Addr) event; -+ -+ event->ev_CountAndType = E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS); -+ -+ cookie += (EP4_COOKIE_INC << 1); -+ -+ sten++; event++; -+ } -+ -+ if (nFrags == 0) -+ { -+ /* Generate an empty "get" DMA to accept the envelope and fire the rx handler */ -+ sten->c_open = opencmd; -+ sten->c_trans = SEND_TRANS_CMD | ((TR_REMOTEDMA | TR_WAIT_FOR_EOP) << 16); -+ sten->c_cookie = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_STEN; -+ sten->c_dma_typeSize = E4_DMA_TYPE_SIZE(0, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT); -+ sten->c_dma_cookie = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_REMOTE | EP4_COOKIE_DMA | EP4_COOKIE_INC; -+ sten->c_dma_vproc = vproc; -+ sten->c_dma_srcEvent = srcevent; -+ sten->c_dma_dstEvent = (E4_Addr) event; -+ -+ event->ev_CountAndType = E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS); -+ -+ len = 0; -+ -+ cookie += (EP4_COOKIE_INC << 1); -+ } -+ else -+ { -+ /* Generate the DMA chain to fetch the data */ -+ for (i = 0, buffer = c_load_u32 (&rxdElan->rxd_buffer.nmd_addr), len = 0; i < nFrags; i++) -+ { -+ unsigned long fragLen = c_load_u32 (&env->Frags[i].nmd_len); -+ -+ sten->c_open = opencmd; -+ sten->c_trans = SEND_TRANS_CMD | ((TR_REMOTEDMA | TR_WAIT_FOR_EOP) << 16); -+ sten->c_cookie = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_STEN; -+ sten->c_dma_typeSize = E4_DMA_TYPE_SIZE(fragLen, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT); -+ sten->c_dma_cookie = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_REMOTE | EP4_COOKIE_DMA | EP4_COOKIE_INC; -+ sten->c_dma_vproc = vproc; -+ sten->c_dma_srcAddr = c_load_u32 (&env->Frags[i].nmd_addr); -+ sten->c_dma_dstAddr = buffer; -+ sten->c_dma_srcEvent = srcevent; -+ sten->c_dma_dstEvent = (E4_Addr) event; -+ -+ event->ev_CountAndType = E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS); -+ -+ buffer += fragLen; -+ len += fragLen; -+ -+ cookie += (EP4_COOKIE_INC << 1); -+ -+ sten++; event++; -+ } -+ -+ (--event)->ev_CountAndType = E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS); -+ -+ if (c_load_u32 (&rxdElan->rxd_buffer.nmd_len) < len) -+ { -+ /* The receive descriptor was too small for the message */ -+ /* complete the message anyway, but don't transfer any */ -+ /* data, we set the length to EP_MSG_TOO_BIG */ -+ for (i = first, sten = &rxdElan->rxd_sten[first]; i <= EP_MAXFRAG; i++, sten++) -+ sten->c_dma_typeSize = E4_DMA_TYPE_SIZE(0, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT); -+ -+ len = EP_MSG_TOO_BIG; -+ } -+ } -+ -+ /* Stuff the first STEN packet into the command queue, there's always enough space, -+ * since we will insert a waitevent at least once for 
the queue size */ -+ asm volatile ("ld64 [%0], %%r32\n" -+ "ld64 [%0 + 64], %%r48\n" -+ "st64 %%r32, [%1]\n" -+ "st64 %%r48, [%1]\n" -+ : /* no outputs */ -+ : /* inputs */ "r" (&rxdElan->rxd_sten[first]), "r" (cport) -+ : /* clobbered */ R32_to_R47, R48_to_R63); -+ -+ /* remove the RXD from the pending list */ -+ if ((rcvrElan->rcvr_pending_head = rxdElan->rxd_next) == 0) -+ rcvrElan->rcvr_pending_tailp = (E4_Addr)&rcvrElan->rcvr_pending_head; -+ -+ /* mark as not queued */ -+ rxdElan->rxd_queued = 0; -+ -+ /* copy down the envelope */ -+ if (EP_HAS_PAYLOAD(attr)) -+ asm volatile ("ld64 [%0], %%r32\n" -+ "ld64 [%0+64], %%r48\n" -+ "st64 %%r32, [%1]\n" -+ "ld64 [%0+128], %%r32\n" -+ "st64 %%r48, [%1+64]\n" -+ "ld64 [%0+192], %%r48\n" -+ "st64 %%r32, [%1 + 128]\n" -+ "st64 %%r48, [%1 + 192]\n" -+ : /* no outputs */ -+ : /* inputs */ "r" (env), "r" (&rxd->Envelope) -+ : /* clobbered */ R32_to_R47, R48_to_R63); -+ -+ else -+ asm volatile ("ld64 [%0], %%r32\n" -+ "ld64 [%0+64], %%r48\n" -+ "st64 %%r32, [%1]\n" -+ "st64 %%r48, [%1+64]\n" -+ : /* no outputs */ -+ : /* inputs */ "r" (env), "r" (&rxd->Envelope) -+ : /* clobbered */ R32_to_R47, R48_to_R63); -+ -+ /* Store the message length to indicate that I've finished */ -+ c_store_u32 (&rxd->Len, len); -+ -+ /* Finally update the network error cookie */ -+ rail->r_cookies[nodeid] = cookie; -+ -+ EP4_SPINEXIT (resched, &rcvrElan->rcvr_thread_lock, &rcvrMain->rcvr_thread_lock); -+ -+ consume_envelope: -+ if (fptr != rcvrElan->rcvr_qlast) -+ fptr += EP_INPUTQ_SIZE; -+ else -+ fptr = rcvrElan->rcvr_qbase; -+ -+ if (! rcvrElan->rcvr_stall_intcookie) -+ inputq->q_fptr = fptr; -+ -+ if (++count >= RESCHED_AFTER_PKTS) -+ break; -+ -+ c_insn_check (cport); -+ } -+ -+ if (rcvrElan->rcvr_stall_intcookie) -+ { -+ c_waitevent_interrupt (cport, &rcvrElan->rcvr_thread_halt, -(1 << 5), rcvrElan->rcvr_stall_intcookie); -+ inputq->q_fptr = fptr; -+ -+ count++; /* one extra as we were given an extra set to wake us up */ -+ } -+ } -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/epcommsFwd.c linux-2.6.9/drivers/net/qsnet/ep/epcommsFwd.c ---- clean/drivers/net/qsnet/ep/epcommsFwd.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/epcommsFwd.c 2005-07-20 08:01:34.000000000 -0400 -@@ -0,0 +1,310 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: epcommsFwd.c,v 1.12.4.1 2005/07/20 12:01:34 mike Exp $ $Name: QSNETMODULES-5-11-3_20050907 $" -+/* $Source: /cvs/master/quadrics/epmod/epcommsFwd.c,v $*/ -+ -+#include -+ -+#include -+#include -+#include -+ -+#include "debug.h" -+ -+unsigned int epcomms_forward_limit = 8; -+ -+static void -+GenerateTree (unsigned nodeId, unsigned lowId, unsigned highId, bitmap_t *bitmap, -+ unsigned *parentp, unsigned *childrenp, int *nchildrenp) -+{ -+ int i; -+ int count; -+ int branch; -+ int nSub; -+ int branchIndex; -+ int parent; -+ int nBranch; -+ int rem; -+ int self; -+ int branchRatio; -+ int node; -+ int x, y, z; -+ -+ -+#ifdef DEBUG_PRINTF -+ { -+#define OVERFLOW "...]" -+#define LINESZ 128 -+ char space[LINESZ+1]; -+ -+ if (ep_sprintf_bitmap (space, LINESZ-strlen(OVERFLOW), bitmap, 0, 0, (highId - lowId)+1) != -1) -+ strcat (space, OVERFLOW); -+ -+ EPRINTF3 (DBG_FORWARD, "GenerateTree: elan node low=%d node high=%d bitmap=%s\n", lowId, highId, space); -+#undef OVERFLOW -+#undef LINESZ -+ } -+#endif -+ -+ /* Count the number of nodes in the partition */ -+ /* and work out which one I am */ -+ for (count = 0, self = ELAN_INVALID_NODE, i = lowId; i <= highId; i++) -+ { -+ if (BT_TEST (bitmap, i-lowId)) -+ { -+ if (i == nodeId) -+ self = count; -+ count++; -+ } -+ } -+ -+ EPRINTF2 (DBG_FORWARD, "GenerateTree: count=%d self=%d\n", count, self); -+ -+ if (count == 0 || self == ELAN_INVALID_NODE) -+ { -+ *parentp = ELAN_INVALID_NODE; -+ *nchildrenp = 0; -+ return; -+ } -+ -+ /* search for position in tree */ -+ branchRatio = EP_TREE_ARITY; /* branching ratio */ -+ branch = 0; /* start with process 0 */ -+ nSub = count; /* and whole tree */ -+ branchIndex = -1; /* my branch # in parent */ -+ parent = -1; /* my parent's group index # */ -+ -+ while (branch != self) /* descend process tree */ -+ { /* until I find myself */ -+ parent = branch; -+ branch++; /* parent + 1 = first born */ -+ nSub--; /* set # descendents */ -+ -+ rem = nSub % branchRatio; -+ nSub = nSub / branchRatio + 1; -+ x = rem * nSub; -+ y = self - branch; -+ -+ if (y < x) /* my first 'rem' branches have */ -+ { /* 1 more descendent... */ -+ branchIndex = y / nSub; -+ branch += branchIndex * nSub; -+ } -+ else /* than the rest of my branches */ -+ { -+ nSub--; -+ z = (y - x) / nSub; -+ branchIndex = rem + z; -+ branch += x + z * nSub; -+ } -+ } -+ -+ branch++; /* my first born */ -+ nSub--; /* total # of my descendents */ -+ /* leaves + their parents may have # children < branchRatio */ -+ nBranch = (nSub < branchRatio) ? nSub : branchRatio; -+ -+ EPRINTF2 (DBG_FORWARD, "GenerateTree: parent=%d nBranch=%d\n", parent, nBranch); -+ -+ /* Now calculate the real elan id's of the parent and my children */ -+ if (parent == -1) -+ *parentp = ELAN_INVALID_NODE; -+ else -+ { -+ for (i = lowId, node = 0; i <= highId; i++) -+ { -+ if (BT_TEST(bitmap, i-lowId)) -+ if (node++ == parent) -+ break; -+ } -+ *parentp = i; -+ } -+ -+ for (i = lowId, branchIndex = 0, node = 0; branchIndex < nBranch && i <= highId; i++) -+ { -+ if (BT_TEST(bitmap, i-lowId)) -+ { -+ if (node == branch) -+ { -+ branch = branch + nSub / branchRatio + ((branchIndex < (nSub % branchRatio)) ? 1 : 0); -+ -+ childrenp[branchIndex++] = i; -+ } -+ node++; -+ } -+ } -+ -+ *nchildrenp = branchIndex; -+}
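A worked example of the search loop, assuming a branching ratio of 2 (the real fan-out is EP_TREE_ARITY, defined elsewhere): with 7 participating nodes the group indices 0..6 form the tree

    0 -> {1, 4},  1 -> {2, 3},  4 -> {5, 6}

For self=1 the loop runs once: parent=0, branch=1, nSub=6, rem=0, then nSub=6/2+1=4, x=0, y=0; the else leg gives nSub=3, z=0, branchIndex=0, leaving branch=1 and terminating. The epilogue then yields nBranch=2 with children at group indices 2 and 3, and the bitmap scan above maps those group indices back to real elan node ids.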
-+ -+static void -+ForwardTxDone (EP_TXD *txd, void *arg, EP_STATUS status) -+{ -+ EP_FWD_DESC *desc = (EP_FWD_DESC *) arg; -+ EP_RXD *rxd = desc->Rxd; -+ EP_COMMS_SUBSYS *subsys = rxd->Rcvr->Subsys; -+ unsigned long flags; -+ -+ /* XXXX: if transmit fails, could step to next node in this subtree ? */ -+ -+ spin_lock_irqsave (&subsys->ForwardDescLock, flags); -+ -+ if (--desc->NumChildren > 0) -+ spin_unlock_irqrestore (&subsys->ForwardDescLock, flags); -+ else -+ { -+ rxd->Rcvr->ForwardRxdCount--; -+ -+ spin_unlock_irqrestore (&subsys->ForwardDescLock, flags); -+ -+ KMEM_FREE (desc, sizeof (EP_FWD_DESC)); -+ -+ rxd->Handler (rxd); -+ } -+} -+ -+long -+ep_forward_rxds (EP_COMMS_SUBSYS *subsys, long nextRunTime) -+{ -+ unsigned long flags; -+ int i, res; -+ -+ spin_lock_irqsave (&subsys->ForwardDescLock, flags); -+ while (! list_empty (&subsys->ForwardDescList)) -+ { -+ EP_RXD *rxd = (EP_RXD *) list_entry (subsys->ForwardDescList.next, EP_RXD, Link); -+ EP_RXD_MAIN *rxdMain = rxd->RxdMain; -+ EP_ENVELOPE *env = &rxdMain->Envelope; -+ EP_FWD_DESC *desc; -+ -+ EPRINTF2 (DBG_FORWARD, "ep: forwarding rxd %p to range %x\n", rxd, env->Range); -+ -+ list_del (&rxd->Link); -+ -+ rxd->Rcvr->ForwardRxdCount++; -+ -+ spin_unlock_irqrestore (&subsys->ForwardDescLock, flags); -+ -+ KMEM_ALLOC (desc, EP_FWD_DESC *, sizeof (EP_FWD_DESC), 1); -+ -+ if (desc == NULL) -+ { -+ spin_lock_irqsave (&subsys->ForwardDescLock, flags); -+ rxd->Rcvr->ForwardRxdCount--; -+ spin_unlock_irqrestore (&subsys->ForwardDescLock, flags); -+ -+ rxd->Handler (rxd); -+ } -+ else -+ { -+ /* compute the spanning tree for this message */ -+ unsigned int destLo = EP_RANGE_LOW (env->Range); -+ unsigned int destHi = EP_RANGE_HIGH (env->Range); -+ unsigned int parent; -+ -+ GenerateTree (subsys->Subsys.Sys->Position.pos_nodeid, destLo, destHi, rxdMain->Bitmap, &parent, desc->Children, &desc->NumChildren); -+ -+ if (desc->NumChildren == 0 || (epcomms_forward_limit && (rxd->Rcvr->ForwardRxdCount >= epcomms_forward_limit))) -+ { -+ EPRINTF5 (DBG_FORWARD, "ep: don't forward rxd %p to /%d (%d children/ %d forwarding (%d))\n", -+ rxd, rxd->Rcvr->Service, desc->NumChildren, rxd->Rcvr->ForwardRxdCount, epcomms_forward_limit); -+ -+ spin_lock_irqsave (&subsys->ForwardDescLock, flags); -+ rxd->Rcvr->ForwardRxdCount--; -+ spin_unlock_irqrestore (&subsys->ForwardDescLock, flags); -+ -+ KMEM_FREE (desc, sizeof (EP_FWD_DESC)); -+ -+ rxd->Handler (rxd); -+ } -+ else -+ { -+ ep_nmd_subset (&desc->Data, &rxd->Data, 0, ep_rxd_len (rxd)); -+ desc->Rxd = rxd; -+ -+ /* NOTE - cannot access 'desc' after last call to multicast, since it could complete -+ * and free the desc before we access it again. Hence the reverse loop. */ -+ for (i = desc->NumChildren-1; i >= 0; i--) -+ { -+ ASSERT (desc->Children[i] < subsys->Subsys.Sys->Position.pos_nodes); -+ -+ EPRINTF3 (DBG_FORWARD, "ep: forwarding rxd %p to node %d/%d\n", rxd, desc->Children[i], rxd->Rcvr->Service); -+ -+ if ((res = ep_multicast_forward (subsys->ForwardXmtr, desc->Children[i], rxd->Rcvr->Service, 0, -+ ForwardTxDone, desc, env, EP_HAS_PAYLOAD(env->Attr) ? 
&rxdMain->Payload : NULL, -+ rxdMain->Bitmap, &desc->Data, 1)) != EP_SUCCESS) -+ { -+ ep_debugf (DBG_FORWARD, "ep: ep_multicast_forward failed\n"); -+ ForwardTxDone (NULL, desc, res); -+ } -+ } -+ -+ } -+ } -+ -+ spin_lock_irqsave (&subsys->ForwardDescLock, flags); -+ } -+ spin_unlock_irqrestore (&subsys->ForwardDescLock, flags); -+ -+ return (nextRunTime); -+} -+ -+#if ! defined(CONFIG_EP_NO_CHECK_SUM) -+void -+ep_csum_rxds (EP_COMMS_SUBSYS *subsys) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&subsys->CheckSumDescLock, flags); -+ while (! list_empty (&subsys->CheckSumDescList)) -+ { -+ EP_RXD *rxd = (EP_RXD *) list_entry (subsys->CheckSumDescList.next, EP_RXD, CheckSumLink); -+ EP_ENVELOPE *env = &rxd->RxdMain->Envelope; -+ -+ list_del_init (&rxd->CheckSumLink); -+ spin_unlock_irqrestore (&subsys->CheckSumDescLock, flags); -+ -+ if (env->CheckSum) { -+ EP_NMD nmd; -+ uint32_t csum; -+ -+ ep_nmd_subset ( &nmd, &rxd->Data, 0, ep_rxd_len (rxd)); -+ -+ csum = ep_calc_check_sum(subsys->Subsys.Sys, env, &nmd, 1); -+ if ( env->CheckSum != csum ) { -+ int f; -+ -+ -+ printk("Check Sum Error: env(0x%x,0x%x) data(0x%x,0x%x)\n", ((csum >> 16) & 0x7FFF), ((env->CheckSum >> 16) & 0x7FFF), -+ (csum & 0xFFFF), (env->CheckSum & 0xFFFF)); -+ printk("Check Sum Error: Sent : NodeId %u Range 0x%x Service %u Version 0x%x Attr 0x%x\n", env->NodeId, env->Range, rxd->Rcvr->Service, env->Version, env->Attr); -+ printk("Check Sum Error: Sent : Xid Generation 0x%x Handle 0x%x Unique 0x%llx\n", env->Xid.Generation, env->Xid.Handle, (long long)env->Xid.Unique); -+ printk("Check Sum Error: Sent : TxdRail 0x%x TxdMain nmd_addr 0x%x nmd_len %u nmd_attr 0x%x\n", env->TxdRail, env->TxdMain.nmd_addr, env->TxdMain.nmd_len, env->TxdMain.nmd_attr ); -+ printk("Check Sum Error: Sent : nFrags %d \n", env->nFrags); -+ for(f=0;fnFrags;f++) -+ printk("Check Sum Error: Sent (%d): nmd_addr 0x%x nmd_len %u nmd_attr 0x%x\n", f, -+ env->Frags[f].nmd_addr, env->Frags[f].nmd_len, env->Frags[f].nmd_attr); -+ printk("Check Sum Error: Recv : nmd_addr 0x%x nmd_len %u nmd_attr 0x%x\n", -+ nmd.nmd_addr, nmd.nmd_len, nmd.nmd_attr); -+ -+ } -+ } -+ ep_rxd_received_now(rxd); -+ -+ spin_lock_irqsave (&subsys->CheckSumDescLock, flags); -+ } -+ spin_unlock_irqrestore (&subsys->CheckSumDescLock, flags); -+} -+#endif -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/epcommsRx.c linux-2.6.9/drivers/net/qsnet/ep/epcommsRx.c ---- clean/drivers/net/qsnet/ep/epcommsRx.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/epcommsRx.c 2004-11-30 07:02:06.000000000 -0500 -@@ -0,0 +1,1205 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: epcommsRx.c,v 1.33 2004/11/30 12:02:06 mike Exp $ $Name: QSNETMODULES-5-11-3_20050907 $" -+/* $Source: /cvs/master/quadrics/epmod/epcommsRx.c,v $*/ -+ -+#include -+#include -+#include -+#include -+ -+#include "debug.h" -+ -+unsigned int ep_rxd_lowat = 5; -+ -+static int -+AllocateRxdBlock (EP_RCVR *rcvr, EP_ATTRIBUTE attr, EP_RXD **rxdp) -+{ -+ EP_RXD_BLOCK *blk; -+ EP_RXD *rxd; -+ EP_RXD_MAIN *pRxdMain; -+ int i; -+ unsigned long flags; -+ -+ KMEM_ZALLOC (blk, EP_RXD_BLOCK *, sizeof (EP_RXD_BLOCK), ! 
(attr & EP_NO_SLEEP)); -+ -+ if (blk == NULL) -+ return (ENOMEM); -+ -+ if ((pRxdMain = ep_shared_alloc_main (rcvr->Subsys->Subsys.Sys, EP_RXD_MAIN_SIZE * EP_NUM_RXD_PER_BLOCK, attr, &blk->NmdMain)) == (sdramaddr_t) 0) -+ { -+ KMEM_FREE (blk, sizeof (EP_RXD_BLOCK)); -+ return (ENOMEM); -+ } -+ -+ for (rxd = &blk->Rxd[0], i = 0; i < EP_NUM_RXD_PER_BLOCK; i++, rxd++) -+ { -+ rxd->Rcvr = rcvr; -+ rxd->RxdMain = pRxdMain; -+ -+ ep_nmd_subset (&rxd->NmdMain, &blk->NmdMain, (i * EP_RXD_MAIN_SIZE), EP_RXD_MAIN_SIZE); -+ -+ /* move onto next descriptor */ -+ pRxdMain = (EP_RXD_MAIN *) ((unsigned long) pRxdMain + EP_RXD_MAIN_SIZE); -+ } -+ -+ spin_lock_irqsave (&rcvr->FreeDescLock, flags); -+ -+ list_add (&blk->Link, &rcvr->DescBlockList); -+ -+ rcvr->TotalDescCount += EP_NUM_RXD_PER_BLOCK; -+ -+ for (i = rxdp ? 1 : 0; i < EP_NUM_RXD_PER_BLOCK; i++) -+ { -+ -+#if ! defined(CONFIG_EP_NO_CHECK_SUM) -+ INIT_LIST_HEAD (&blk->Rxd[i].CheckSumLink); -+#endif -+ -+ list_add (&blk->Rxd[i].Link, &rcvr->FreeDescList); -+ -+ rcvr->FreeDescCount++; -+ -+ if (rcvr->FreeDescWanted) -+ { -+ rcvr->FreeDescWanted--; -+ kcondvar_wakeupone (&rcvr->FreeDescSleep, &rcvr->FreeDescLock); -+ } -+ } -+ spin_unlock_irqrestore (&rcvr->FreeDescLock, flags); -+ -+ if (rxdp) -+ { -+ -+#if ! defined(CONFIG_EP_NO_CHECK_SUM) -+ INIT_LIST_HEAD (&blk->Rxd[0].CheckSumLink); -+#endif -+ -+ *rxdp = &blk->Rxd[0]; -+ } -+ return (ESUCCESS); -+} -+ -+static void -+FreeRxdBlock (EP_RCVR *rcvr, EP_RXD_BLOCK *blk) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&rcvr->FreeDescLock, flags); -+ -+ list_del (&blk->Link); -+ -+ rcvr->TotalDescCount -= EP_NUM_RXD_PER_BLOCK; -+ rcvr->FreeDescCount -= EP_NUM_RXD_PER_BLOCK; -+ -+ spin_unlock_irqrestore (&rcvr->FreeDescLock, flags); -+ -+ ep_shared_free_main (rcvr->Subsys->Subsys.Sys, &blk->NmdMain); -+ KMEM_FREE (blk, sizeof (EP_RXD_BLOCK)); -+} -+ -+static EP_RXD * -+GetRxd (EP_RCVR *rcvr, EP_ATTRIBUTE attr) -+{ -+ EP_RXD *rxd; -+ unsigned long flags; -+ int low_on_rxds; -+ -+ spin_lock_irqsave (&rcvr->FreeDescLock, flags); -+ -+ while (list_empty (&rcvr->FreeDescList)) -+ { -+ if (! (attr & EP_NO_ALLOC)) -+ { -+ spin_unlock_irqrestore (&rcvr->FreeDescLock, flags); -+ -+ if (AllocateRxdBlock (rcvr, attr, &rxd) == ESUCCESS) -+ return (rxd); -+ -+ spin_lock_irqsave (&rcvr->FreeDescLock, flags); -+ } -+ -+ if (attr & EP_NO_SLEEP) -+ { -+ IncrStat (rcvr->Subsys, NoFreeRxds); -+ spin_unlock_irqrestore (&rcvr->FreeDescLock, flags); -+ -+ ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt); -+ return (NULL); -+ } -+ -+ rcvr->FreeDescWanted++; -+ kcondvar_wait (&rcvr->FreeDescSleep, &rcvr->FreeDescLock, &flags); -+ } -+ -+ rxd = list_entry (rcvr->FreeDescList.next, EP_RXD, Link); -+ -+ list_del (&rxd->Link); -+ -+ /* Wakeup the descriptor primer thread if there's not many left */ -+ low_on_rxds = (--rcvr->FreeDescCount < ep_rxd_lowat); -+ -+ spin_unlock_irqrestore (&rcvr->FreeDescLock, flags); -+ -+ if (low_on_rxds) -+ ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt); -+ -+ return (rxd); -+} -+ -+static void -+FreeRxd (EP_RCVR *rcvr, EP_RXD *rxd) -+{ -+ unsigned long flags; -+ -+ ASSERT (EP_XID_INVALID(rxd->MsgXid)); -+ -+ spin_lock_irqsave (&rcvr->FreeDescLock, flags); -+ -+#if ! 
defined(CONFIG_EP_NO_CHECK_SUM) -+ ASSERT(list_empty(&rxd->CheckSumLink)); -+#endif -+ -+ list_add (&rxd->Link, &rcvr->FreeDescList); -+ -+ rcvr->FreeDescCount++; -+ -+ if (rcvr->FreeDescWanted) /* someone waiting for a receive */ -+ { /* descriptor, so wake them up */ -+ rcvr->FreeDescWanted--; -+ kcondvar_wakeupone (&rcvr->FreeDescSleep, &rcvr->FreeDescLock); -+ } -+ -+ spin_unlock_irqrestore (&rcvr->FreeDescLock, flags); -+} -+ -+int -+ep_queue_receive (EP_RCVR *rcvr, EP_RXH *handler, void *arg, EP_NMD *nmd, EP_ATTRIBUTE attr) -+{ -+ EP_RCVR_RAIL *rcvrRail; -+ EP_RXD *rxd; -+ int rnum; -+ unsigned long flags; -+ -+ if ((rxd = GetRxd (rcvr, attr)) == NULL) -+ return (ENOMEM); -+ -+ rxd->Handler = handler; -+ rxd->Arg = arg; -+ rxd->Data = *nmd; -+ rxd->RxdMain->Len = EP_RXD_PENDING; -+ -+ spin_lock_irqsave (&rcvr->Lock, flags); -+ -+ list_add_tail (&rxd->Link, &rcvr->ActiveDescList); -+ -+ if (EP_IS_PREFRAIL_SET(attr)) -+ rnum = EP_ATTR2PREFRAIL(attr); -+ else -+ rnum = ep_rcvr_prefrail (rcvr, EP_NMD_RAILMASK(nmd)); -+ -+ if (rnum < 0 || !(EP_NMD_RAILMASK(nmd) & EP_RAIL2RAILMASK(rnum) & rcvr->RailMask)) -+ rcvrRail = NULL; -+ else -+ rcvrRail = rcvr->Rails[rnum]; -+ -+ EPRINTF7 (DBG_RCVR,"ep_queue_receive: rxd=%p svc %d nmd=%08x,%d,%x rnum=%d rcvrRail=%p\n", -+ rxd, rcvr->Service, nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr, rnum, rcvrRail); -+ -+ rxd->State = EP_RXD_RECEIVE_ACTIVE; -+ -+ if (rcvrRail == NULL || !EP_RCVR_OP (rcvrRail, QueueRxd) (rxd, rcvrRail)) -+ { -+ rxd->State = EP_RXD_RECEIVE_UNBOUND; -+ -+ ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt); -+ } -+ -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+ -+ return (ESUCCESS); -+} -+ -+void -+ep_requeue_receive (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *nmd, EP_ATTRIBUTE attr) -+{ -+ EP_RCVR *rcvr = rxd->Rcvr; -+ EP_SYS *sys = rcvr->Subsys->Subsys.Sys; -+ int rnum = ep_pickRail(EP_NMD_RAILMASK(&rxd->Data)); -+ EP_RCVR_RAIL *rcvrRail; -+ unsigned long flags; -+ -+ ASSERT (rxd->RxdRail == NULL); -+ -+ EPRINTF5 (DBG_RCVR,"ep_requeue_receive: rxd=%p svc %d nmd=%08x,%d,%x\n", -+ rxd, rcvr->Service, nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr); -+ -+ rxd->Handler = handler; -+ rxd->Arg = arg; -+ rxd->Data = *nmd; -+ rxd->RxdMain->Len = EP_RXD_PENDING; -+ -+ spin_lock_irqsave (&rcvr->Lock, flags); -+ -+ list_add_tail (&rxd->Link, &rcvr->ActiveDescList); -+ -+ /* -+ * Rail selection: if they've asked for a particular rail, then use it, otherwise if -+ * the rail it was last received on is mapped for the nmd and is available -+ * then use that one, otherwise pick one that is mapped by the nmd. -+ */ -+ if (EP_IS_PREFRAIL_SET(attr)) -+ rnum = EP_ATTR2PREFRAIL(attr); -+ -+ if (rnum < 0 || ! (EP_RAIL2RAILMASK (rnum) & EP_NMD_RAILMASK(nmd) & ep_rcvr_availrails (rcvr))) -+ rnum = ep_rcvr_prefrail (rcvr, EP_NMD_RAILMASK(nmd)); -+ -+ if (rnum < 0) -+ rcvrRail = NULL; -+ else -+ { -+ rcvrRail = rcvr->Rails[rnum]; -+ -+ if (! 
(EP_NMD_RAILMASK(&rxd->Data) & EP_RAIL2RAILMASK(rnum)) && ep_nmd_map_rails (sys, &rxd->Data, EP_RAIL2RAILMASK(rnum)) < 0)
-+ rcvrRail = NULL;
-+ }
-+
-+ rxd->State = EP_RXD_RECEIVE_ACTIVE;
-+
-+ if (rcvrRail == NULL || !EP_RCVR_OP(rcvrRail, QueueRxd) (rxd, rcvrRail))
-+ {
-+ EPRINTF1 (DBG_RCVR, "ep_requeue_receive: rcvrRail=%p - setting unbound\n", rcvrRail);
-+
-+ rxd->State = EP_RXD_RECEIVE_UNBOUND;
-+
-+ ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
-+ }
-+
-+ if (rcvr->CleanupWaiting)
-+ kcondvar_wakeupall (&rcvr->CleanupSleep, &rcvr->Lock);
-+ rcvr->CleanupWaiting = 0;
-+
-+ spin_unlock_irqrestore (&rcvr->Lock, flags);
-+}
-+
-+void
-+ep_complete_receive (EP_RXD *rxd)
-+{
-+ EP_RCVR *rcvr = rxd->Rcvr;
-+ unsigned long flags;
-+
-+ ASSERT (rxd->RxdRail == NULL && rxd->State == EP_RXD_COMPLETED);
-+
-+ FreeRxd (rcvr, rxd);
-+
-+ /* if we're waiting for cleanup, then wake them up */
-+ spin_lock_irqsave (&rcvr->Lock, flags);
-+ if (rcvr->CleanupWaiting)
-+ kcondvar_wakeupall (&rcvr->CleanupSleep, &rcvr->Lock);
-+ rcvr->CleanupWaiting = 0;
-+ spin_unlock_irqrestore (&rcvr->Lock, flags);
-+}
-+
-+int
-+ep_rpc_put (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *local, EP_NMD *remote, int nFrags)
-+{
-+ EP_RCVR *rcvr = rxd->Rcvr;
-+ EP_SYS *sys = rcvr->Subsys->Subsys.Sys;
-+ EP_ENVELOPE *env = &rxd->RxdMain->Envelope;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave (&rcvr->Lock, flags);
-+
-+ if (rxd->State == EP_RXD_BEEN_ABORTED)
-+ {
-+ EPRINTF2 (DBG_RCVR, "ep_rpc_put: rcvr %p rxd %p completed because no rails available\n", rcvr, rxd);
-+
-+ /* rxd no longer on active list - just free it off and return an error */
-+ spin_unlock_irqrestore (&rcvr->Lock, flags);
-+
-+ return EP_CONN_RESET;
-+ }
-+ else
-+ {
-+ EP_RXD_RAIL *rxdRail = rxd->RxdRail;
-+ EP_RCVR_RAIL *rcvrRail = rxdRail->RcvrRail;
-+ EP_COMMS_RAIL *commsRail = rcvrRail->CommsRail;
-+ EP_RAIL *rail = commsRail->Rail;
-+ EP_NODE_RAIL *nodeRail = &rail->Nodes[env->NodeId];
-+ int i;
-+
-+ /* Attempt to ensure that the local nmds are mapped */
-+ for (i = 0; i < nFrags; i++)
-+ if (! (EP_NMD_RAILMASK(&local[i]) & EP_RAIL2RAILMASK(rail->Number)))
-+ ep_nmd_map_rails (sys, &local[i], EP_RAIL2RAILMASK(rail->Number));
-+
-+ if (nodeRail->State == EP_NODE_CONNECTED && /* rail is connected */
-+ (ep_nmd2railmask (local, nFrags) & ep_nmd2railmask (remote, nFrags) & EP_RAIL2RAILMASK (rail->Number))) /* and NMDs valid for it */
-+ {
-+ rxd->State = EP_RXD_PUT_ACTIVE;
-+
-+ EP_RCVR_OP(rcvrRail, RpcPut) (rxd, local, remote, nFrags);
-+ }
-+ else
-+ {
-+ /* RPC completion cannot progress - either node is no longer connected on this
-+ * rail or some of the source/destination NMDs are not mapped on this rail.
-+ * Save the NMDs into the RXD and schedule the thread to request mappings */
-+ EPRINTF4 (DBG_RCVR, "%s: ep_rpc_put: rcvr %p rxd %p %s\n", rail->Name, rcvr, rxd,
-+ (nodeRail->State == EP_NODE_CONNECTED) ?
"NMDs not valid on this rail" : "no longer connected on this rail"); -+ -+ rxd->State = EP_RXD_PUT_STALLED; -+ -+ if (nodeRail->State == EP_NODE_CONNECTED) -+ ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt); -+ } -+ -+ /* install the handler */ -+ rxd->Handler = handler; -+ rxd->Arg = arg; -+ -+ /* store the arguements */ -+ rxd->nFrags = nFrags; -+ for (i = 0; i < nFrags; i++) -+ { -+ rxd->Local[i] = local[i]; -+ rxd->Remote[i] = remote[i]; -+ } -+ } -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+ -+ return EP_SUCCESS; -+} -+ -+int -+ep_rpc_get (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *remote, EP_NMD *local, int nFrags) -+{ -+ EP_RCVR *rcvr = rxd->Rcvr; -+ EP_SYS *sys = rcvr->Subsys->Subsys.Sys; -+ EP_ENVELOPE *env = &rxd->RxdMain->Envelope; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&rcvr->Lock, flags); -+ -+ if (rxd->State == EP_RXD_BEEN_ABORTED) -+ { -+ EPRINTF2 (DBG_RCVR, "ep_rpc_get: rcvr %p rxd %p completed because no rails available\n", rcvr, rxd); -+ -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+ -+ return EP_CONN_RESET; -+ } -+ else -+ { -+ EP_RXD_RAIL *rxdRail = rxd->RxdRail; -+ EP_RCVR_RAIL *rcvrRail = rxdRail->RcvrRail; -+ EP_COMMS_RAIL *commsRail = rcvrRail->CommsRail; -+ EP_RAIL *rail = commsRail->Rail; -+ EP_NODE_RAIL *nodeRail = &rail->Nodes[env->NodeId]; -+ int i; -+ -+ /* Attempt to ensure that the local nmds are mapped */ -+ for (i = 0; i < nFrags; i++) -+ if (! (EP_NMD_RAILMASK(&local[i]) & EP_RAIL2RAILMASK(rail->Number))) -+ ep_nmd_map_rails (sys, &local[i], EP_RAIL2RAILMASK(rail->Number)); -+ -+ if (nodeRail->State == EP_NODE_CONNECTED && /* rail is connected */ -+ (ep_nmd2railmask (local, nFrags) & ep_nmd2railmask (remote, nFrags) & EP_RAIL2RAILMASK (rail->Number))) /* and NMDs valid for it */ -+ { -+ rxd->State = EP_RXD_GET_ACTIVE; -+ -+ EP_RCVR_OP (rcvrRail, RpcGet) (rxd, local, remote, nFrags); -+ } -+ else -+ { -+ /* RPC completion cannot progress - either node is no longer connected on this -+ * node or some of the source/destination NMDs are not mapped on this rail. -+ * Save the NMDs into the RXD and schedule the thread to request mappings */ -+ EPRINTF4 (DBG_RCVR, "%s: ep_rpc_get: rcvr %p rxd %p %s\n", rail->Name, rcvr, rxd, -+ (nodeRail->State == EP_NODE_CONNECTED) ? 
"NMDs not valid on this rail" : "no longer connected on this rail"); -+ -+ rxd->State = EP_RXD_GET_STALLED; -+ -+ if (nodeRail->State == EP_NODE_CONNECTED) -+ ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt); -+ } -+ -+ /* install the handler */ -+ rxd->Handler = handler; -+ rxd->Arg = arg; -+ -+ /* store the arguements */ -+ rxd->nFrags = nFrags; -+ for (i = 0; i < nFrags; i++) -+ { -+ rxd->Local[i] = local[i]; -+ rxd->Remote[i] = remote[i]; -+ } -+ } -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+ -+ return EP_SUCCESS; -+} -+ -+int -+ep_complete_rpc (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_STATUSBLK *blk, EP_NMD *local, EP_NMD *remote, int nFrags) -+{ -+ EP_RCVR *rcvr = rxd->Rcvr; -+ EP_SYS *sys = rcvr->Subsys->Subsys.Sys; -+ EP_ENVELOPE *env = &rxd->RxdMain->Envelope; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&rcvr->Lock, flags); -+ -+ if (rxd->State == EP_RXD_BEEN_ABORTED) -+ { -+ EPRINTF2 (DBG_RCVR, "ep_complete_rpc: rcvr %p rxd %p completed because no rails available\n", rcvr, rxd); -+ -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+ return EP_CONN_RESET; -+ } -+ else -+ { -+ EP_RXD_RAIL *rxdRail = rxd->RxdRail; -+ EP_RCVR_RAIL *rcvrRail = rxdRail->RcvrRail; -+ EP_COMMS_RAIL *commsRail = rcvrRail->CommsRail; -+ EP_RAIL *rail = commsRail->Rail; -+ EP_NODE_RAIL *nodeRail = &rail->Nodes[env->NodeId]; -+ int i; -+ -+ if (blk == NULL) -+ bzero (&rxd->RxdMain->StatusBlk, sizeof (EP_STATUSBLK)); -+ else -+ bcopy (blk, &rxd->RxdMain->StatusBlk, sizeof (EP_STATUSBLK)); -+ -+ /* Attempt to ensure that the local nmds are mapped */ -+ for (i = 0; i < nFrags; i++) -+ if (! (EP_NMD_RAILMASK(&local[i]) & EP_RAIL2RAILMASK(rail->Number))) -+ ep_nmd_map_rails (sys, &local[i], EP_RAIL2RAILMASK(rail->Number)); -+ -+ if (nodeRail->State == EP_NODE_CONNECTED && /* rail is connected */ -+ (ep_nmd2railmask (local, nFrags) & ep_nmd2railmask (remote, nFrags) & EP_RAIL2RAILMASK (rail->Number))) /* and NMDs valid for it */ -+ { -+ rxd->State = EP_RXD_COMPLETE_ACTIVE; -+ -+ EP_RCVR_OP (rcvrRail, RpcComplete) (rxd, local, remote, nFrags); -+ } -+ else -+ { -+ /* RPC completion cannot progress - either node is no longer connected on this -+ * node or some of the source/destination NMDs are not mapped on this rail. -+ * Save the NMDs into the RXD and schedule the thread to request mappings */ -+ EPRINTF4 (DBG_RCVR, "%s: ep_complete_rpc: rcvr %p rxd %p %s\n", rail->Name, rcvr, rxd, -+ (nodeRail->State == EP_NODE_CONNECTED) ? "NMDs not valid on this rail" : "no longer connected on this rail"); -+ -+ rxd->State = EP_RXD_COMPLETE_STALLED; -+ -+ if (nodeRail->State == EP_NODE_CONNECTED) -+ ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt); -+ } -+ -+ /* install the handler */ -+ rxd->Handler = handler; -+ rxd->Arg = arg; -+ -+ /* store the arguements */ -+ rxd->nFrags = nFrags; -+ for (i = 0; i < nFrags; i++) -+ { -+ rxd->Local[i] = local[i]; -+ rxd->Remote[i] = remote[i]; -+ } -+ } -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+ -+ return (ESUCCESS); -+} -+ -+/* functions for accessing fields of rxds */ -+void *ep_rxd_arg(EP_RXD *rxd) { return (rxd->Arg); } -+int ep_rxd_len(EP_RXD *rxd) { return (rxd->RxdMain->Len); } -+EP_STATUS ep_rxd_status(EP_RXD *rxd) { return (rxd->RxdMain->Len < 0 ? rxd->RxdMain->Len : EP_SUCCESS); } -+int ep_rxd_isrpc(EP_RXD *rxd) { return (EP_IS_RPC(rxd->RxdMain->Envelope.Attr) != 0); } -+EP_ENVELOPE *ep_rxd_envelope(EP_RXD *rxd) { return (&rxd->RxdMain->Envelope); } -+EP_PAYLOAD *ep_rxd_payload(EP_RXD *rxd) { return (EP_HAS_PAYLOAD(rxd->RxdMain->Envelope.Attr) ? 
&rxd->RxdMain->Payload : NULL); }
-+int ep_rxd_node(EP_RXD *rxd) { return (rxd->RxdMain->Envelope.NodeId); }
-+EP_STATUSBLK *ep_rxd_statusblk(EP_RXD *rxd) { return (&rxd->RxdMain->StatusBlk); }
-+EP_RAILMASK ep_rxd_railmask(EP_RXD *rxd) { return (rxd->Data.nmd_attr); }
-+
-+static void
-+ProcessNmdMapResponse (EP_RCVR *rcvr, EP_RXD *rxd, EP_MANAGER_MSG *msg)
-+{
-+ EP_RXD_RAIL *rxdRail = rxd->RxdRail;
-+ EP_RCVR_RAIL *rcvrRail = rxdRail->RcvrRail;
-+ EP_RAIL *rail = rcvrRail->CommsRail->Rail;
-+ EP_NODE_RAIL *nodeRail = &rail->Nodes[rxd->RxdMain->Envelope.NodeId];
-+ int i;
-+
-+ ASSERT (msg->Body.MapNmd.nFrags == rxd->nFrags);
-+
-+ for (i = 0; i < rxd->nFrags; i++)
-+ rxd->Remote[i] = msg->Body.MapNmd.Nmd[i];
-+
-+ if (nodeRail->State == EP_NODE_CONNECTED && /* node is still connected on this rail */
-+ (ep_nmd2railmask (rxd->Local, rxd->nFrags) & ep_nmd2railmask (rxd->Remote, rxd->nFrags) & EP_RAIL2RAILMASK (rail->Number))) /* NMDs are now valid for this rail */
-+ {
-+ switch (rxd->State)
-+ {
-+ case EP_RXD_PUT_STALLED:
-+ rxd->State = EP_RXD_PUT_ACTIVE;
-+
-+ EP_RCVR_OP(rcvrRail, RpcPut) (rxd, rxd->Local, rxd->Remote, rxd->nFrags);
-+ break;
-+
-+ case EP_RXD_GET_STALLED:
-+ rxd->State = EP_RXD_GET_ACTIVE;
-+
-+ EP_RCVR_OP(rcvrRail, RpcGet) (rxd, rxd->Local, rxd->Remote, rxd->nFrags);
-+ break;
-+
-+ case EP_RXD_COMPLETE_STALLED:
-+ rxd->State = EP_RXD_COMPLETE_ACTIVE;
-+
-+ EP_RCVR_OP(rcvrRail, RpcComplete) (rxd, rxd->Local, rxd->Remote, rxd->nFrags);
-+ break;
-+
-+ default:
-+ panic ("ProcessNmdMapResponse: XID match but rxd in invalid state\n");
-+ break;
-+ }
-+
-+ rxd->NextRunTime = 0;
-+ }
-+ else
-+ ep_debugf (DBG_MANAGER, "%s: ep_rcvr_xid_msg_handler: rcvr=%p rxd=%p - still cannot proceed\n", rail->Name, rcvr, rxd);
-+}
-+
-+static void
-+ProcessFailoverResponse (EP_RCVR *rcvr, EP_RXD *rxd, EP_MANAGER_MSG *msg)
-+{
-+ /* XXXX - TBD */
-+#ifdef NOTYET
-+ EP_COMMS_SUBSYS *subsys = rcvr->Subsys;
-+ EP_RXD_RAIL *rxdRail = rxd->RxdRail;
-+ EP_RCVR_RAIL *rcvrRail = rxdRail->RcvrRail;
-+ EP_RAIL *rail = rcvrRail->CommsRail->Rail;
-+ EP_RCVR_RAIL *nRcvrRail;
-+ EP_RXD_RAIL *nRxdRail;
-+
-+ ASSERT (rxd->RxdMain->Envelope.Attr & EP_RPC);
-+
-+ EPRINTF6 (DBG_RCVR, "ep_rcvr_xid_msg_handler: rcvr=%p rxd=%p Xid=%016llx state %x.%x - txd on rail %d\n", rcvr, rxd,
-+ rxd->MsgXid.Unique, rxdRail->RxdMain->DataEvent, rxdRail->RxdMain->DoneEvent, msg->Body.FailoverTxd.Rail);
-+
-+ if ((nRcvrRail = rcvr->Rails[msg->Body.FailoverTxd.Rail]) == NULL ||
-+ (nRcvrRail->Rcvr->RailMask & EP_RAIL2RAILMASK (rail->Number)) == 0)
-+ {
-+ ep_debugf (DBG_MANAGER, "%s: ep_rcvr_xid_msg_handler: rcvr=%p rxd=%p - still cannot proceed\n", rail->Name, rcvr, rxd);
-+ return;
-+ }
-+
-+ nRxdRail = EP_RCVR_OP (nRcvrRail, GetRxd) (rcvr, nRcvrRail);
-+
-+ /* If the RPC was in progress, then rollback and mark it as flagged,
-+ * this will then get treated as though the NMDs were not mapped
-+ * for the rail when the user initiated the operation.
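-+ * The rxd is then rebound to a descriptor on the new rail and the
-+ * subsystem thread kicked to drive the remaining phases through.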
-+ */
-+ switch (rxdRail->RxdMain->DataEvent)
-+ {
-+ case EP_EVENT_ACTIVE|EP_RXD_PHASE_PUT:
-+ case EP_EVENT_FLAGGED|EP_RXD_PHASE_PUT:
-+ ASSERT (rxdRail->RxdMain->DoneEvent == EP_EVENT_PRIVATE ||
-+ rxdRail->RxdMain->DoneEvent == EP_EVENT_PENDING);
-+
-+ nRxdRail->RxdMain->DataEvent = EP_EVENT_FLAGGED|EP_RXD_PHASE_PUT;
-+ nRxdRail->RxdMain->DoneEvent = EP_EVENT_PENDING;
-+ break;
-+
-+ case EP_EVENT_ACTIVE|EP_RXD_PHASE_GET:
-+ case EP_EVENT_FLAGGED|EP_RXD_PHASE_GET:
-+ ASSERT (rxdRail->RxdMain->DoneEvent == EP_EVENT_PRIVATE ||
-+ rxdRail->RxdMain->DoneEvent == EP_EVENT_PENDING);
-+
-+ nRxdRail->RxdMain->DataEvent = EP_EVENT_FLAGGED|EP_RXD_PHASE_GET;
-+ nRxdRail->RxdMain->DoneEvent = EP_EVENT_PENDING;
-+ break;
-+
-+ case EP_EVENT_PRIVATE:
-+ switch (rxdRail->RxdMain->DoneEvent)
-+ {
-+ case EP_EVENT_ACTIVE|EP_RXD_PHASE_COMPLETE:
-+ case EP_EVENT_FLAGGED|EP_RXD_PHASE_COMPLETE:
-+ nRxdRail->RxdMain->DataEvent = EP_EVENT_PRIVATE;
-+ nRxdRail->RxdMain->DoneEvent = EP_EVENT_FLAGGED|EP_RXD_PHASE_COMPLETE;
-+ break;
-+
-+ case EP_EVENT_PENDING:
-+ break;
-+
-+ default:
-+ panic ("ep_rcvr_xid_msg_handler: rxd in invalid state\n");
-+ }
-+ break;
-+
-+ default:
-+ panic ("ep_rcvr_xid_msg_handler: rxd in invalid state\n");
-+ }
-+
-+ UnbindRxdFromRail (rxd, rxdRail);
-+
-+ /* Mark rxdRail as no longer active */
-+ rxdRail->RxdMain->DataEvent = EP_EVENT_PRIVATE;
-+ rxdRail->RxdMain->DoneEvent = EP_EVENT_PRIVATE;
-+
-+ sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP_RXD_RAIL_ELAN, DataEvent.ev_Count), 0);
-+ sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP_RXD_RAIL_ELAN, DoneEvent.ev_Count), 0);
-+
-+ FreeRxdRail (rcvrRail, rxdRail);
-+
-+ BindRxdToRail (rxd, nRxdRail);
-+
-+ ep_kthread_schedule (&subsys->Thread, lbolt);
-+#endif
-+}
-+
-+void
-+ep_rcvr_xid_msg_handler (void *arg, EP_MANAGER_MSG *msg)
-+{
-+ EP_RCVR *rcvr = (EP_RCVR *) arg;
-+ struct list_head *el;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave (&rcvr->Lock, flags);
-+ list_for_each (el, &rcvr->ActiveDescList) {
-+ EP_RXD *rxd = list_entry (el, EP_RXD, Link);
-+
-+ if (EP_XIDS_MATCH (msg->Hdr.Xid, rxd->MsgXid))
-+ {
-+ EP_INVALIDATE_XID (rxd->MsgXid);
-+
-+ switch (msg->Hdr.Type)
-+ {
-+ case EP_MANAGER_MSG_TYPE_MAP_NMD_RESPONSE:
-+ ProcessNmdMapResponse (rcvr, rxd, msg);
-+ break;
-+
-+ case EP_MANAGER_MSG_TYPE_FAILOVER_RESPONSE:
-+ ProcessFailoverResponse (rcvr, rxd, msg);
-+ break;
-+
-+ default:
-+ panic ("ep_rcvr_xid_msg_handler: XID match but invalid message type\n");
-+ }
-+ }
-+ }
-+ spin_unlock_irqrestore (&rcvr->Lock, flags);
-+}
-+
-+
-+EP_RCVR *
-+ep_alloc_rcvr (EP_SYS *sys, EP_SERVICE svc, unsigned int nenvs)
-+{
-+ EP_COMMS_SUBSYS *subsys;
-+ EP_RCVR *rcvr;
-+ struct list_head *el;
-+ extern int portals_envelopes;
-+
-+ if (portals_envelopes && (svc == EP_MSG_SVC_PORTALS_SMALL || svc == EP_MSG_SVC_PORTALS_LARGE))
-+ {
-+ printk ("ep: use %d envelopes rather than %d for portals %s message service\n", sys->Position.pos_nodes * 16, nenvs,
-+ svc == EP_MSG_SVC_PORTALS_SMALL ?
"small" : "large"); -+ -+ nenvs = portals_envelopes; -+ } -+ -+ if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (sys, EPCOMMS_SUBSYS_NAME)) == NULL) -+ return (NULL); -+ -+ KMEM_ZALLOC (rcvr, EP_RCVR *, sizeof (EP_RCVR), 1); -+ -+ if (rcvr == NULL) -+ return (NULL); -+ -+ rcvr->Subsys = subsys; -+ rcvr->Service = svc; -+ rcvr->InputQueueEntries = nenvs; -+ rcvr->FreeDescCount = 0; -+ rcvr->TotalDescCount = 0; -+ rcvr->ForwardRxdCount = 0; -+ -+ spin_lock_init (&rcvr->Lock); -+ INIT_LIST_HEAD (&rcvr->ActiveDescList); -+ -+ kcondvar_init (&rcvr->CleanupSleep); -+ kcondvar_init (&rcvr->FreeDescSleep); -+ spin_lock_init (&rcvr->FreeDescLock); -+ INIT_LIST_HEAD (&rcvr->FreeDescList); -+ INIT_LIST_HEAD (&rcvr->DescBlockList); -+ -+ ep_xid_cache_init (sys, &rcvr->XidCache); -+ -+ rcvr->XidCache.MessageHandler = ep_rcvr_xid_msg_handler; -+ rcvr->XidCache.Arg = rcvr; -+ -+ kmutex_lock (&subsys->Lock); -+ /* See if this service is already in use */ -+ list_for_each (el, &subsys->Receivers) { -+ EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link); -+ -+ if (rcvr->Service == svc) -+ { -+ KMEM_FREE (rcvr, sizeof (EP_RCVR)); -+ kmutex_unlock (&subsys->Lock); -+ return NULL; -+ } -+ } -+ -+ -+ list_add_tail (&rcvr->Link, &subsys->Receivers); -+ -+ ep_procfs_rcvr_add(rcvr); -+ -+ /* Now add all rails which are already started */ -+ list_for_each (el, &subsys->Rails) { -+ EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link); -+ -+ EP_RAIL_OP (commsRail, Rcvr.AddRail) (rcvr, commsRail); -+ } -+ kmutex_unlock (&subsys->Lock); -+ -+ ep_mod_inc_usecount(); -+ -+ return (rcvr); -+} -+ -+void -+ep_free_rcvr (EP_RCVR *rcvr) -+{ -+ EP_COMMS_SUBSYS *subsys = rcvr->Subsys; -+ EP_SYS *sys = subsys->Subsys.Sys; -+ struct list_head list; -+ struct list_head *el,*nel; -+ unsigned long flags; -+ -+ kmutex_lock (&subsys->Lock); -+ list_for_each (el, &subsys->Rails) { -+ EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link); -+ -+ EP_RAIL_OP (commsRail, Rcvr.DelRail) (rcvr, commsRail); -+ } -+ -+ ep_procfs_rcvr_del(rcvr); -+ -+ list_del (&rcvr->Link); -+ kmutex_unlock (&subsys->Lock); -+ -+ INIT_LIST_HEAD (&list); -+ -+ /* abort all rxds - should not be bound to a rail */ -+ spin_lock_irqsave (&rcvr->Lock, flags); -+ for (;;) -+ { -+ if (! list_empty (&rcvr->ActiveDescList)) -+ { -+ list_for_each_safe (el, nel, &rcvr->ActiveDescList) { -+ EP_RXD *rxd = list_entry (el, EP_RXD, Link); -+ -+ ASSERT (rxd->RxdRail == NULL); -+ ASSERT (rxd->RxdMain->Len == EP_RXD_PENDING); -+ -+ rxd->State = EP_RXD_COMPLETED; -+ rxd->RxdMain->Len = EP_SHUTDOWN; -+ -+ list_del (&rxd->Link); -+ list_add_tail (&rxd->Link, &list); -+ } -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+ -+ while (! list_empty (&list)) -+ { -+ EP_RXD *rxd = list_entry (list.next, EP_RXD, Link); -+ -+ list_del (&rxd->Link); -+ -+ if (rxd->Handler) -+ rxd->Handler (rxd); -+ } -+ spin_lock_irqsave (&rcvr->Lock, flags); -+ continue; -+ } -+ -+ if (rcvr->FreeDescCount == rcvr->TotalDescCount) -+ break; -+ -+ rcvr->CleanupWaiting++; -+ kcondvar_wait (&rcvr->CleanupSleep, &rcvr->Lock, &flags); -+ } -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+ -+ /* must all be in free list */ -+ ASSERT( rcvr->FreeDescCount == rcvr->TotalDescCount); -+ -+ while (! 
list_empty(& rcvr->DescBlockList) )
-+ FreeRxdBlock (rcvr, list_entry (rcvr->DescBlockList.next, EP_RXD_BLOCK, Link));
-+
-+ /* had better be all gone now */
-+ ASSERT((rcvr->FreeDescCount == 0) && (rcvr->TotalDescCount == 0));
-+
-+ ep_xid_cache_destroy (sys, &rcvr->XidCache);
-+
-+ spin_lock_destroy (&rcvr->Lock);
-+ KMEM_FREE (rcvr, sizeof (EP_RCVR));
-+
-+ ep_mod_dec_usecount();
-+}
-+
-+EP_RXD *
-+StealRxdFromOtherRail (EP_RCVR *rcvr)
-+{
-+ EP_RXD *rxd;
-+ int i;
-+
-+ /* looking at the rcvr railmask to find a rail to try to steal rxd from */
-+ for (i = 0; i < EP_MAX_RAILS; i++)
-+ if (rcvr->RailMask & (1 << i) )
-+ if ((rxd = EP_RCVR_OP (rcvr->Rails[i], StealRxd) (rcvr->Rails[i])) != NULL)
-+ return rxd;
-+
-+ return NULL;
-+}
-+
-+long
-+CheckUnboundRxd (EP_RCVR *rcvr, EP_RXD *rxd, long nextRunTime)
-+{
-+ EP_SYS *sys = rcvr->Subsys->Subsys.Sys;
-+ EP_RCVR_RAIL *rcvrRail;
-+ int rnum;
-+
-+ if ((rnum = ep_rcvr_prefrail (rcvr, EP_NMD_RAILMASK(&rxd->Data))) < 0)
-+ rnum = ep_rcvr_prefrail (rcvr, ep_rcvr_availrails (rcvr));
-+
-+ if ( rnum < 0 ) {
-+ if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
-+ nextRunTime = lbolt + RESOURCE_RETRY_TIME;
-+
-+ return (nextRunTime);
-+ }
-+
-+ ASSERT ( rnum >= 0 );
-+
-+ rcvrRail = rcvr->Rails[rnum];
-+
-+ ASSERT ( rcvrRail != NULL);
-+
-+ rxd->State = EP_RXD_RECEIVE_ACTIVE;
-+
-+ if ((!(EP_NMD_RAILMASK (&rxd->Data) & EP_RAIL2RAILMASK(rnum)) && /* not mapped already and */
-+ ep_nmd_map_rails (sys, &rxd->Data, EP_RAIL2RAILMASK(rnum)) == 0) || /* failed mapping, or */
-+ !EP_RCVR_OP (rcvrRail, QueueRxd) (rxd, rcvrRail)) /* failed to queue */
-+ {
-+ ASSERT (rxd->RxdRail == NULL);
-+
-+ EPRINTF4 (DBG_RCVR,"CheckUnboundRxd: rcvr=%p rxd=%p -> rnum=%d rcvrRail=%p (failed)\n", rcvr, rxd, rnum, rcvrRail);
-+
-+ rxd->State = EP_RXD_RECEIVE_UNBOUND;
-+
-+ if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
-+ nextRunTime = lbolt + RESOURCE_RETRY_TIME;
-+ }
-+
-+ return (nextRunTime);
-+}
-+
-+int
-+CheckRxdNmdsMapped (EP_RCVR *rcvr, EP_RXD *rxd)
-+{
-+ EP_RXD_RAIL *rxdRail = rxd->RxdRail;
-+ EP_RXD_MAIN *rxdMain = rxd->RxdMain;
-+ EP_ENVELOPE *env = &rxdMain->Envelope;
-+ EP_SYS *sys = rcvr->Subsys->Subsys.Sys;
-+ EP_RAIL *rail = rxdRail->RcvrRail->CommsRail->Rail;
-+ int i;
-+
-+ /* Try and map the local NMDs before checking to see if we can proceed */
-+ if (! (ep_nmd2railmask (rxd->Local, rxd->nFrags) & EP_RAIL2RAILMASK (rail->Number)))
-+ {
-+ EPRINTF3 (DBG_MAPNMD, "%s: rcvr=%p rxd=%p RPC Local NMDs not mapped\n", rail->Name, rcvr, rxd);
-+
-+ for (i = 0; i < rxd->nFrags; i++)
-+ if (! (EP_NMD_RAILMASK(&rxd->Local[i]) & EP_RAIL2RAILMASK(rail->Number)))
-+ if (ep_nmd_map_rails (sys, &rxd->Local[i], EP_RAIL2RAILMASK(rail->Number)))
-+ rxd->NextRunTime = lbolt + RESOURCE_RETRY_TIME;
-+ }
-+
-+ /* Try and map remote NMDs if they are not valid for this rail */
-+ if (!
(ep_nmd2railmask (rxd->Remote, rxd->nFrags) & EP_RAIL2RAILMASK (rail->Number))) -+ { -+ EP_MANAGER_MSG_BODY msgBody; -+ -+ EPRINTF3 (DBG_MAPNMD, "%s: rcvr=%p rxd=%p RPC Remote NMDs not mapped\n", rail->Name, rcvr, rxd); -+ -+ if (EP_XID_INVALID(rxd->MsgXid)) -+ rxd->MsgXid = ep_xid_cache_alloc (sys, &rcvr->XidCache); -+ -+ msgBody.MapNmd.nFrags = rxd->nFrags; -+ msgBody.MapNmd.Railmask = EP_RAIL2RAILMASK (rail->Number); -+ for (i = 0; i < rxd->nFrags; i++) -+ msgBody.MapNmd.Nmd[i] = rxd->Remote[i]; -+ -+ if (ep_send_message (rail, env->NodeId, EP_MANAGER_MSG_TYPE_MAP_NMD_REQUEST, rxd->MsgXid, &msgBody) == 0) -+ rxd->NextRunTime = lbolt + MESSAGE_RETRY_TIME; -+ else -+ rxd->NextRunTime = lbolt + MSGBUSY_RETRY_TIME; -+ -+ return 0; -+ } -+ -+ if ((ep_nmd2railmask (rxd->Local, rxd->nFrags) & ep_nmd2railmask (rxd->Remote, rxd->nFrags) & EP_RAIL2RAILMASK (rail->Number)) != 0) -+ { -+ rxd->NextRunTime = 0; -+ return 1; -+ } -+ -+ return 0; -+} -+ -+long -+ep_check_rcvr (EP_RCVR *rcvr, long nextRunTime) -+{ -+ struct list_head *el, *nel; -+ unsigned long flags; -+ int i; -+ -+ /* Check to see if we're low on rxds */ -+ if (rcvr->FreeDescCount < ep_rxd_lowat) -+ AllocateRxdBlock (rcvr, 0, NULL); -+ -+ for (i = 0; i < EP_MAX_RAILS; i++) -+ if (rcvr->RailMask & (1 << i) ) -+ nextRunTime = EP_RCVR_OP (rcvr->Rails[i], Check) (rcvr->Rails[i], nextRunTime); -+ -+ /* See if we have any rxd's which need to be handled */ -+ spin_lock_irqsave (&rcvr->Lock, flags); -+ list_for_each_safe (el, nel, &rcvr->ActiveDescList) { -+ EP_RXD *rxd = list_entry (el, EP_RXD, Link); -+ EP_RXD_MAIN *rxdMain = rxd->RxdMain; -+ EP_ENVELOPE *env = &rxdMain->Envelope; -+ EP_RXD_RAIL *rxdRail = rxd->RxdRail; -+ -+ if (rxdRail == NULL) -+ nextRunTime = CheckUnboundRxd (rcvr, rxd, nextRunTime); -+ else -+ { -+ EP_RCVR_RAIL *rcvrRail = rxdRail->RcvrRail; -+ EP_RAIL *rail = rcvrRail->CommsRail->Rail; -+ -+ if (rxd->RxdMain->Len == EP_RXD_PENDING || /* envelope not received yet */ -+ rail->Nodes[env->NodeId].State != EP_NODE_CONNECTED) /* will be failing over */ -+ continue; -+ -+ switch (rxd->State) -+ { -+ case EP_RXD_PUT_STALLED: -+ if (CheckRxdNmdsMapped (rcvr, rxd)) -+ { -+ rxd->State = EP_RXD_PUT_ACTIVE; -+ -+ EP_RCVR_OP (rcvrRail, RpcPut) (rxd, rxd->Local, rxd->Remote, rxd->nFrags); -+ } -+ break; -+ -+ case EP_RXD_GET_STALLED: -+ if (CheckRxdNmdsMapped (rcvr, rxd)) -+ { -+ rxd->State = EP_RXD_GET_ACTIVE; -+ -+ EP_RCVR_OP (rcvrRail, RpcGet) (rxd, rxd->Local, rxd->Remote, rxd->nFrags); -+ } -+ break; -+ -+ case EP_RXD_COMPLETE_STALLED: -+ if (CheckRxdNmdsMapped (rcvr, rxd)) -+ { -+ rxd->State = EP_RXD_COMPLETE_ACTIVE; -+ -+ EP_RCVR_OP (rcvrRail, RpcComplete)(rxd, rxd->Local, rxd->Remote, rxd->nFrags); -+ } -+ break; -+ } -+ -+ if (rxd->NextRunTime && (nextRunTime == 0 || AFTER (nextRunTime, rxd->NextRunTime))) -+ nextRunTime = rxd->NextRunTime; -+ } -+ } -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+ -+ return (nextRunTime); -+} -+ -+void -+ep_display_rxd (DisplayInfo *di, EP_RXD *rxd) -+{ -+ EP_RXD_MAIN *rxdMain = rxd->RxdMain; -+ EP_ENVELOPE *env = &rxdMain->Envelope; -+ EP_RXD_RAIL *rxdRail = rxd->RxdRail; -+ -+ (di->func)(di->arg, " RXD: %p State=%x RxdMain=%p(%x.%x.%x) Data=%x.%x.%x %s\n", rxd, -+ rxd->State, rxd->RxdMain, rxd->NmdMain.nmd_addr, rxd->NmdMain.nmd_len, -+ rxd->NmdMain.nmd_attr, rxd->Data.nmd_addr, rxd->Data.nmd_len, rxd->Data.nmd_attr, -+ rxd->RxdMain->Len == EP_RXD_PENDING ? 
"Pending" : "Active"); -+ (di->func)(di->arg, " NodeId=%d Range=%d.%d TxdRail=%x TxdMain=%x.%x.%x nFrags=%d XID=%08x.%08x.%016llx\n", -+ env->NodeId, EP_RANGE_LOW(env->Range), EP_RANGE_HIGH(env->Range), env->TxdRail, env->TxdMain.nmd_addr, -+ env->TxdMain.nmd_len, env->TxdMain.nmd_attr, env->nFrags, env->Xid.Generation, env->Xid.Handle, env->Xid.Unique);; -+ (di->func)(di->arg, " Frag[0] %08x.%08x.%08x\n", env->Frags[0].nmd_addr, env->Frags[0].nmd_len, env->Frags[0].nmd_attr); -+ (di->func)(di->arg, " Frag[1] %08x.%08x.%08x\n", env->Frags[1].nmd_addr, env->Frags[1].nmd_len, env->Frags[1].nmd_attr); -+ (di->func)(di->arg, " Frag[2] %08x.%08x.%08x\n", env->Frags[2].nmd_addr, env->Frags[2].nmd_len, env->Frags[2].nmd_attr); -+ (di->func)(di->arg, " Frag[3] %08x.%08x.%08x\n", env->Frags[3].nmd_addr, env->Frags[3].nmd_len, env->Frags[3].nmd_attr); -+ -+ if (rxdRail) EP_RCVR_OP (rxdRail->RcvrRail, DisplayRxd) (di, rxdRail); -+} -+ -+void -+ep_display_rcvr (DisplayInfo *di, EP_RCVR *rcvr, int full) -+{ -+ int freeCount = 0; -+ int activeCount = 0; -+ int pendingCount = 0; -+ int railCounts[EP_MAX_RAILS]; -+ struct list_head *el; -+ int i; -+ unsigned long flags; -+ -+ for (i = 0; i FreeDescLock, flags); -+ list_for_each (el, &rcvr->FreeDescList) -+ freeCount++; -+ spin_unlock_irqrestore (&rcvr->FreeDescLock, flags); -+ -+ spin_lock_irqsave (&rcvr->Lock, flags); -+ list_for_each (el, &rcvr->ActiveDescList) { -+ EP_RXD *rxd = list_entry (el, EP_RXD, Link); -+ EP_RXD_RAIL *rxdRail = rxd->RxdRail; -+ -+ if (rxd->RxdMain->Len == EP_RXD_PENDING) -+ pendingCount++; -+ else -+ activeCount++; -+ -+ if (rxdRail) -+ railCounts[rxdRail->RcvrRail->CommsRail->Rail->Number]++; -+ } -+ -+ (di->func)(di->arg, "RCVR: rcvr=%p number=%d\n", rcvr, rcvr->Service); -+ (di->func)(di->arg, " RXDS Free=%d (%d) Pending=%d Active=%d Rails=%d.%d.%d.%d\n", -+ freeCount, rcvr->FreeDescCount, pendingCount, activeCount, railCounts[0], railCounts[1], -+ railCounts[2], railCounts[3]); -+ -+ for (i = 0; i < EP_MAX_RAILS; i++) -+ if (rcvr->Rails[i] != NULL) -+ EP_RCVR_OP (rcvr->Rails[i], DisplayRcvr) (di, rcvr->Rails[i]); -+ -+ list_for_each (el, &rcvr->ActiveDescList) { -+ EP_RXD *rxd = list_entry (el, EP_RXD, Link); -+ -+ if (rxd->RxdMain->Len != EP_RXD_PENDING || full) -+ ep_display_rxd (di, rxd); -+ } -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+} -+ -+void -+ep_rxd_received_now(EP_RXD *rxd) -+{ -+ EP_ENVELOPE *env = &rxd->RxdMain->Envelope; -+ EP_RCVR *rcvr = rxd->Rcvr; -+ unsigned long flags; -+ -+ INC_STAT(rcvr->stats,rx); -+ ADD_STAT(rcvr->stats,rx_len, rxd->RxdMain->Len); -+ -+ if (rxd->RxdMain->Len < 0 || !EP_IS_MULTICAST(env->Attr)) -+ { -+ rxd->Handler (rxd); -+ } -+ else -+ { -+ EPRINTF5 (DBG_RCVR, "ep_rxd_received: forward rxd=%p Data=%08x.%08x.%08x len=%d\n", rxd, -+ rxd->Data.nmd_addr, rxd->Data.nmd_len, rxd->Data.nmd_attr, ep_rxd_len(rxd)); -+ -+ spin_lock_irqsave (&rcvr->Subsys->ForwardDescLock, flags); -+ list_add_tail (&rxd->Link, &rcvr->Subsys->ForwardDescList); -+ spin_unlock_irqrestore (&rcvr->Subsys->ForwardDescLock, flags); -+ -+ ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt); -+ } -+} -+ -+#if defined(CONFIG_EP_NO_CHECK_SUM) -+void -+ep_rxd_received(EP_RXD *rxd) -+{ -+ ep_rxd_received_now(rxd); -+} -+ -+#else -+ -+void -+ep_rxd_received(EP_RXD *rxd) -+{ -+ EP_ENVELOPE *env = &rxd->RxdMain->Envelope; -+ -+ if (env->CheckSum) -+ ep_rxd_queue_csum(rxd); -+ else -+ ep_rxd_received_now(rxd); -+} -+ -+void -+ep_rxd_queue_csum(EP_RXD *rxd) -+{ -+ EP_RCVR *rcvr = rxd->Rcvr; -+ unsigned long flags; -+ -+ 
EPRINTF5 (DBG_RCVR, "ep_rxd_queue_csum: rxd=%p Data=%08x.%08x.%08x len=%d\n", rxd, -+ rxd->Data.nmd_addr, rxd->Data.nmd_len, rxd->Data.nmd_attr, ep_rxd_len(rxd)); -+ -+ spin_lock_irqsave (&rcvr->Subsys->CheckSumDescLock, flags); -+ list_add_tail (&rxd->CheckSumLink, &rcvr->Subsys->CheckSumDescList); -+ spin_unlock_irqrestore (&rcvr->Subsys->CheckSumDescLock, flags); -+ -+ ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt); -+} -+#endif -+ -+void -+ep_rcvr_fillout_stats(EP_RCVR *rcvr, char *str) -+{ -+ sprintf(str+strlen(str),"Rx %lu %lu /sec\n", GET_STAT_TOTAL(rcvr->stats,rx), GET_STAT_PER_SEC(rcvr->stats,rx) ); -+ sprintf(str+strlen(str),"MBytes %lu %lu Mbytes/sec\n", GET_STAT_TOTAL(rcvr->stats,rx_len) / (1024*1024), GET_STAT_PER_SEC(rcvr->stats,rx_len) / (1024*1024)); -+} -+ -+void -+ep_rcvr_rail_fillout_stats(EP_RCVR_RAIL *rcvr_rail, char *str) -+{ -+ sprintf(str+strlen(str),"Rx %lu %lu /sec\n", GET_STAT_TOTAL(rcvr_rail->stats,rx), GET_STAT_PER_SEC(rcvr_rail->stats,rx) ); -+ sprintf(str+strlen(str),"MBytes %lu %lu Mbytes/sec\n", GET_STAT_TOTAL(rcvr_rail->stats,rx_len) / (1024*1024), GET_STAT_PER_SEC(rcvr_rail->stats,rx_len) / (1024*1024)); -+} -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/epcommsRx_elan3.c linux-2.6.9/drivers/net/qsnet/ep/epcommsRx_elan3.c ---- clean/drivers/net/qsnet/ep/epcommsRx_elan3.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/epcommsRx_elan3.c 2005-03-10 10:25:52.000000000 -0500 -@@ -0,0 +1,1776 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: epcommsRx_elan3.c,v 1.24 2005/03/10 15:25:52 mike Exp $" -+/* $Source: /cvs/master/quadrics/epmod/epcommsRx_elan3.c,v $ */ -+ -+#include -+ -+#include -+#include -+#include -+ -+#include "kcomm_vp.h" -+#include "kcomm_elan3.h" -+#include "epcomms_elan3.h" -+#include "debug.h" -+ -+#define RCVR_TO_RAIL(rcvrRail) ((EP3_RAIL *) ((EP_RCVR_RAIL *) rcvrRail)->CommsRail->Rail) -+#define RCVR_TO_DEV(rcvrRail) (RCVR_TO_RAIL(rcvrRail)->Device) -+#define RCVR_TO_SUBSYS(rcvrRail) (((EP_RCVR_RAIL *) rcvrRail)->Rcvr->Subsys) -+ -+static void RxDataEvent (EP3_RAIL *rail, void *arg); -+static void RxDataRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status); -+static void RxDataVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma); -+ -+static EP3_COOKIE_OPS RxDataCookieOps = -+{ -+ RxDataEvent, -+ RxDataRetry, -+ NULL, /* DmaCancelled */ -+ RxDataVerify, -+}; -+ -+static void RxDoneEvent (EP3_RAIL *rail, void *arg); -+static void RxDoneRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status); -+static void RxDoneVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma); -+ -+static EP3_COOKIE_OPS RxDoneCookieOps = -+{ -+ RxDoneEvent, -+ RxDoneRetry, -+ NULL, /* DmaCancelled */ -+ RxDoneVerify, -+}; -+ -+static int -+AllocateRxdRailBlock (EP3_RCVR_RAIL *rcvrRail) -+{ -+ EP3_RAIL *rail = RCVR_TO_RAIL(rcvrRail); -+ ELAN3_DEV *dev = rail->Device; -+ EP3_RXD_RAIL_BLOCK *blk; -+ EP3_RXD_RAIL *rxdRail; -+ sdramaddr_t pRxdElan; -+ EP3_RXD_RAIL_MAIN *pRxdMain; -+ E3_Addr pRxdElanAddr; -+ E3_Addr pRxdMainAddr; -+ E3_BlockCopyEvent event; -+ int i, j; -+ unsigned long flags; -+ -+ KMEM_ZALLOC (blk, EP3_RXD_RAIL_BLOCK *, sizeof (EP3_RXD_RAIL_BLOCK), 1); -+ if (blk == NULL) -+ return 0; -+ -+ if ((pRxdElan = ep_alloc_elan (&rail->Generic, EP3_RXD_RAIL_ELAN_SIZE * 
EP3_NUM_RXD_PER_BLOCK, 0, &pRxdElanAddr)) == (sdramaddr_t) 0) -+ { -+ KMEM_FREE (blk, sizeof (EP3_RXD_RAIL_BLOCK)); -+ return 0; -+ } -+ -+ if ((pRxdMain = ep_alloc_main (&rail->Generic, EP3_RXD_RAIL_MAIN_SIZE * EP3_NUM_RXD_PER_BLOCK, 0, &pRxdMainAddr)) == (sdramaddr_t) 0) -+ { -+ ep_free_elan (&rail->Generic, pRxdElanAddr, EP3_RXD_RAIL_ELAN_SIZE * EP3_NUM_RXD_PER_BLOCK); -+ KMEM_FREE (blk, sizeof (EP3_RXD_RAIL_BLOCK)); -+ return 0; -+ } -+ -+ if (ReserveDmaRetries (rail, EP3_NUM_RXD_PER_BLOCK, 0) != ESUCCESS) -+ { -+ ep_free_main (&rail->Generic, pRxdMainAddr, EP3_RXD_RAIL_MAIN_SIZE * EP3_NUM_RXD_PER_BLOCK); -+ ep_free_elan (&rail->Generic, pRxdElanAddr, EP3_RXD_RAIL_ELAN_SIZE * EP3_NUM_RXD_PER_BLOCK); -+ KMEM_FREE (blk, sizeof (EP3_RXD_RAIL_BLOCK)); -+ return 0; -+ } -+ -+ for (rxdRail = &blk->Rxd[0], i = 0; i < EP3_NUM_RXD_PER_BLOCK; i++, rxdRail++) -+ { -+ rxdRail->Generic.RcvrRail = (EP_RCVR_RAIL *) rcvrRail; -+ rxdRail->RxdElan = pRxdElan; -+ rxdRail->RxdElanAddr = pRxdElanAddr; -+ rxdRail->RxdMain = pRxdMain; -+ rxdRail->RxdMainAddr = pRxdMainAddr; -+ -+ elan3_sdram_writel (dev, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, RxdMain), 0); -+ elan3_sdram_writel (dev, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, Next), 0); -+ elan3_sdram_writeq (dev, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, MainAddr), (long) rxdRail); -+ -+ for (j = 0; j < EP_MAXFRAG; j++) -+ { -+ RegisterCookie (&rail->CookieTable, &rxdRail->ChainCookie[j], pRxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[j]), &RxDataCookieOps, (void *) rxdRail); -+ -+ event.ev_Type = EV_TYPE_DMA | (pRxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, Dmas[j+1])); -+ event.ev_Count = 0; -+ -+ elan3_sdram_copyl_to_sdram (dev, &event, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[j]), sizeof (E3_BlockCopyEvent)); -+ } -+ -+ RegisterCookie (&rail->CookieTable, &rxdRail->DataCookie, pRxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, DataEvent), &RxDataCookieOps, (void *) rxdRail); -+ RegisterCookie (&rail->CookieTable, &rxdRail->DoneCookie, pRxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent), &RxDoneCookieOps, (void *) rxdRail); -+ -+ EP3_INIT_COPY_EVENT (event, rxdRail->DataCookie, pRxdMainAddr + offsetof (EP3_RXD_RAIL_MAIN, DataEvent), 1); -+ elan3_sdram_copyl_to_sdram (dev, &event, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent), sizeof (E3_BlockCopyEvent)); -+ -+ EP3_INIT_COPY_EVENT (event, rxdRail->DoneCookie, pRxdMainAddr + offsetof (EP3_RXD_RAIL_MAIN, DoneEvent), 1); -+ elan3_sdram_copyl_to_sdram (dev, &event, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent), sizeof (E3_BlockCopyEvent)); -+ -+ pRxdMain->DataEvent = EP3_EVENT_FREE; -+ pRxdMain->DoneEvent = EP3_EVENT_FREE; -+ -+ /* move onto next descriptor */ -+ pRxdElan += EP3_RXD_RAIL_ELAN_SIZE; -+ pRxdElanAddr += EP3_RXD_RAIL_ELAN_SIZE; -+ pRxdMain = (EP3_RXD_RAIL_MAIN *) ((unsigned long) pRxdMain + EP3_RXD_RAIL_MAIN_SIZE); -+ pRxdMainAddr += EP3_RXD_RAIL_MAIN_SIZE; -+ } -+ -+ spin_lock_irqsave (&rcvrRail->FreeDescLock, flags); -+ -+ list_add (&blk->Link, &rcvrRail->DescBlockList); -+ rcvrRail->TotalDescCount += EP3_NUM_RXD_PER_BLOCK; -+ rcvrRail->FreeDescCount += EP3_NUM_RXD_PER_BLOCK; -+ -+ for (i = 0; i < EP3_NUM_RXD_PER_BLOCK; i++) -+ list_add (&blk->Rxd[i].Generic.Link, &rcvrRail->FreeDescList); -+ -+ spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags); -+ -+ return 1; -+} -+ -+static void -+FreeRxdRailBlock (EP3_RCVR_RAIL *rcvrRail, EP3_RXD_RAIL_BLOCK *blk) -+{ -+ EP3_RAIL *rail = RCVR_TO_RAIL(rcvrRail); -+ EP3_RXD_RAIL *rxdRail; -+ unsigned long flags; -+ int i, j; -+ -+ 
spin_lock_irqsave (&rcvrRail->FreeDescLock, flags); -+ -+ list_del (&blk->Link); -+ -+ rcvrRail->TotalDescCount -= EP3_NUM_RXD_PER_BLOCK; -+ -+ for (rxdRail = &blk->Rxd[0], i = 0; i < EP3_NUM_RXD_PER_BLOCK; i++, rxdRail++) -+ { -+ -+ rcvrRail->FreeDescCount--; -+ -+ list_del (&rxdRail->Generic.Link); -+ -+ for (j = 0; j < EP_MAXFRAG; j++) -+ DeregisterCookie (&rail->CookieTable, &rxdRail->ChainCookie[j]); -+ -+ DeregisterCookie (&rail->CookieTable, &rxdRail->DataCookie); -+ DeregisterCookie (&rail->CookieTable, &rxdRail->DoneCookie); -+ } -+ -+ spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags); -+ -+ ReleaseDmaRetries (rail, EP3_NUM_RXD_PER_BLOCK); -+ -+ ep_free_main (&rail->Generic, blk->Rxd[0].RxdMainAddr, EP3_RXD_RAIL_MAIN_SIZE * EP3_NUM_RXD_PER_BLOCK); -+ ep_free_elan (&rail->Generic, blk->Rxd[0].RxdElanAddr, EP3_RXD_RAIL_ELAN_SIZE * EP3_NUM_RXD_PER_BLOCK); -+ -+ KMEM_FREE (blk, sizeof (EP3_RXD_RAIL_BLOCK)); -+} -+ -+static EP3_RXD_RAIL * -+GetRxdRail (EP3_RCVR_RAIL *rcvrRail) -+{ -+ EP3_RXD_RAIL *rxdRail; -+ unsigned long flags; -+ int low_on_rxds; -+ -+ spin_lock_irqsave (&rcvrRail->FreeDescLock, flags); -+ -+ if (list_empty (&rcvrRail->FreeDescList)) -+ rxdRail = NULL; -+ else -+ { -+ rxdRail = list_entry (rcvrRail->FreeDescList.next, EP3_RXD_RAIL, Generic.Link); -+ -+ list_del (&rxdRail->Generic.Link); -+ -+ rcvrRail->FreeDescCount--; -+ } -+ -+ /* Wakeup the descriptor primer thread if there's not many left */ -+ low_on_rxds = (rcvrRail->FreeDescCount < ep_rxd_lowat); -+ -+ spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags); -+ -+ if (low_on_rxds) -+ ep_kthread_schedule (&RCVR_TO_SUBSYS(rcvrRail)->Thread, lbolt); -+ -+ return (rxdRail); -+} -+ -+static void -+FreeRxdRail (EP3_RCVR_RAIL *rcvrRail, EP3_RXD_RAIL *rxdRail) -+{ -+ unsigned long flags; -+ -+#if defined(DEBUG_ASSERT) -+ { -+ EP_RAIL *rail = (EP_RAIL *) RCVR_TO_RAIL(rcvrRail); -+ ELAN3_DEV *dev = RCVR_TO_DEV (rcvrRail); -+ -+ EP_ASSERT (rail, rxdRail->Generic.RcvrRail == &rcvrRail->Generic); -+ -+ EP_ASSERT (rail, rxdRail->RxdMain->DataEvent == EP3_EVENT_PRIVATE); -+ EP_ASSERT (rail, rxdRail->RxdMain->DoneEvent == EP3_EVENT_PRIVATE); -+ EP_ASSERT (rail, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0)); -+ EP_ASSERT (rail, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0)); -+ -+ rxdRail->RxdMain->DataEvent = EP3_EVENT_FREE; -+ rxdRail->RxdMain->DoneEvent = EP3_EVENT_FREE; -+ } -+#endif -+ -+ spin_lock_irqsave (&rcvrRail->FreeDescLock, flags); -+ -+ list_add (&rxdRail->Generic.Link, &rcvrRail->FreeDescList); -+ -+ rcvrRail->FreeDescCount++; -+ -+ if (rcvrRail->FreeDescWaiting) -+ { -+ rcvrRail->FreeDescWaiting--; -+ kcondvar_wakeupall (&rcvrRail->FreeDescSleep, &rcvrRail->FreeDescLock); -+ } -+ -+ spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags); -+} -+ -+static void -+BindRxdToRail (EP_RXD *rxd, EP3_RXD_RAIL *rxdRail) -+{ -+ EP3_RAIL *rail = RCVR_TO_RAIL (rxdRail->Generic.RcvrRail); -+ -+ ASSERT (SPINLOCK_HELD (&rxd->Rcvr->Lock)); -+ -+ EPRINTF3 (DBG_RCVR, "%s: BindRxdToRail: rxd=%p rxdRail=%p\n", rail->Generic.Name, rxd, rxdRail); -+ -+ elan3_sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, RxdMain), rxd->NmdMain.nmd_addr); /* PCI write */ -+ -+ rxd->RxdRail = &rxdRail->Generic; -+ rxdRail->Generic.Rxd = rxd; -+} -+ -+static void -+UnbindRxdFromRail (EP_RXD *rxd, EP3_RXD_RAIL *rxdRail) -+{ -+ EP3_RCVR_RAIL *rcvrRail = (EP3_RCVR_RAIL *) 
rxdRail->Generic.RcvrRail;
-+
-+ ASSERT (SPINLOCK_HELD (&rxd->Rcvr->Lock));
-+ ASSERT (rxd->RxdRail == &rxdRail->Generic && rxdRail->Generic.Rxd == rxd);
-+
-+ EPRINTF3 (DBG_RCVR, "%s: UnbindRxdFromRail: rxd=%p rxdRail=%p\n", RCVR_TO_RAIL(rxdRail->Generic.RcvrRail)->Generic.Name, rxd, rxdRail);
-+
-+ rxd->RxdRail = NULL;
-+ rxdRail->Generic.Rxd = NULL;
-+
-+ if (rcvrRail->CleanupWaiting)
-+ kcondvar_wakeupall (&rcvrRail->CleanupSleep, &rxd->Rcvr->Lock);
-+ rcvrRail->CleanupWaiting = 0;
-+}
-+
-+static void
-+LockRcvrThread (EP3_RCVR_RAIL *rcvrRail)
-+{
-+ EP_COMMS_RAIL *commsRail = rcvrRail->Generic.CommsRail;
-+ EP3_RAIL *rail = RCVR_TO_RAIL(rcvrRail);
-+ ELAN3_DEV *dev = rail->Device;
-+ sdramaddr_t sle = rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadLock);
-+ EP3_SPINLOCK_MAIN *sl = &rcvrRail->RcvrMain->ThreadLock;
-+ E3_uint32 RestartBits = 0;
-+ int delay = 1;
-+ E3_uint32 seq;
-+ E3_uint32 reg;
-+
-+ ASSERT (SPINLOCK_HELD (&rcvrRail->Generic.Rcvr->Lock));
-+
-+ mb();
-+ elan3_sdram_writel (dev, sle + offsetof (EP3_SPINLOCK_ELAN, sl_lock), 1);
-+ mb();
-+ seq = elan3_sdram_readl (dev, sle + offsetof (EP3_SPINLOCK_ELAN, sl_seq));
-+ while (seq != sl->sl_seq)
-+ {
-+ while (sl->sl_seq == (seq - 1))
-+ {
-+ mb();
-+
-+ if ((read_reg32 (dev, Exts.InterruptReg) & (INT_TProc | INT_TProcHalted)) != 0 && spin_trylock (&dev->IntrLock))
-+ {
-+ reg = read_reg32 (dev, Exts.InterruptReg);
-+ ELAN_REG_REC(reg);
-+
-+ if ((reg & (INT_TProc | INT_TProcHalted)) != 0 &&
-+ elan3_sdram_readl (dev, sle + offsetof (EP3_SPINLOCK_ELAN, sl_seq)) != sl->sl_seq)
-+ {
-+ EPRINTF1 (DBG_RCVR, "%s: LockRcvrThread - thread trapped\n", rail->Generic.Name);
-+
-+ /* The thread processor has *really* trapped, and the spinlock is still held.
-+ * Thus it must have trapped due to a network error - we need to complete the
-+ * actions required for this envelope, since we may be spin-locking the receiver
-+ * to search the dma retry lists for a particular dma. So must ensure that
-+ * if the thread had trapped then the dma has been queued onto the retry list
-+ * *before* we inspect them.
-+ */
-+ IncrStat (commsRail, LockRcvrTrapped);
-+
-+ /* We're going to generate a spurious interrupt here - since we will
-+ * handle the thread processor trap directly */
-+ ELAN_REG_REC(reg);
-+ if (HandleTProcTrap (dev, &RestartBits))
-+ {
-+ /* NOTE - this is not an assert, since the "store" to unlock the lock could
-+ * be held up on the PCI interface, whilst the thread processor has
-+ * gone on and switched to a new thread, which has then trapped, and
-+ * our read of the InterruptReg can overtake the unlock write.
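-+ * For that reason the check below is kept as a comment rather than
-+ * enabled as an assertion: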
-+ *
-+ * ASSERT (dev->ThreadTrap->Registers[REG_GLOBALS + (1^WordEndianFlip)] ==
-+ * elan3_sdram_readl (dev, rcvr->RcvrElan + offsetof (EP_RCVR_ELAN, PendingRxDescsElan)));
-+ */
-+
-+ PULSE_SCHED_STATUS (dev, RestartBits);
-+
-+ DeliverTProcTrap (dev, dev->ThreadTrap, INT_TProc);
-+ }
-+ }
-+ spin_unlock (&dev->IntrLock);
-+ }
-+
-+ DELAY (delay); delay++;
-+ }
-+ seq = elan3_sdram_readl (dev, sle + offsetof (EP3_SPINLOCK_ELAN, sl_seq));
-+ }
-+}
-+
-+static void
-+UnlockRcvrThread (EP3_RCVR_RAIL *rcvrRail)
-+{
-+ EP3_RAIL *rail = RCVR_TO_RAIL(rcvrRail);
-+ sdramaddr_t sle = rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadLock);
-+
-+ mb();
-+ elan3_sdram_writel (rail->Device, sle + offsetof (EP3_SPINLOCK_ELAN, sl_lock), 0);
-+ mmiob();
-+}
-+
-+void
-+CompleteEnvelope (EP3_RAIL *rail, E3_Addr rxdElanAddr, E3_uint32 PAckVal)
-+{
-+ ELAN3_DEV *dev = rail->Device;
-+ sdramaddr_t rxdElan = ep_elan2sdram (&rail->Generic, rxdElanAddr);
-+ EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) (unsigned long) elan3_sdram_readq (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, MainAddr));
-+ EP_RXD_MAIN *rxdMain = rxdRail->Generic.Rxd->RxdMain;
-+ EP_ENVELOPE *env = &rxdMain->Envelope;
-+ EP3_RCVR_RAIL *rcvrRail = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail;
-+ EP_COMMS_RAIL *commsRail = rcvrRail->Generic.CommsRail;
-+ EP_RCVR *rcvr = rcvrRail->Generic.Rcvr;
-+ sdramaddr_t queue = ((EP3_COMMS_RAIL *) commsRail)->QueueDescs + rcvr->Service * sizeof (EP3_InputQueue);
-+ sdramaddr_t sle = rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadLock);
-+ EP3_SPINLOCK_MAIN *sl = &rcvrRail->RcvrMain->ThreadLock;
-+ int nodeId;
-+ EP_NODE_RAIL *nodeRail;
-+ E3_DMA_BE dma;
-+ E3_Addr nfptr;
-+ E3_Addr next;
-+
-+ ASSERT (commsRail->Rail == &rail->Generic);
-+ ASSERT (rxdElanAddr == elan3_sdram_readl (dev, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs)));
-+
-+ IncrStat (commsRail, CompleteEnvelope);
-+
-+ /* We don't need to acquire the NodeLock here (however we might be holding it),
-+ * since this can only get called while the node is connected, or disconnecting.
-+ * If the node is disconnecting, then we can get called from FlushDisconnecting()
-+ * while holding the NodeLock - after which we cannot get called again until the node
-+ * has reconnected from scratch.
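-+ * The node state assertion below relies on this guarantee.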
-+ */
-+ /* Copy the envelope information */
-+ nfptr = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_fptr));
-+
-+ if (nfptr == elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_top)))
-+ nfptr = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_base));
-+ else
-+ nfptr += elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_size));
-+
-+ /* Copy the envelope and payload (unconditionally) */
-+ elan3_sdram_copyl_from_sdram (dev, rcvrRail->InputQueueBase + (nfptr - rcvrRail->InputQueueAddr), env, EP_ENVELOPE_SIZE + EP_PAYLOAD_SIZE);
-+
-+ ASSERT (env->Version == EP_ENVELOPE_VERSION);
-+
-+ /* Copy the received message length */
-+ rxdMain->Len = elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_len));
-+
-+ /* Remove the RXD from the pending desc list */
-+ if ((next = elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Next))) == 0)
-+ rcvrRail->RcvrMain->PendingDescsTailp = 0;
-+ elan3_sdram_writel (dev, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs), next);
-+
-+ /* Copy the DMA descriptor to queue on the appropriate retry list */
-+ elan3_sdram_copyq_from_sdram (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Dmas[0]), &dma, sizeof (E3_DMA)); /* PCI read block */
-+
-+ EP_ASSERT (&rail->Generic, dma.s.dma_direction == DMA_READ);
-+
-+#if defined(DEBUG_ASSERT) && defined(DEBUG_SDRAM_ASSERT)
-+ /* NOTE: not an assertion, since the thread packet could have successfully
-+ * transferred the "put" dma to the far side - which could then have
-+ * completed - but the far side will see a network error which will
-+ * cause the virtual circuit to be dropped by the far side and this
-+ * DMA will be removed */
-+ if (rxdRail->RxdMain->DataEvent != EP3_EVENT_ACTIVE ||
-+ elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) != 1)
-+ {
-+ printk ("CompleteEnvelope: suspicious dma : Node=%d DataBlock=%d Event=%d\n",
-+ env->NodeId, rxdRail->RxdMain->DataEvent,
-+ elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)));
-+ }
-+#endif
-+
-+ EPRINTF6 (DBG_RCVR, "%s: CompleteEnvelope: rxd=%p NodeId=%d Xid=%llx Cookies=%08x,%08x\n", commsRail->Rail->Name,
-+ rxdRail, env->NodeId, (long long) env->Xid.Unique, dma.s.dma_srcCookieVProc, dma.s.dma_destCookieVProc);
-+
-+ /* we MUST convert this into a DMA_READ_REQUEUE dma since if we don't the DMA descriptor will
-+ * be read from the EP_RETRY_DMA rather than the original DMA - this can then get reused
-+ * and an incorrect DMA descriptor sent */
-+ dma.s.dma_source = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, Dmas[0]);
-+ dma.s.dma_direction = (dma.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
-+
-+ nodeId = EP_VP_TO_NODE(dma.s.dma_srcVProc);
-+ nodeRail = &rail->Generic.Nodes[nodeId];
-+
-+ ASSERT (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE);
-+
-+ if (PAckVal != E3_PAckOk)
-+ {
-+ if (nodeRail->State == EP_NODE_CONNECTED)
-+ QueueDmaForRetry (rail, &dma, EP_RETRY_LOW_PRI_RETRY);
-+ else
-+ QueueDmaOnStalledList (rail, &dma);
-+ }
-+
-+ /* Finally forcefully drop the spinlock for the thread */
-+ sl->sl_seq = elan3_sdram_readl (dev, sle + offsetof (EP3_SPINLOCK_ELAN, sl_seq));
-+
-+ wmb();
-+}
-+
-+void
-+StallThreadForNoDescs (EP3_RAIL *rail, E3_Addr rcvrElanAddr, E3_Addr sp)
-+{
-+ ELAN3_DEV *dev = rail->Device;
-+ sdramaddr_t rcvrElan = ep_elan2sdram (&rail->Generic, rcvrElanAddr);
-+ EP3_RCVR_RAIL *rcvrRail = (EP3_RCVR_RAIL *) (unsigned long) elan3_sdram_readq
(dev, rcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, MainAddr));
-+ EP_RCVR *rcvr = rcvrRail->Generic.Rcvr;
-+ EP_COMMS_RAIL *commsRail = rcvrRail->Generic.CommsRail;
-+
-+ EPRINTF3 (DBG_RCVR, "%s: StallThreadForNoDescs - rcvrRail=%p sp=%x\n", commsRail->Rail->Name, rcvrRail, sp);
-+
-+ IncrStat (commsRail, StallThread);
-+
-+ /* NOTE: spin lock not required as thread is trapped */
-+
-+ if (rcvrRail->RcvrMain->PendingDescsTailp != 0)
-+ {
-+ EPRINTF1 (DBG_RCVR, "%s: StallThreadForNoDescs - pending descriptors, wakeup thread\n", commsRail->Rail->Name);
-+
-+ /*
-+ * A receive buffer was queued after the thread had decided to go to
-+ * sleep, but before the event interrupt occurred. Just restart the
-+ * thread to consume the envelope.
-+ */
-+ IssueRunThread (rail, sp);
-+ }
-+ else
-+ {
-+ EPRINTF1 (DBG_RCVR, "%s: StallThreadForNoDescs - set ThreadWaiting\n", commsRail->Rail->Name);
-+
-+ IncrStat (commsRail, ThrdWaiting);
-+
-+ /* Mark the rcvr as waiting for a rxd, and schedule a call of ep_check_rcvr
-+ * to attempt to "steal" a descriptor from a different rail */
-+ rcvrRail->ThreadWaiting = sp;
-+
-+ ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
-+ }
-+}
-+
-+void
-+StallThreadForHalted (EP3_RAIL *rail, E3_Addr rcvrElanAddr, E3_Addr sp)
-+{
-+ ELAN3_DEV *dev = rail->Device;
-+ sdramaddr_t rcvrElan = ep_elan2sdram (&rail->Generic, rcvrElanAddr);
-+ EP3_RCVR_RAIL *rcvrRail = (EP3_RCVR_RAIL *) (unsigned long) elan3_sdram_readq (dev, rcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, MainAddr));
-+ EP_RCVR *rcvr = rcvrRail->Generic.Rcvr;
-+ unsigned long flags = 0;
-+
-+ spin_lock_irqsave (&rcvr->Lock, flags);
-+
-+ rcvrRail->ThreadHalted = sp;
-+
-+ EPRINTF2 (DBG_EPTRAP, "%s: StallThreadForHalted: sp=%08x\n", rail->Generic.Name, sp);
-+
-+ if (rcvrRail->CleanupWaiting)
-+ kcondvar_wakeupone (&rcvrRail->CleanupSleep, &rcvr->Lock);
-+ rcvrRail->CleanupWaiting = 0;
-+
-+ spin_unlock_irqrestore (&rcvr->Lock, flags);
-+}
-+/*
-+ * RxDataEvent: arg == EP3_RXD_RAIL
-+ * Called on completion of receiving data.
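-+ * Spins until the block copy of the main memory event word has
-+ * completed, then either leaves the rxd bound for the RPC phases or
-+ * unbinds it from the rail and passes it up via ep_rxd_received().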
-+ */ -+static void -+RxDataEvent (EP3_RAIL *rail, void *arg) -+{ -+ EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) arg; -+ EP3_RCVR_RAIL *rcvrRail = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail; -+ EP_RXD *rxd = rxdRail->Generic.Rxd; -+ EP_ENVELOPE *env = &rxd->RxdMain->Envelope; -+ EP_RCVR *rcvr = rxd->Rcvr; -+ ELAN3_DEV *dev = rail->Device; -+ unsigned long flags; -+ int delay = 1; -+ -+ spin_lock_irqsave (&rcvr->Lock, flags); -+ for (;;) -+ { -+ if (EP3_EVENT_FIRED (rxdRail->DataCookie, rxdRail->RxdMain->DataEvent)) -+ break; -+ -+ if (EP3_EVENT_FIRING (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent), rxdRail->DataCookie, rxdRail->RxdMain->DataEvent)) -+ { -+ if (delay > EP3_EVENT_FIRING_TLIMIT) -+ panic ("RxDataEvent: events set but block copy not completed\n"); -+ DELAY(delay); -+ delay <<= 1; -+ } -+ else -+ { -+ printk ("%s: RxDataEvent: rxd %p not complete [%x,%x,%x]\n", rail->Generic.Name, rxd, rxdRail->RxdMain->DataEvent, -+ elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)), -+ elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Type))); -+ -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+ return; -+ } -+ mb(); -+ } -+ -+ /* -+ * Note, since the thread will have sent the "get" dma before copying the -+ * envelope, we must check that it has completed doing this, if not then -+ * it might be that the thread trapped due to a network error, so we must -+ * spinlock against the thread -+ */ -+ if (rxd->RxdMain->Len == EP_RXD_PENDING) -+ { -+ LockRcvrThread (rcvrRail); -+ UnlockRcvrThread (rcvrRail); -+ -+ ASSERT (env->Version == EP_ENVELOPE_VERSION && rxd->RxdMain->Len != EP_RXD_PENDING); -+ } -+ -+ EPRINTF7 (DBG_RCVR, "%s: RxDataEvent: rxd=%p rxdRail=%p completed from elan node %d [XID=%llx] Length %d State %x\n", -+ rail->Generic.Name, rxd, rxdRail, env->NodeId, (long long) env->Xid.Unique, rxd->RxdMain->Len, rxd->State); -+ -+ EP_ASSERT (&rail->Generic, rxd->State == EP_RXD_RECEIVE_ACTIVE || rxd->State == EP_RXD_PUT_ACTIVE || rxd->State == EP_RXD_GET_ACTIVE); -+ EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0)); /* PCI read */ -+ EP_ASSERT (&rail->Generic, rxdRail->RxdMain->DoneEvent == EP3_EVENT_PRIVATE); -+ -+ rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE; -+ rxd->Data.nmd_attr = EP_RAIL2RAILMASK (rail->Generic.Number); -+ -+ if (rxd->RxdMain->Len >= 0 && EP_IS_RPC(env->Attr)) -+ rxd->State = EP_RXD_RPC_IN_PROGRESS; -+ else -+ { -+ rxd->State = EP_RXD_COMPLETED; -+ -+ /* remove from active list */ -+ list_del (&rxd->Link); -+ -+ UnbindRxdFromRail (rxd, rxdRail); -+ FreeRxdRail (rcvrRail, rxdRail); -+ } -+ -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+ ep_rxd_received (rxd); -+ -+} -+ -+/* -+ * RxDataRetry: arg == EP3_RXD_RAIL -+ * Called on retry of "get" dma of large transmit data -+ * and rpc_get/rpc_put and "put" of datavec of rpc completion. 
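-+ * Requeues the DMA on the low priority retry list, backing off
-+ * via ep_backoff() on repeated retries.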
-+ */
-+static void
-+RxDataRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status)
-+{
-+    EP3_RXD_RAIL  *rxdRail   = (EP3_RXD_RAIL *) arg;
-+    EP_COMMS_RAIL *commsRail = rxdRail->Generic.RcvrRail->CommsRail;
-+    EP_RXD        *rxd       = rxdRail->Generic.Rxd;
-+
-+#if defined(DEBUG_ASSERT)
-+    RxDataVerify (rail, arg, dma);
-+#endif
-+
-+    IncrStat (commsRail, RxDataRetry);
-+
-+    EPRINTF4 (DBG_RCVR, "%s: RxDataRetry: rcvr %p rxd %p [XID=%llx]\n", rail->Generic.Name, rxd->Rcvr, rxd, (long long) rxd->RxdMain->Envelope.Xid.Unique);
-+
-+    QueueDmaForRetry (rail, dma, EP_RETRY_LOW_PRI_RETRY + ep_backoff (&rxdRail->Backoff, EP_BACKOFF_DATA));
-+}
-+
-+static void
-+RxDataVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma)
-+{
-+#if defined(DEBUG_ASSERT)
-+    EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) arg;
-+    EP_RXD       *rxd     = rxdRail->Generic.Rxd;
-+
-+    if (dma->s.dma_direction == DMA_WRITE)
-+    {
-+        EP_ASSERT (&rail->Generic,
-+                   (rxd->State == EP_RXD_RECEIVE_ACTIVE  && rxdRail->RxdMain->DataEvent == EP3_EVENT_ACTIVE  && rxdRail->RxdMain->DoneEvent == EP3_EVENT_PRIVATE) ||
-+                   (rxd->State == EP_RXD_PUT_ACTIVE      && rxdRail->RxdMain->DataEvent == EP3_EVENT_ACTIVE  && rxdRail->RxdMain->DoneEvent == EP3_EVENT_PRIVATE) ||
-+                   (rxd->State == EP_RXD_COMPLETE_ACTIVE && rxdRail->RxdMain->DataEvent == EP3_EVENT_PRIVATE && rxdRail->RxdMain->DoneEvent == EP3_EVENT_ACTIVE));
-+        EP_ASSERT (&rail->Generic, SDRAM_ASSERT (rxd->State == EP_RXD_COMPLETE_ACTIVE ?
-+                                                 elan3_sdram_readl (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 1: /* PCI read */
-+                                                 elan3_sdram_readl (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 1)); /* PCI read */
-+    }
-+    else
-+    {
-+        EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_READ_REQUEUE);
-+
-+#if defined(DEBUG_SDRAM_ASSERT)
-+        /* NOTE: not an assertion, since the "get" DMA can still be running if
-+         *       its packet got a network error - and then the "put" from the
-+         *       far side has completed - however the virtual circuit should
-+         *       then be dropped by the far side and this DMA will be removed */
-+        if (EP_VP_TO_NODE(dma->s.dma_srcVProc) != ep_rxd_node(rxd) ||
-+            (rxd->State != EP_RXD_RECEIVE_ACTIVE && rxd->State != EP_RXD_GET_ACTIVE) ||
-+            rxdRail->RxdMain->DataEvent != EP3_EVENT_ACTIVE ||
-+            elan3_sdram_readl (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) != 1)
-+        {
-+            EPRINTF6 (DBG_RCVR, "%s: RxDataRetry: suspicious dma : VProc=%d NodeId=%d State=%d DataBlock=%x Event=%d\n",
-+                      rail->Generic.Name, EP_VP_TO_NODE(dma->s.dma_srcVProc), ep_rxd_node(rxd), rxd->State, rxdRail->RxdMain->DataEvent,
-+                      elan3_sdram_readl (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)));
-+        }
-+#endif /* defined(DEBUG_SDRAM_ASSERT) */
-+    }
-+#endif /* DEBUG_ASSERT */
-+}
-+
-+/*
-+ * RxDoneEvent: arg == EP_RXD
-+ * Called on completion of large receive.
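[Editorial sketch] The wait loops in RxDataEvent above and RxDoneEvent below share one shape: poll a block-copy completion, doubling the delay while the event is visibly still firing, and give up past a hard limit. A minimal, self-contained C sketch of that pattern; `event_fired`, `event_firing`, `delay_usecs` and `TLIMIT` are assumed stand-ins for the EP3_EVENT_* machinery, not real qsnet APIs:

#include <stdlib.h>

#define TLIMIT (1 << 20)         /* assumed cap, analogous to EP3_EVENT_FIRING_TLIMIT */

extern int  event_fired (void);  /* block copy fully visible in main memory */
extern int  event_firing (void); /* event set, block copy still in flight */
extern void delay_usecs (int n);

static int poll_event (void)
{
    int delay = 1;

    for (;;) {
        if (event_fired ())
            return 0;            /* completed */
        if (!event_firing ())
            return -1;           /* inconsistent state: caller bails out */
        if (delay > TLIMIT)
            abort ();            /* events set but copy never completed */
        delay_usecs (delay);
        delay <<= 1;             /* exponential backoff between polls */
    }
}

The driver additionally issues a memory barrier (mb()) on each iteration before re-reading the event words; the sketch omits the hardware-visibility details.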
-+ */ -+static void -+RxDoneEvent (EP3_RAIL *rail, void *arg) -+{ -+ EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) arg; -+ EP3_RCVR_RAIL *rcvrRail = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail; -+ EP_COMMS_RAIL *commsRail = rcvrRail->Generic.CommsRail; -+ EP_RXD *rxd = rxdRail->Generic.Rxd; -+ EP_RCVR *rcvr = rxd->Rcvr; -+ ELAN3_DEV *dev = rail->Device; -+ int delay = 1; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&rcvr->Lock, flags); -+ for (;;) -+ { -+ if (EP3_EVENT_FIRED (rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent)) -+ break; -+ -+ if (EP3_EVENT_FIRING (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent), rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent)) -+ { -+ if (delay > EP3_EVENT_FIRING_TLIMIT) -+ panic ("RxDoneEvent: events set but block copy not completed\n"); -+ DELAY(delay); -+ delay <<= 1; -+ } -+ else -+ { -+ printk ("RxDoneEvent: rxd %p not complete [%x,%x.%x]\n", rxd, rxdRail->RxdMain->DoneEvent, -+ elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)), -+ elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Type))); -+ -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+ return; -+ } -+ mb(); -+ } -+ -+ EPRINTF4 (DBG_RCVR, "%s: RxDoneEvent: rxd %p completed from elan node %d [XID=%llx]\n", -+ commsRail->Rail->Name, rxd, rxd->RxdMain->Envelope.NodeId, (long long) rxd->RxdMain->Envelope.Xid.Unique); -+ -+ IncrStat (commsRail, RxDoneEvent); -+ -+ EP_ASSERT (&rail->Generic, rxdRail->RxdMain->DataEvent == EP3_EVENT_PRIVATE); -+ EP_ASSERT (&rail->Generic, EP3_EVENT_FIRED (rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent)); -+ EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0)); /* PCI read */ -+ EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0)); /* PCI read */ -+ -+ /* mark rxd as private */ -+ rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE; -+ -+ /* remove from active list */ -+ list_del (&rxd->Link); -+ -+ UnbindRxdFromRail (rxd, rxdRail); -+ FreeRxdRail (rcvrRail, rxdRail); -+ -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+ -+ rxd->Handler (rxd); -+} -+ -+/* -+ * RxDoneRetry: arg == EP_RXD -+ * Called on retry of "put" of RPC completion status block -+ */ -+static void -+RxDoneRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status) -+{ -+ EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) arg; -+ EP_COMMS_RAIL *commsRail = rxdRail->Generic.RcvrRail->CommsRail; -+ EP_RXD *rxd = rxdRail->Generic.Rxd; -+ -+#if defined(DEBUG_ASSERT) -+ RxDoneVerify (rail, arg, dma); -+#endif -+ -+ IncrStat (commsRail, RxDoneRetry); -+ -+ EPRINTF4 (DBG_RCVR, "%s: RxDoneRetry: rcvr %p rxd %p [XID=%llx]\n", commsRail->Rail->Name, rxd->Rcvr, rxd, (long long) rxd->RxdMain->Envelope.Xid.Unique); -+ -+ QueueDmaForRetry (rail, dma, EP_RETRY_LOW_PRI_RETRY + ep_backoff (&rxdRail->Backoff, EP_BACKOFF_DONE)); -+} -+ -+static void -+RxDoneVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma) -+{ -+#if defined(DEBUG_ASSERT) -+ EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) arg; -+ EP_RXD *rxd = rxdRail->Generic.Rxd; -+ -+ EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_WRITE && EP_VP_TO_NODE(dma->s.dma_destVProc) == ep_rxd_node(rxd)); -+ EP_ASSERT (&rail->Generic, rxd->State == EP_RXD_COMPLETE_ACTIVE && rxdRail->RxdMain->DoneEvent == EP3_EVENT_ACTIVE); -+ EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (rail->Device, rxdRail->RxdElan + 
offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 1)); /* PCI read */ -+#endif /* defined(DEBUG_ASSERT) */ -+} -+ -+int -+ep3rcvr_queue_rxd (EP_RXD *rxd, EP_RCVR_RAIL *r) -+{ -+ EP3_RCVR_RAIL *rcvrRail = (EP3_RCVR_RAIL *) r; -+ EP3_RAIL *rail = RCVR_TO_RAIL(rcvrRail); -+ ELAN3_DEV *dev = rail->Device; -+ EP3_RXD_RAIL *rxdRail; -+ -+ ASSERT ( SPINLOCK_HELD(&rxd->Rcvr->Lock)); -+ -+ if ((rxdRail = GetRxdRail (rcvrRail)) == NULL) -+ return 0; -+ -+ /* Flush the Elan TLB if mappings have changed */ -+ ep_perrail_dvma_sync (&rail->Generic); -+ -+ elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_addr), rxd->Data.nmd_addr); /* PCI write */ -+ elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_len), rxd->Data.nmd_len); /* PCI write */ -+ elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_attr), rxd->Data.nmd_attr); /* PCI write */ -+ -+ /* Bind the rxdRail and rxd together */ -+ BindRxdToRail (rxd, rxdRail); -+ -+ /* Mark as active */ -+ elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 1); -+ -+ rxdRail->RxdMain->DataEvent = EP3_EVENT_ACTIVE; -+ rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE; -+ -+ /* Interlock with StallThreadForNoDescs */ -+ spin_lock (&dev->IntrLock); -+ -+ EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_queue_rxd: rcvr %p rxd %p rxdRail %p\n", rail->Generic.Name, rxd->Rcvr, rxd, rxdRail); -+ -+ EP3_SPINENTER (dev, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingLock), &rcvrRail->RcvrMain->PendingLock); -+ -+ elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, Next), 0); /* PCI write */ -+ if (rcvrRail->RcvrMain->PendingDescsTailp == 0) -+ elan3_sdram_writel (dev, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs), rxdRail->RxdElanAddr); /* PCI write */ -+ else -+ elan3_sdram_writel (dev, rcvrRail->RcvrMain->PendingDescsTailp, rxdRail->RxdElanAddr); /* PCI write */ -+ rcvrRail->RcvrMain->PendingDescsTailp = rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, Next); -+ -+ EP3_SPINEXIT (dev, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingLock), &rcvrRail->RcvrMain->PendingLock); -+ -+ /* If the thread has paused because it was woken up with no receive buffer */ -+ /* ready, then wake it up to process the one we've just added */ -+ if (rcvrRail->ThreadWaiting) -+ { -+ EPRINTF1 (DBG_RCVR, "%s: DoReceive: ThreadWaiting - restart thread\n", rail->Generic.Name); -+ -+ IssueRunThread (rail, rcvrRail->ThreadWaiting); -+ -+ rcvrRail->ThreadWaiting = (E3_Addr) 0; -+ } -+ -+ spin_unlock (&dev->IntrLock); -+ -+ return 1; -+} -+ -+void -+ep3rcvr_rpc_put (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags) -+{ -+ EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) rxd->RxdRail; -+ EP3_RCVR_RAIL *rcvrRail = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail; -+ EP3_RAIL *rail = RCVR_TO_RAIL (rcvrRail); -+ ELAN3_DEV *dev = rail->Device; -+ -+ EP3_RXD_RAIL_MAIN *rxdMain = rxdRail->RxdMain; -+ sdramaddr_t rxdElan = rxdRail->RxdElan; -+ EP_ENVELOPE *env = &rxd->RxdMain->Envelope; -+ E3_DMA_BE dmabe; -+ int i, len; -+ -+ EP_ASSERT (&rail->Generic, rxd->State == EP_RXD_PUT_ACTIVE); -+ EP_ASSERT (&rail->Generic, rxdMain->DataEvent == EP3_EVENT_PRIVATE && rxdMain->DoneEvent == EP3_EVENT_PRIVATE); -+ EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0)); /* PCI read */ -+ EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof 
(EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0)); /* PCI read */ -+ -+ /* Flush the Elan TLB if mappings have changed */ -+ ep_perrail_dvma_sync (&rail->Generic); -+ -+ /* Generate the DMA chain to put the data in two loops to burst -+ * the data across the PCI bus */ -+ for (len = 0, i = (nFrags-1), local += (nFrags-1), remote += (nFrags-1); i >= 0; len += local->nmd_len, i--, local--, remote--) -+ { -+ dmabe.s.dma_type = E3_DMA_TYPE(DMA_BYTE, DMA_WRITE, DMA_NORMAL, EP3_DMAFAILCOUNT); -+ dmabe.s.dma_size = local->nmd_len; -+ dmabe.s.dma_source = local->nmd_addr; -+ dmabe.s.dma_dest = remote->nmd_addr; -+ dmabe.s.dma_destEvent = (E3_Addr) 0; -+ dmabe.s.dma_destCookieVProc = EP_VP_DATA (env->NodeId); -+ if (i == (nFrags-1)) -+ dmabe.s.dma_srcEvent = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, DataEvent); -+ else -+ dmabe.s.dma_srcEvent = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i]); -+ dmabe.s.dma_srcCookieVProc = LocalCookie (rail, env->NodeId); -+ -+ EPRINTF9 (DBG_RCVR, "%s: ep3rcvr_rpc_put: rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x Cookies=%x.%x\n", rail->Generic.Name, rxd, -+ (long long) env->Xid.Unique, i, local->nmd_addr, remote->nmd_addr, local->nmd_len, dmabe.s.dma_destCookieVProc, dmabe.s.dma_srcCookieVProc); -+ -+ if (i != 0) -+ elan3_sdram_copyq_to_sdram (dev, &dmabe, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Dmas[i]), sizeof (E3_DMA)); /* PCI write block */ -+ } -+ -+ for (i = 0; i < nFrags; i++) -+ elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i].ev_Count), 1); /* PCI write */ -+ -+ /* Initialise the data event */ -+ elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 1); /* PCI write */ -+ rxdMain->DataEvent = EP3_EVENT_ACTIVE; -+ -+ ASSERT (rail->Generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->Generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE); -+ -+ if (IssueDma (rail, &dmabe, EP_RETRY_LOW_PRI, FALSE) != ISSUE_COMMAND_OK) -+ { -+ /* Failed to issue the dma command, so copy the dma descriptor and queue it for retry */ -+ EPRINTF2 (DBG_RCVR, "%s: ep3rcvr_rpc_put: queue rxd %p on retry thread\n", rail->Generic.Name, rxd); -+ -+ QueueDmaForRetry (rail, &dmabe, EP_RETRY_LOW_PRI); -+ } -+ -+ BucketStat (rxd->Rcvr->Subsys, RPCPut, len); -+} -+ -+void -+ep3rcvr_rpc_get (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags) -+{ -+ EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) rxd->RxdRail; -+ EP3_RCVR_RAIL *rcvrRail = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail; -+ EP3_RAIL *rail = RCVR_TO_RAIL (rcvrRail); -+ ELAN3_DEV *dev = rail->Device; -+ -+ EP3_RXD_RAIL_MAIN *rxdMain = rxdRail->RxdMain; -+ sdramaddr_t rxdElan = rxdRail->RxdElan; -+ EP_ENVELOPE *env = &rxd->RxdMain->Envelope; -+ E3_DMA_BE dmabe; -+ int i, len; -+ -+ EP_ASSERT (&rail->Generic, rxd->State == EP_RXD_GET_ACTIVE); -+ EP_ASSERT (&rail->Generic, rxdMain->DataEvent == EP3_EVENT_PRIVATE && rxdMain->DoneEvent == EP3_EVENT_PRIVATE); -+ EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0)); /* PCI read */ -+ EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0)); /* PCI read */ -+ -+ /* Flush the Elan TLB if mappings have changed */ -+ ep_perrail_dvma_sync (&rail->Generic); -+ -+ /* Generate the DMA chain to get the data in two loops to burst -+ * the data across the PCI bus */ -+ for (len = 0, i = (nFrags-1), remote += (nFrags-1), local += 
(nFrags-1); i >= 0; len += remote->nmd_len, i--, remote--, local--)
-+    {
-+        dmabe.s.dma_type   = E3_DMA_TYPE(DMA_BYTE, DMA_READ, DMA_NORMAL, EP3_DMAFAILCOUNT);
-+        dmabe.s.dma_size   = remote->nmd_len;
-+        dmabe.s.dma_source = remote->nmd_addr;
-+        dmabe.s.dma_dest   = local->nmd_addr;
-+        if (i == (nFrags-1))
-+            dmabe.s.dma_destEvent = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, DataEvent);
-+        else
-+            dmabe.s.dma_destEvent = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i]);
-+        dmabe.s.dma_destCookieVProc = LocalCookie (rail, env->NodeId);
-+        dmabe.s.dma_srcEvent        = (E3_Addr) 0;
-+        dmabe.s.dma_srcCookieVProc  = RemoteCookie (rail, env->NodeId);
-+
-+        EPRINTF9 (DBG_RCVR, "%s: ep3rcvr_rpc_get rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x Cookies=%x.%x\n", rail->Generic.Name, rxd,
-+                  (long long) env->Xid.Unique, i, remote->nmd_addr, local->nmd_addr, remote->nmd_len, dmabe.s.dma_destCookieVProc,
-+                  dmabe.s.dma_srcCookieVProc);
-+
-+        /*
-+         * Always copy down the dma descriptor, since we issue it as a READ_REQUEUE
-+         * dma, and the elan will fetch the descriptor to send out of the link from
-+         * the rxdElan->Dmas[i] location, before issuing the DMA chain we modify
-+         * the dma_source.
-+         */
-+        elan3_sdram_copyq_to_sdram (dev, &dmabe, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Dmas[i]), sizeof (E3_DMA)); /* PCI write block */
-+    }
-+
-+    for (i = 0; i < nFrags; i++)
-+        elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i].ev_Count), 1); /* PCI write */
-+
-+    /* Initialise the data event */
-+    elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 1); /* PCI write */
-+    rxdMain->DataEvent = EP3_EVENT_ACTIVE;
-+
-+    ASSERT (rail->Generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->Generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE);
-+
-+    /* we MUST convert this into a DMA_READ_REQUEUE dma as if we don't the DMA descriptor will
-+     * be read from the EP_RETRY_DMA rather than the original DMA - this can then get reused
-+     * and an incorrect DMA descriptor sent */
-+    dmabe.s.dma_source    = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, Dmas[0]);
-+    dmabe.s.dma_direction = (dmabe.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
-+
-+    if (IssueDma (rail, &dmabe, EP_RETRY_LOW_PRI, FALSE) != ISSUE_COMMAND_OK)
-+    {
-+        /* Failed to issue the dma command, so copy the dma descriptor and queue it for retry */
-+        EPRINTF2 (DBG_RCVR, "%s: ep3rcvr_rpc_get: queue rxd %p on retry thread\n", rail->Generic.Name, rxd);
-+
-+        QueueDmaForRetry (rail, &dmabe, EP_RETRY_LOW_PRI);
-+    }
-+
-+    BucketStat (rxd->Rcvr->Subsys, RPCGet, len);
-+}
-+
-+void
-+ep3rcvr_rpc_complete (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags)
-+{
-+    EP3_RXD_RAIL      *rxdRail  = (EP3_RXD_RAIL *) rxd->RxdRail;
-+    EP3_RCVR_RAIL     *rcvrRail = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail;
-+    EP3_RAIL          *rail     = RCVR_TO_RAIL (rcvrRail);
-+    ELAN3_DEV         *dev      = rail->Device;
-+
-+    EP3_RXD_RAIL_MAIN *rxdMain  = rxdRail->RxdMain;
-+    sdramaddr_t        rxdElan  = rxdRail->RxdElan;
-+    EP_ENVELOPE       *env      = &rxd->RxdMain->Envelope;
-+    E3_DMA_BE          dmabe;
-+    int                i, len;
-+
-+    EP_ASSERT (&rail->Generic, rxd->State == EP_RXD_COMPLETE_ACTIVE);
-+    EP_ASSERT (&rail->Generic, rxdMain->DataEvent == EP3_EVENT_PRIVATE && rxdMain->DoneEvent == EP3_EVENT_PRIVATE);
-+    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0)); /* PCI read */
-+    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof
offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0)); /* PCI read */ -+ -+ /* Flush the Elan TLB if mappings have changed */ -+ ep_perrail_dvma_sync (&rail->Generic); -+ -+ /* Initialise the status block dma */ -+ dmabe.s.dma_type = E3_DMA_TYPE(DMA_BYTE, DMA_WRITE, DMA_NORMAL, EP3_DMAFAILCOUNT); -+ dmabe.s.dma_size = sizeof (EP_STATUSBLK); -+ dmabe.s.dma_source = rxd->NmdMain.nmd_addr + offsetof (EP_RXD_MAIN, StatusBlk); -+ dmabe.s.dma_dest = env->TxdMain.nmd_addr + offsetof (EP_TXD_MAIN, StatusBlk); -+ dmabe.s.dma_destEvent = env->TxdRail + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent); -+ dmabe.s.dma_destCookieVProc = EP_VP_DATA(env->NodeId); -+ dmabe.s.dma_srcEvent = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent); -+ dmabe.s.dma_srcCookieVProc = LocalCookie (rail, env->NodeId); -+ -+ EPRINTF8 (DBG_RCVR, "%s: ep3rcvr_rpc_complete: rxd %p [XID=%llx] statusblk source=%08x dest=%08x len=%x Cookies=%x.%x\n", rail->Generic.Name, rxd, -+ (long long) env->Xid.Unique, dmabe.s.dma_source, dmabe.s.dma_dest, dmabe.s.dma_size, dmabe.s.dma_destCookieVProc, -+ dmabe.s.dma_srcCookieVProc); -+ -+ for (len = 0, i = EP_MAXFRAG, remote += (nFrags-1), local += (nFrags-1); i > EP_MAXFRAG-nFrags; len += local->nmd_len, i--, local--, remote--) -+ { -+ /* copy down previous dma */ -+ elan3_sdram_copyq_to_sdram (dev, &dmabe, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Dmas[i]), sizeof (E3_DMA)); /* PCI write block */ -+ -+ dmabe.s.dma_type = E3_DMA_TYPE(DMA_BYTE, DMA_WRITE, DMA_NORMAL, EP3_DMAFAILCOUNT); -+ dmabe.s.dma_size = local->nmd_len; -+ dmabe.s.dma_source = local->nmd_addr; -+ dmabe.s.dma_dest = remote->nmd_addr; -+ dmabe.s.dma_destEvent = (E3_Addr) 0; -+ dmabe.s.dma_destCookieVProc = EP_VP_DATA (env->NodeId); -+ dmabe.s.dma_srcEvent = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i-1]); -+ dmabe.s.dma_srcCookieVProc = LocalCookie (rail, env->NodeId); -+ -+ EPRINTF9 (DBG_RCVR, "%s: ep3rcvr_rpc_complete: rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x Cookies=%x.%x\n", rail->Generic.Name, rxd, -+ (long long) env->Xid.Unique, i, local->nmd_addr, remote->nmd_addr, local->nmd_len, dmabe.s.dma_destCookieVProc, -+ dmabe.s.dma_srcCookieVProc); -+ } -+ -+ for (i = EP_MAXFRAG-nFrags; i < EP_MAXFRAG; i++) -+ elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i].ev_Count), 1); /* PCI write */ -+ -+ /* Initialise the done event */ -+ elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count), 1); /* PCI write */ -+ rxdMain->DoneEvent = EP3_EVENT_ACTIVE; -+ -+ ASSERT (rail->Generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->Generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE); -+ -+ if (IssueDma (rail, &dmabe, EP_RETRY_LOW_PRI, FALSE) != ISSUE_COMMAND_OK) -+ { -+ /* Failed to issue the dma command, so copy the dma descriptor and queue it for retry */ -+ EPRINTF2 (DBG_RCVR, "%s: ep3rcvr_rpc_complete: queue rxd %p on retry thread\n", rail->Generic.Name, rxd); -+ -+ QueueDmaForRetry (rail, &dmabe, EP_RETRY_LOW_PRI); -+ } -+ -+ BucketStat (rxd->Rcvr->Subsys, CompleteRPC, len); -+} -+ -+void -+ep3rcvr_add_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *commsRail) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) commsRail->Rail; -+ sdramaddr_t qdescs = ((EP3_COMMS_RAIL *) commsRail)->QueueDescs; -+ EP3_RCVR_RAIL *rcvrRail; -+ EP3_InputQueue qdesc; -+ sdramaddr_t stack; -+ unsigned long flags; -+ -+ KMEM_ZALLOC (rcvrRail, EP3_RCVR_RAIL *, sizeof (EP3_RCVR_RAIL), TRUE); -+ -+ kcondvar_init (&rcvrRail->CleanupSleep); -+ spin_lock_init 
(&rcvrRail->FreeDescLock);
-+    INIT_LIST_HEAD (&rcvrRail->FreeDescList);
-+    INIT_LIST_HEAD (&rcvrRail->DescBlockList);
-+
-+    rcvrRail->Generic.CommsRail = commsRail;
-+    rcvrRail->Generic.Rcvr      = rcvr;
-+
-+    rcvrRail->RcvrMain       = ep_alloc_main (&rail->Generic, sizeof (EP3_RCVR_RAIL_MAIN), 0, &rcvrRail->RcvrMainAddr);
-+    rcvrRail->RcvrElan       = ep_alloc_elan (&rail->Generic, sizeof (EP3_RCVR_RAIL_ELAN), 0, &rcvrRail->RcvrElanAddr);
-+    rcvrRail->InputQueueBase = ep_alloc_elan (&rail->Generic, EP_INPUTQ_SIZE * rcvr->InputQueueEntries, 0, &rcvrRail->InputQueueAddr);
-+    stack                    = ep_alloc_elan (&rail->Generic, EP3_STACK_SIZE, 0, &rcvrRail->ThreadStack);
-+
-+    rcvrRail->TotalDescCount = 0;
-+    rcvrRail->FreeDescCount  = 0;
-+
-+    /* Initialise the main/elan spin lock */
-+    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadLock.sl_lock), 0);
-+    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadLock.sl_seq), 0);
-+
-+    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingLock.sl_lock), 0);
-+    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingLock.sl_seq), 0);
-+
-+    /* Initialise the receive lists */
-+    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs), 0);
-+
-+    /* Initialise ThreadShouldHalt */
-+    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadShouldHalt), 0);
-+
-+    /* Initialise pointer to the ep_rcvr_rail */
-+    elan3_sdram_writeq (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, MainAddr), (unsigned long) rcvrRail);
-+
-+    /* Initialise elan visible main memory */
-+    rcvrRail->RcvrMain->ThreadLock.sl_seq  = 0;
-+    rcvrRail->RcvrMain->PendingLock.sl_seq = 0;
-+    rcvrRail->RcvrMain->PendingDescsTailp  = 0;
-+
-+    /* initialise and copy down the input queue descriptor */
-+    qdesc.q_state          = E3_QUEUE_FULL;
-+    qdesc.q_base           = rcvrRail->InputQueueAddr;
-+    qdesc.q_top            = rcvrRail->InputQueueAddr + (rcvr->InputQueueEntries-1) * EP_INPUTQ_SIZE;
-+    qdesc.q_fptr           = rcvrRail->InputQueueAddr;
-+    qdesc.q_bptr           = rcvrRail->InputQueueAddr + EP_INPUTQ_SIZE;
-+    qdesc.q_size           = EP_INPUTQ_SIZE;
-+    qdesc.q_event.ev_Count = 0;
-+    qdesc.q_event.ev_Type  = 0;
-+
-+    elan3_sdram_copyl_to_sdram (rail->Device, &qdesc, qdescs + rcvr->Service * sizeof (EP3_InputQueue), sizeof (EP3_InputQueue));
-+
-+    spin_lock_irqsave (&rcvr->Lock, flags);
-+    rcvr->Rails[rail->Generic.Number] = &rcvrRail->Generic;
-+    rcvr->RailMask |= EP_RAIL2RAILMASK (rail->Generic.Number);
-+    spin_unlock_irqrestore (&rcvr->Lock, flags);
-+
-+    /* initialise and run the Elan thread to process the queue */
-+    IssueRunThread (rail, ep3_init_thread (rail->Device, ep_symbol (&rail->ThreadCode, "ep3comms_rcvr"),
-+                                           rcvrRail->ThreadStack, stack, EP3_STACK_SIZE, 5,
-+                                           rail->RailElanAddr, rcvrRail->RcvrElanAddr, rcvrRail->RcvrMainAddr,
-+                                           EP_MSGQ_ADDR(rcvr->Service),
-+                                           rail->ElanCookies));
-+}
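[Editorial sketch] ep3rcvr_del_rail, next, stops the Elan thread with a flag-kick-wait handshake: set ThreadShouldHalt, trigger the queue event, then sleep on CleanupSleep until the trap handler records ThreadHalted. A hedged pthread rendition of the same handshake; all names here (`halt_sync`, `kick`) are illustrative, not qsnet APIs:

#include <pthread.h>

struct halt_sync {
    pthread_mutex_t lock;
    pthread_cond_t  sleep;
    int             should_halt;  /* request, read by the worker */
    int             halted;       /* acknowledgement from the worker */
};

/* Ask the worker to park, wake it so it notices, then wait for the ack. */
static void halt_worker (struct halt_sync *hs, void (*kick)(void))
{
    pthread_mutex_lock (&hs->lock);
    hs->should_halt = 1;
    kick ();                            /* analogous to IssueSetevent() */
    while (!hs->halted)
        pthread_cond_wait (&hs->sleep, &hs->lock);
    pthread_mutex_unlock (&hs->lock);
}

In the driver the "worker" is a thread running on the Elan adapter and the flag lives in SDRAM, but the ordering (flag first, then wake, then wait) is the same.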
-+
-+void
-+ep3rcvr_del_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *commsRail)
-+{
-+    EP3_RAIL         *rail     = (EP3_RAIL *) commsRail->Rail;
-+    EP3_RCVR_RAIL    *rcvrRail = (EP3_RCVR_RAIL *) rcvr->Rails[rail->Generic.Number];
-+    unsigned long     flags;
-+    struct list_head *el, *nel;
-+
-+    EPRINTF1 (DBG_RCVR, "%s: ep3rcvr_del_rail: removing rail\n", rail->Generic.Name);
-+
-+    /* flag the rail as no longer available */
-+    spin_lock_irqsave (&rcvr->Lock, flags);
-+    rcvr->RailMask &= ~EP_RAIL2RAILMASK (rail->Generic.Number);
-+    spin_unlock_irqrestore (&rcvr->Lock, flags);
-+
-+    /* mark the input queue descriptor as full */
-+    SetQueueLocked(rail, ((EP3_COMMS_RAIL *)commsRail)->QueueDescs + rcvr->Service * sizeof (EP3_InputQueue));
-+
-+    /* need to halt the thread first */
-+    /* set ThreadShouldHalt in elan memory */
-+    /* then trigger the event */
-+    /* and wait on haltWait */
-+    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadShouldHalt), TRUE);
-+
-+    IssueSetevent (rail, EP_MSGQ_ADDR(rcvr->Service) + offsetof(EP3_InputQueue, q_event));
-+
-+    spin_lock_irqsave (&rcvr->Lock, flags);
-+
-+    while (rcvrRail->ThreadHalted == 0)
-+    {
-+        rcvrRail->CleanupWaiting++;
-+        kcondvar_wait (&rcvrRail->CleanupSleep, &rcvr->Lock, &flags);
-+    }
-+
-+    /* at this point the thread is halted and it has no envelopes */
-+
-+    /* we need to wait until all the rxd's in the list that are
-+     * bound to the rail we are removing are not pending
-+     */
-+    for (;;)
-+    {
-+        int mustWait = 0;
-+
-+        list_for_each (el, &rcvr->ActiveDescList) {
-+            EP_RXD       *rxd     = list_entry (el, EP_RXD, Link);
-+            EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) rxd->RxdRail;
-+
-+            if (rxdRail && RXD_BOUND2RAIL (rxdRail, rcvrRail) && rxd->RxdMain->Len != EP_RXD_PENDING)
-+            {
-+                mustWait++;
-+                break;
-+            }
-+        }
-+
-+        if (! mustWait)
-+            break;
-+
-+        EPRINTF1 (DBG_RCVR, "%s: ep3rcvr_del_rail: waiting for active rxd's to be returned\n", rail->Generic.Name);
-+
-+        rcvrRail->CleanupWaiting++;
-+        kcondvar_wait (&rcvrRail->CleanupSleep, &rcvr->Lock, &flags);
-+    }
-+
-+    /* at this point all rxd's in the list that are bound to the deleting rail are not pending */
-+    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
-+        EP_RXD       *rxd     = list_entry (el, EP_RXD, Link);
-+        EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) rxd->RxdRail;
-+
-+        if (rxdRail && RXD_BOUND2RAIL (rxdRail, rcvrRail))
-+        {
-+            /* here we need to unbind the remaining rxd's */
-+            rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE;
-+            rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE;
-+
-+            elan3_sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 0); /* PCI write */
-+            elan3_sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count), 0); /* PCI write */
-+
-+            UnbindRxdFromRail (rxd, rxdRail);
-+            FreeRxdRail (rcvrRail, rxdRail);
-+        }
-+    }
-+    spin_unlock_irqrestore (&rcvr->Lock, flags);
-+
-+    /* wait for all rxd's for this rail to become free */
-+    spin_lock_irqsave (&rcvrRail->FreeDescLock, flags);
-+    while (rcvrRail->FreeDescCount != rcvrRail->TotalDescCount)
-+    {
-+        rcvrRail->FreeDescWaiting++;
-+        kcondvar_wait (&rcvrRail->FreeDescSleep, &rcvrRail->FreeDescLock, &flags);
-+    }
-+    spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags);
-+
-+    /* can now remove the rail as it can no longer be used */
-+    spin_lock_irqsave (&rcvr->Lock, flags);
-+    rcvr->Rails[rail->Generic.Number] = NULL;
-+    spin_unlock_irqrestore (&rcvr->Lock, flags);
-+
-+    /* all the rxd's associated with DescBlocks must be in the FreeDescList */
-+    ASSERT (rcvrRail->TotalDescCount == rcvrRail->FreeDescCount);
-+
-+    /* run through the DescBlockList deleting them */
-+    while (!list_empty (&rcvrRail->DescBlockList))
-+        FreeRxdRailBlock (rcvrRail, list_entry(rcvrRail->DescBlockList.next, EP3_RXD_RAIL_BLOCK, Link));
-+
-+    /* it had better be empty after that */
-+    ASSERT ((rcvrRail->TotalDescCount == 0) && (rcvrRail->TotalDescCount == rcvrRail->FreeDescCount));
-+
-+    ep_free_elan (&rail->Generic, rcvrRail->ThreadStack, EP3_STACK_SIZE);
-+    ep_free_elan (&rail->Generic, rcvrRail->InputQueueAddr, EP_INPUTQ_SIZE * rcvr->InputQueueEntries);
-+    ep_free_elan (&rail->Generic, rcvrRail->RcvrElanAddr, sizeof (EP3_RCVR_RAIL_ELAN));
-+    ep_free_main (&rail->Generic, rcvrRail->RcvrMainAddr, sizeof (EP3_RCVR_RAIL_MAIN));
-+
-+    KMEM_FREE (rcvrRail, sizeof (EP3_RCVR_RAIL));
-+}
-+
-+EP_RXD *
-+ep3rcvr_steal_rxd (EP_RCVR_RAIL *r)
-+{
-+    EP3_RCVR_RAIL *rcvrRail = (EP3_RCVR_RAIL *) r;
-+    EP3_RAIL      *rail     = RCVR_TO_RAIL (rcvrRail);
-+    EP_RCVR       *rcvr     = rcvrRail->Generic.Rcvr;
-+    E3_Addr        rxdElanAddr;
-+    unsigned long  flags;
-+
-+    spin_lock_irqsave (&rcvr->Lock, flags);
-+
-+    LockRcvrThread (rcvrRail);
-+    if ((rxdElanAddr = elan3_sdram_readl (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs))) != 0)
-+    {
-+        sdramaddr_t   rxdElan = ep_elan2sdram (&rail->Generic, rxdElanAddr);
-+        EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) (unsigned long) elan3_sdram_readq (rail->Device, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, MainAddr));
-+        EP_RXD       *rxd     = rxdRail->Generic.Rxd;
-+        sdramaddr_t   next;
-+
-+        EPRINTF2 (DBG_RCVR, "%s: StealRxdFromOtherRail stealing rxd %p\n", rail->Generic.Name, rxd);
-+
-+        /* Remove the RXD from the pending desc list */
-+        if ((next = elan3_sdram_readl (rail->Device, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Next))) == 0)
-+            rcvrRail->RcvrMain->PendingDescsTailp = 0;
-+        elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs), next);
-+        UnlockRcvrThread (rcvrRail);
-+
-+        UnbindRxdFromRail (rxd, rxdRail);
-+
-+        spin_unlock_irqrestore (&rcvr->Lock, flags);
-+
-+        /* Mark rxdRail as no longer active */
-+        rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE;
-+        rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE;
-+        elan3_sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 0);
-+        elan3_sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count), 0);
-+
-+        FreeRxdRail (rcvrRail, rxdRail);
-+
-+        return rxd;
-+    }
-+
-+    UnlockRcvrThread (rcvrRail);
-+    spin_unlock_irqrestore (&rcvr->Lock, flags);
-+
-+    return NULL;
-+}
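[Editorial sketch] ep3rcvr_steal_rxd above dequeues the head of a singly linked pending list that also keeps an external tail pointer, so draining the list must clear the tail pointer too. The same idiom in plain C, with hypothetical types (`struct node`, `struct pending`), assuming nothing qsnet-specific:

#include <stddef.h>

struct node { struct node *next; };

struct pending {
    struct node *head;   /* first queued element, NULL when empty */
    struct node *tail;   /* last queued element, meaningless when empty */
};

/* Remove and return the head; when the last element goes, reset the
 * tail so the next enqueue starts a fresh list. */
static struct node *dequeue_head (struct pending *q)
{
    struct node *n = q->head;

    if (n != NULL && (q->head = n->next) == NULL)
        q->tail = NULL;
    return n;
}

In the driver the "tail pointer" is PendingDescsTailp and the links live in Elan SDRAM, which is why the reads and writes go through elan3_sdram_readl/writel.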
-+
-+long
-+ep3rcvr_check (EP_RCVR_RAIL *r, long nextRunTime)
-+{
-+    EP3_RCVR_RAIL   *rcvrRail = (EP3_RCVR_RAIL *) r;
-+    EP3_RAIL        *rail     = RCVR_TO_RAIL (rcvrRail);
-+    EP_RCVR         *rcvr     = rcvrRail->Generic.Rcvr;
-+    EP_COMMS_SUBSYS *subsys   = rcvr->Subsys;
-+    EP_SYS          *sys      = subsys->Subsys.Sys;
-+    EP_RXD          *rxd;
-+    unsigned long    flags;
-+
-+    if (rcvrRail->FreeDescCount < ep_rxd_lowat && !AllocateRxdRailBlock (rcvrRail))
-+    {
-+        EPRINTF1 (DBG_RCVR, "%s: failed to grow rxd rail pool\n", rail->Generic.Name);
-+
-+        if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
-+            nextRunTime = lbolt + RESOURCE_RETRY_TIME;
-+    }
-+
-+    if (rcvrRail->ThreadWaiting && (rxd = StealRxdFromOtherRail (rcvr)) != NULL)
-+    {
-+        /* Map the receive buffer into this rail as well */
-+        EPRINTF4 (DBG_RCVR, "%s: mapping rxd->Data (%08x.%08x.%08x) into this rail\n",
-+                  rail->Generic.Name, rxd->Data.nmd_addr, rxd->Data.nmd_len, rxd->Data.nmd_attr);
-+
-+        spin_lock_irqsave (&rcvr->Lock, flags);
-+        if ((!(EP_NMD_RAILMASK (&rxd->Data) & EP_RAIL2RAILMASK(rail->Generic.Number)) &&		/* not already mapped and */
-+             ep_nmd_map_rails (sys, &rxd->Data, EP_RAIL2RAILMASK(rail->Generic.Number)) == 0) ||	/* failed to map it */
-+            ! ep3rcvr_queue_rxd (rxd, &rcvrRail->Generic))						/* or failed to queue it */
-+        {
-+            EPRINTF5 (DBG_RCVR, "%s: stolen rcvr=%p rxd=%p -> rnum=%d rcvrRail=%p (failed)\n",
-+                      rail->Generic.Name, rcvr, rxd, rail->Generic.Number, rcvrRail);
-+
-+            if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
-+                nextRunTime = lbolt + RESOURCE_RETRY_TIME;
-+        }
-+        spin_unlock_irqrestore (&rcvr->Lock, flags);
-+    }
-+
-+    return nextRunTime;
-+}
-+
-+static void
-+ep3rcvr_flush_filtering (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail)
-+{
-+    EP3_COMMS_RAIL *commsRail = (EP3_COMMS_RAIL *) rcvrRail->Generic.CommsRail;
-+    EP3_RAIL       *rail      = (EP3_RAIL *) commsRail->Generic.Rail;
-+    ELAN3_DEV      *dev       = rail->Device;
-+    sdramaddr_t     qdesc     = commsRail->QueueDescs + rcvr->Service * sizeof (EP3_InputQueue);
-+    E3_Addr         qTop      = elan3_sdram_readl (dev, qdesc + offsetof (EP3_InputQueue, q_top));
-+    E3_Addr         qBase     = elan3_sdram_readl (dev, qdesc + offsetof (EP3_InputQueue, q_base));
-+    E3_Addr         qSize     = elan3_sdram_readl (dev, qdesc + offsetof (EP3_InputQueue, q_size));
-+    E3_uint32       nfptr, qbptr;
-+    unsigned long   flags;
-+
-+    spin_lock_irqsave (&rcvr->Lock, flags);
-+    LockRcvrThread (rcvrRail); /* PCI lock */
-+
-+    nfptr = elan3_sdram_readl (dev, qdesc + offsetof (EP3_InputQueue, q_fptr));
-+    qbptr = elan3_sdram_readl (dev, qdesc + offsetof (EP3_InputQueue, q_bptr));
-+
-+    if (nfptr == qTop)
-+        nfptr = qBase;
-+    else
-+        nfptr += qSize;
-+
-+    while (nfptr != qbptr)
-+    {
-+        unsigned nodeId = elan3_sdram_readl (dev, rcvrRail->InputQueueBase + (nfptr - rcvrRail->InputQueueAddr) +
-+                                             offsetof (EP_ENVELOPE, NodeId));
-+
-+        EPRINTF3 (DBG_DISCON, "%s: ep3rcvr_flush_filtering: nodeId=%d State=%d\n", rail->Generic.Name, nodeId, rail->Generic.Nodes[nodeId].State);
-+
-+        if (rail->Generic.Nodes[nodeId].State == EP_NODE_LOCAL_PASSIVATE)
-+            elan3_sdram_writel (dev, rcvrRail->InputQueueBase + (nfptr - rcvrRail->InputQueueAddr) +
-+                                offsetof (EP_ENVELOPE, Version), 0);
-+
-+        if (nfptr == qTop)
-+            nfptr = qBase;
-+        else
-+            nfptr += qSize;
-+    }
-+
-+    UnlockRcvrThread (rcvrRail); /* PCI unlock */
-+    spin_unlock_irqrestore (&rcvr->Lock, flags);
-+}
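[Editorial sketch] ep3rcvr_flush_filtering above walks the input ring with one advance rule: step by q_size, and wrap from q_top (the last slot, not one-past-the-end) back to q_base. A standalone statement of that pointer arithmetic, with assumed names:

typedef unsigned int ring_addr_t;

/* Advance one slot around a ring described by [base, top] with
 * fixed-size slots; top addresses the final slot itself. */
static ring_addr_t ring_advance (ring_addr_t ptr, ring_addr_t base,
                                 ring_addr_t top, unsigned int slot_size)
{
    return (ptr == top) ? base : ptr + slot_size;
}

The walk starts one slot past the consumed front pointer and stops when the back pointer is reached, which is exactly the shape of the while loop above (and of the display walk in ep3rcvr_display_rcvr).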
-+
-+static void
-+ep3rcvr_flush_flushing (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail)
-+{
-+    EP3_RAIL         *rail = RCVR_TO_RAIL (rcvrRail);
-+    struct list_head *el, *nel;
-+    unsigned long     flags;
-+
-+    spin_lock_irqsave (&rcvr->Lock, flags);
-+    LockRcvrThread (rcvrRail); /* PCI lock */
-+
-+    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
-+        EP_RXD       *rxd      = list_entry (el, EP_RXD, Link);
-+        EP3_RXD_RAIL *rxdRail  = (EP3_RXD_RAIL *) rxd->RxdRail;
-+        EP_ENVELOPE  *env      = &rxd->RxdMain->Envelope;
-+        EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[env->NodeId];
-+
-+        if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || nodeRail->State != EP_NODE_LOCAL_PASSIVATE)
-+            continue;
-+
-+        EPRINTF6 (DBG_DISCON, "%s: ep3rcvr_flush_flushing: rcvr %p rxd %p state %x.%x elan node %d\n", rail->Generic.Name,
-+                  rcvr, rxd, rxdRail->RxdMain->DataEvent, rxdRail->RxdMain->DoneEvent, env->NodeId);
-+
-+        switch (rxd->State)
-+        {
-+        case EP_RXD_FREE:
-+            printk ("ep3rcvr_flush_flushing: rxd state is free but bound to a rail\n");
-+            break;
-+
-+        case EP_RXD_RECEIVE_ACTIVE:
-+            if (rxdRail->RxdMain->DataEvent == EP3_EVENT_ACTIVE) /* incomplete message receive */
-+            {
-+                EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - passive\n",
-+                          rail->Generic.Name, rcvr, rxd, env->NodeId);
-+
-+                nodeRail->MessageState |= EP_NODE_PASSIVE_MESSAGES;
-+                continue;
-+            }
-+            break;
-+
-+        default:
-+            EP_ASSERT (&rail->Generic, EP_IS_RPC(env->Attr));
-+
-+            if (!EP3_EVENT_FIRED (rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent)) /* incomplete RPC */
-+            {
-+                EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - active\n",
-+                          rail->Generic.Name, rcvr, rxd, env->NodeId);
-+
-+                EP_INVALIDATE_XID (rxd->MsgXid); /* Ignore any previous NMD map responses */
-+
-+                nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
-+                continue;
-+            }
-+            break;
-+
-+        case EP_RXD_BEEN_ABORTED:
-+            printk ("ep3rcvr_flush_flushing: rxd state is aborted but bound to a rail\n");
-+            break;
-+        }
-+
-+        EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - finished\n",
-+                  rail->Generic.Name, rcvr, rxd, env->NodeId);
-+    }
-+
-+    UnlockRcvrThread (rcvrRail); /* PCI unlock */
-+    spin_unlock_irqrestore (&rcvr->Lock, flags);
-+}
-+
-+void
-+ep3rcvr_flush_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail)
-+{
-+    EP3_RAIL *rail = RCVR_TO_RAIL(rcvrRail);
-+
-+    switch (rail->Generic.CallbackStep)
-+    {
-+    case EP_CB_FLUSH_FILTERING:
-+        ep3rcvr_flush_filtering (rcvr, rcvrRail);
-+        break;
-+
-+    case EP_CB_FLUSH_FLUSHING:
-+        ep3rcvr_flush_flushing (rcvr, rcvrRail);
-+        break;
-+    }
-+}
-+
-+void
-+ep3rcvr_failover_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail)
-+{
-+    EP_COMMS_SUBSYS  *subsys = rcvr->Subsys;
-+    EP3_RAIL         *rail   = RCVR_TO_RAIL (rcvrRail);
-+    ELAN3_DEV        *dev    = rail->Device;
-+    struct list_head *el, *nel;
-+    unsigned long     flags;
-+#ifdef SUPPORT_RAIL_FAILOVER
-+    EP_SYS           *sys    = subsys->Subsys.Sys;
-+#endif
-+
-+    spin_lock_irqsave (&rcvr->Lock, flags);
-+    LockRcvrThread (rcvrRail); /* PCI lock */
-+
-+    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
-+        EP_RXD       *rxd      = list_entry (el, EP_RXD, Link);
-+        EP3_RXD_RAIL *rxdRail  = (EP3_RXD_RAIL *) rxd->RxdRail;
-+        EP_ENVELOPE  *env      = &rxd->RxdMain->Envelope;
-+        EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[env->NodeId];
-+#ifdef SUPPORT_RAIL_FAILOVER
-+        EP_MANAGER_MSG_BODY msgBody;
-+        EP_NODE      *node     = &sys->Nodes[env->NodeId];
-+#endif
-+
-+        if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || nodeRail->State != EP_NODE_PASSIVATED)
-+            continue;
-+
-+        EPRINTF6 (DBG_FAILOVER, "%s: ep3rcvr_failover_callback: rcvr %p rxd %p elan node %d state %x.%x\n", rail->Generic.Name, rcvr, rxd, env->NodeId,
-+                  rxdRail->RxdMain->DataEvent, rxdRail->RxdMain->DoneEvent);
-+
-+        switch (rxd->State)
-+        {
-+        case EP_RXD_FREE:
-+            printk ("ep3rcvr_failover_callback: rxd state is free but bound to a rail\n");
-+            break;
-+
-+        case EP_RXD_RECEIVE_ACTIVE:
-+            if (rxdRail->RxdMain->DataEvent == EP3_EVENT_ACTIVE) /* incomplete message receive */
-+            {
-+                EPRINTF4 (DBG_FAILOVER, "%s: ep3rcvr_failover_callback: rcvr %p rxd %p nodeId %d - unbind\n", rail->Generic.Name, rcvr, rxd, env->NodeId);
-+
-+                UnbindRxdFromRail (rxd, rxdRail);
-+
-+                /* clear the done flags - so that it will be ignored if an event interrupt is generated */
-+                rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE;
-+                rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE;
-+
-+                /* clear the data event - the done event should already be zero */
-+                elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 0); /* PCI write */
-+
-+                FreeRxdRail (rcvrRail, rxdRail);
-+
-+                /* epcomms thread will requeue on different rail */
-+                ep_kthread_schedule (&subsys->Thread, lbolt);
-+                continue;
-+            }
-+            break;
-+
-+        default:
-+            EP_ASSERT (&rail->Generic, EP_IS_RPC(env->Attr));
-+
-+#ifdef SUPPORT_RAIL_FAILOVER
-+            if (!EP3_EVENT_FIRED (rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent) && !(EP_IS_NO_FAILOVER(env->Attr))) /* incomplete RPC, which can be failed over */
-+            {
-+                EPRINTF7 (DBG_FAILOVER, "%s: ep3rcvr_failover_callback: rxd %p State %x.%x Xid %llx MsgXid %llx nodeId %d - failover\n",
-+                          rail->Generic.Name, rxd, rxdRail->RxdMain->DataEvent, rxdRail->RxdMain->DoneEvent,
-+                          (long long) env->Xid.Unique, (long long) rxd->MsgXid.Unique, env->NodeId);
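/* [Editorial note] On the first failover attempt for this rxd, a fresh
 * transaction id is allocated below so the FAILOVER_REQUEST exchange can
 * be matched against rxd->MsgXid when the response arrives. */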
-+
-+                if (EP_XID_INVALID(rxd->MsgXid))
-+                    rxd->MsgXid = ep_xid_cache_alloc (sys, &rcvr->XidCache);
-+
-+                /* XXXX maybe only send the message if the node failover retry is now ? */
-+                msgBody.Failover.Xid      = env->Xid;
-+                msgBody.Failover.Railmask = node->ConnectedRails;
-+
-+                ep_send_message (&rail->Generic, env->NodeId, EP_MANAGER_MSG_TYPE_FAILOVER_REQUEST, rxd->MsgXid, &msgBody);
-+
-+                nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
-+                continue;
-+            }
-+#endif
-+            break;
-+
-+        case EP_RXD_BEEN_ABORTED:
-+            printk ("ep3rcvr_failover_callback: rxd state is aborted but bound to a rail\n");
-+            break;
-+        }
-+
-+        EPRINTF3 (DBG_FAILOVER, "%s: ep3rcvr_failover_callback: rxd %p nodeId %d - finished\n", rail->Generic.Name, rxd, env->NodeId);
-+    }
-+
-+    UnlockRcvrThread (rcvrRail); /* PCI unlock */
-+    spin_unlock_irqrestore (&rcvr->Lock, flags);
-+}
-+
-+void
-+ep3rcvr_disconnect_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail)
-+{
-+    EP3_RAIL         *rail = RCVR_TO_RAIL (rcvrRail);
-+    ELAN3_DEV        *dev  = rail->Device;
-+    struct list_head *el, *nel;
-+    struct list_head  rxdList;
-+    unsigned long     flags;
-+
-+    INIT_LIST_HEAD (&rxdList);
-+
-+    spin_lock_irqsave (&rcvr->Lock, flags);
-+    LockRcvrThread (rcvrRail); /* PCI lock */
-+
-+    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
-+        EP_RXD       *rxd      = list_entry (el, EP_RXD, Link);
-+        EP3_RXD_RAIL *rxdRail  = (EP3_RXD_RAIL *) rxd->RxdRail;
-+        EP_ENVELOPE  *env      = &rxd->RxdMain->Envelope;
-+        EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[env->NodeId];
-+
-+        if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || nodeRail->State != EP_NODE_DISCONNECTING)
-+            continue;
-+
-+        EPRINTF4 (DBG_DISCON, "%s: ep3rcvr_disconnect_callback: rcvr %p rxd %p elan node %d\n", rail->Generic.Name, rcvr, rxd, env->NodeId);
-+
-+        switch (rxd->State)
-+        {
-+        case EP_RXD_FREE:
-+            printk ("ep3rcvr_disconnect_callback: rxd state is free but bound to a rail\n");
-+            break;
-+
-+        case EP_RXD_RECEIVE_ACTIVE:
-+            if (rxdRail->RxdMain->DataEvent == EP3_EVENT_ACTIVE) /* incomplete message receive */
-+            {
-+                EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - unbind\n", rail->Generic.Name, rcvr, rxd, env->NodeId);
-+
-+                UnbindRxdFromRail (rxd, rxdRail);
-+
-+                /* clear the done flags - so that it will be ignored if an event interrupt is generated */
-+                rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE;
-+                rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE;
-+
-+                /* clear the data event - the done event should already be zero */
-+                elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 0); /* PCI write */
-+
-+                FreeRxdRail (rcvrRail, rxdRail);
-+
-+                /* remark it as pending if it was partially received */
-+                rxd->RxdMain->Len = EP_RXD_PENDING;
-+
-+                /* epcomms thread will requeue on different rail */
-+                ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
-+                continue;
-+            }
-+            break;
-+
-+        default:
-+            EP_ASSERT (&rail->Generic, EP_IS_RPC(env->Attr));
-+
-+            if (!EP3_EVENT_FIRED (rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent)) /* incomplete RPC */
-+            {
-+                EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - not able to failover\n",
-+                          rail->Generic.Name, rcvr, rxd, env->NodeId);
-+
-+                /* Mark as no longer active */
-+                rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE;
-+                rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE;
-+
-+                elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 0); /* PCI write */
-+                elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count), 0); /* PCI write */
-+
-+                UnbindRxdFromRail (rxd, rxdRail);
-+                FreeRxdRail (rcvrRail, rxdRail);
-+
-+                /* Ignore any previous NMD/failover responses */
-+                EP_INVALIDATE_XID (rxd->MsgXid);
-+
-+                /* Remove from active list */
-+                list_del (&rxd->Link);
-+
-+                if (rxd->State == EP_RXD_RPC_IN_PROGRESS) /* owned by user .... */
-+                    rxd->State = EP_RXD_BEEN_ABORTED;
-+                else /* queue for completion */
-+                {
-+                    rxd->RxdMain->Len = EP_CONN_RESET; /* ensure ep_rxd_status() fails */
-+                    list_add_tail (&rxd->Link, &rxdList);
-+                }
-+                continue;
-+            }
-+            break;
-+
-+        case EP_RXD_BEEN_ABORTED:
-+            printk ("ep3rcvr_disconnect_callback: rxd state is aborted but bound to a rail\n");
-+            break;
-+        }
-+
-+        EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - finished\n",
-+                  rail->Generic.Name, rcvr, rxd, env->NodeId);
-+    }
-+
-+    UnlockRcvrThread (rcvrRail); /* PCI unlock */
-+    spin_unlock_irqrestore (&rcvr->Lock, flags);
-+
-+    while (! list_empty (&rxdList))
-+    {
-+        EP_RXD *rxd = list_entry (rxdList.next, EP_RXD, Link);
-+
-+        list_del (&rxd->Link);
-+
-+        rxd->Handler (rxd);
-+    }
-+}
-+
-+void
-+ep3rcvr_display_rxd (DisplayInfo *di, EP_RXD_RAIL *r)
-+{
-+    EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) r;
-+    sdramaddr_t   rxdElan = rxdRail->RxdElan;
-+    EP3_RAIL     *rail    = RCVR_TO_RAIL (rxdRail->Generic.RcvrRail);
-+    ELAN3_DEV    *dev     = rail->Device;
-+
-+    (di->func)(di->arg, "      ChainEvent=%x.%x %x.%x\n",
-+               elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[0].ev_Count)),
-+               elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[0].ev_Type)),
-+               elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[1].ev_Count)),
-+               elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[1].ev_Type)));
-+    (di->func)(di->arg, "      ChainEvent=%x.%x %x.%x\n",
-+               elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[2].ev_Count)),
-+               elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[2].ev_Type)),
-+               elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[3].ev_Count)),
-+               elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[3].ev_Type)));
-+    (di->func)(di->arg, "      DataEvent=%x.%x DoneEvent=%x.%x\n",
-+               elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)),
-+               elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Type)),
-+               elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)),
-+               elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Type)));
-+    (di->func)(di->arg, "      Data=%x Len=%x\n",
-+               elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_addr)),
-+               elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_len)));
-+}
-+
-+void
-+ep3rcvr_display_rcvr (DisplayInfo *di, EP_RCVR_RAIL *r)
-+{
-+    EP3_RCVR_RAIL  *rcvrRail  = (EP3_RCVR_RAIL *) r;
-+    EP3_COMMS_RAIL *commsRail = (EP3_COMMS_RAIL *) rcvrRail->Generic.CommsRail;
-+    EP3_RAIL       *rail      = RCVR_TO_RAIL (rcvrRail);
-+    ELAN3_DEV      *dev       = rail->Device;
-+    sdramaddr_t     queue     = commsRail->QueueDescs + rcvrRail->Generic.Rcvr->Service * sizeof (EP3_InputQueue);
-+    E3_Addr         qbase     = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_base));
-+    E3_Addr         qtop      = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue,
q_top)); -+ E3_uint32 qsize = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_size)); -+ int freeCount = 0; -+ int blockCount = 0; -+ unsigned long flags; -+ struct list_head *el; -+ -+ spin_lock_irqsave (&rcvrRail->FreeDescLock, flags); -+ list_for_each (el, &rcvrRail->FreeDescList) -+ freeCount++; -+ list_for_each (el, &rcvrRail->DescBlockList) -+ blockCount++; -+ spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags); -+ -+ (di->func)(di->arg, " Rail %d FreeDesc %d (%d) Total %d Blocks %d %s\n", -+ rail->Generic.Number, rcvrRail->FreeDescCount, freeCount, rcvrRail->TotalDescCount, blockCount, -+ rcvrRail->ThreadWaiting ? "ThreadWaiting" : ""); -+ -+ (di->func)(di->arg, " InputQueue state=%x bptr=%x size=%x top=%x base=%x fptr=%x\n", -+ elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_state)), -+ elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_bptr)), -+ elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_size)), -+ elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_top)), -+ elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_base)), -+ elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_fptr))); -+ (di->func)(di->arg, " event=%x.%x [%x.%x] wevent=%x.%x\n", -+ elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_event.ev_Type)), -+ elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_event.ev_Count)), -+ elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_event.ev_Source)), -+ elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_event.ev_Dest)), -+ elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_wevent)), -+ elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_wcount))); -+ -+ LockRcvrThread (rcvrRail); -+ { -+ E3_Addr nfptr = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_fptr)); -+ EP_ENVELOPE env; -+ -+ if (nfptr == qtop) -+ nfptr = qbase; -+ else -+ nfptr += qsize; -+ -+ while (nfptr != elan3_sdram_readl (dev, queue + offsetof (E3_Queue, q_bptr))) -+ { -+ elan3_sdram_copyl_from_sdram (dev, rcvrRail->InputQueueBase + (nfptr - rcvrRail->InputQueueAddr), -+ &env, sizeof (EP_ENVELOPE)); -+ -+ (di->func)(di->arg, " ENVELOPE Version=%x Attr=%x Xid=%08x.%08x.%016llx\n", -+ env.Version, env.Attr, env.Xid.Generation, env.Xid.Handle, (long long) env.Xid.Unique); -+ (di->func)(di->arg, " NodeId=%x Range=%x TxdRail=%x TxdMain=%x.%x.%x\n", -+ env.NodeId, env.Range, env.TxdRail, env.TxdMain.nmd_addr, -+ env.TxdMain.nmd_len, env.TxdMain.nmd_attr); -+ -+ -+ if (nfptr == qtop) -+ nfptr = qbase; -+ else -+ nfptr += qsize; -+ } -+ } -+ UnlockRcvrThread (rcvrRail); -+} -+ -+void -+ep3rcvr_fillout_rail_stats(EP_RCVR_RAIL *rcvr_rail, char *str) { -+ /* no stats here yet */ -+ /* EP3_RCVR_RAIL * ep4rcvr_rail = (EP3_RCVR_RAIL *) rcvr_rail; */ -+} -+ -diff -urN clean/drivers/net/qsnet/ep/epcommsRx_elan4.c linux-2.6.9/drivers/net/qsnet/ep/epcommsRx_elan4.c ---- clean/drivers/net/qsnet/ep/epcommsRx_elan4.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/epcommsRx_elan4.c 2005-07-20 07:35:37.000000000 -0400 -@@ -0,0 +1,1765 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ *
-+ *    For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ident "@(#)$Id: epcommsRx_elan4.c,v 1.35.2.1 2005/07/20 11:35:37 mike Exp $"
-+/*      $Source: /cvs/master/quadrics/epmod/epcommsRx_elan4.c,v $ */
-+
-+#include
-+
-+#include
-+#include
-+#include
-+
-+#include "debug.h"
-+#include "kcomm_vp.h"
-+#include "kcomm_elan4.h"
-+#include "epcomms_elan4.h"
-+
-+#include
-+
-+#define RCVR_TO_COMMS(rcvrRail)		((EP4_COMMS_RAIL *) ((EP_RCVR_RAIL *) rcvrRail)->CommsRail)
-+#define RCVR_TO_RAIL(rcvrRail)		((EP4_RAIL *) ((EP_RCVR_RAIL *) rcvrRail)->CommsRail->Rail)
-+#define RCVR_TO_DEV(rcvrRail)		(RCVR_TO_RAIL(rcvrRail)->r_ctxt.ctxt_dev)
-+#define RCVR_TO_SUBSYS(rcvrRail)	(((EP_RCVR_RAIL *) rcvrRail)->Rcvr->Subsys)
-+
-+#define RXD_TO_RCVR(rxdRail)		((EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail)
-+#define RXD_TO_RAIL(rxdRail)		RCVR_TO_RAIL(RXD_TO_RCVR(rxdRail))
-+
-+static void rxd_interrupt (EP4_RAIL *rail, void *arg);
-+
-+static __inline__ void
-+__ep4_rxd_assert_free (EP4_RXD_RAIL *rxdRail, const char *file, const int line)
-+{
-+    EP4_RCVR_RAIL *rcvrRail = RXD_TO_RCVR(rxdRail);
-+    ELAN4_DEV     *dev      = RCVR_TO_DEV(rcvrRail);
-+    register int   i, failed = 0;
-+
-+    for (i = 0; i <= EP_MAXFRAG; i++)
-+	if (((rxdRail)->rxd_main->rxd_sent[i] != EP4_STATE_FREE))
-+	    failed |= (1 << i);
-+
-+    if (((rxdRail)->rxd_main->rxd_failed != EP4_STATE_FREE))
-+	failed |= (1 << 5);
-+    if (((rxdRail)->rxd_main->rxd_done != EP4_STATE_FREE))
-+	failed |= (1 << 6);
-+
-+    if (sdram_assert)
-+    {
-+	if (((elan4_sdram_readq (RXD_TO_RAIL(rxdRail)->r_ctxt.ctxt_dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CountAndType)) >> 32) != 0))
-+	    failed |= (1 << 7);
-+	for (i = 0; i < EP_MAXFRAG; i++)
-+	    if (((elan4_sdram_readq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[i].ev_CountAndType)) >> 32) != 0))
-+		failed |= (1 << (8 + i));
-+	if (((elan4_sdram_readq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType)) >> 32) != 0))
-+	    failed |= (1 << 12);
-+	if (((int)(elan4_sdram_readq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType)) >> 32) != -32))
-+	    failed |= (1 << 13);
-+    }
-+
-+    if (failed)
-+    {
-+	printk ("__ep4_rxd_assert_free: failed=%x rxdRail=%p %s - %d\n", failed, rxdRail, file, line);
-+
-+	ep_debugf (DBG_DEBUG, "__ep4_rxd_assert_free: failed=%x rxdRail=%p %s - %d\n", failed, rxdRail, file, line);
-+	ep4rcvr_display_rxd (&di_ep_debug, &rxdRail->rxd_generic);
-+
-+	for (i = 0; i <= EP_MAXFRAG; i++)
-+	    (rxdRail)->rxd_main->rxd_sent[i] = EP4_STATE_FREE;
-+
-+	(rxdRail)->rxd_main->rxd_failed = EP4_STATE_FREE;
-+	(rxdRail)->rxd_main->rxd_done   = EP4_STATE_FREE;
-+
-+	if (sdram_assert)
-+	{
-+	    elan4_sdram_writew (RXD_TO_RAIL(rxdRail)->r_ctxt.ctxt_dev,
-+				(rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CountAndType) + 4, 0);
-+
-+	    for (i = 0; i < EP_MAXFRAG; i++)
-+		elan4_sdram_writew (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[i].ev_CountAndType) + 4, 0);
-+	    elan4_sdram_writew (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType) + 4, 0);
-+	    elan4_sdram_writew (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType) + 4, -32);
-+	}
-+	EP_ASSFAIL (RCVR_TO_RAIL(rcvrRail), "__ep4_rxd_assert_free");
-+    }
-+}
-+
-+static __inline__ void
-+__ep4_rxd_assert_pending(EP4_RXD_RAIL *rxdRail, const char *file, const int line)
-+{
-+    EP4_RCVR_RAIL *rcvrRail = RXD_TO_RCVR(rxdRail);
-+    register int   failed   = 0;
-+
-+    
failed |= ((rxdRail)->rxd_main->rxd_done != EP4_STATE_ACTIVE); -+ -+ if (failed) -+ { -+ printk ("__ep4_rxd_assert_pending: %s - %d\n", file, line); -+ -+ ep_debugf (DBG_DEBUG, "__ep4_rxd_assert_pending: %s - %d\n", file, line); -+ ep4rcvr_display_rxd (&di_ep_debug, &rxdRail->rxd_generic); -+ -+ (rxdRail)->rxd_main->rxd_done = EP4_STATE_ACTIVE; -+ -+ EP_ASSFAIL (RCVR_TO_RAIL(rcvrRail), "__ep4_rxd_assert_pending"); -+ } -+} -+ -+static __inline__ void -+__ep4_rxd_assert_private(EP4_RXD_RAIL *rxdRail, const char *file, const int line) -+{ -+ EP4_RCVR_RAIL *rcvrRail = RXD_TO_RCVR(rxdRail); -+ ELAN4_DEV *dev = RCVR_TO_DEV(rcvrRail); -+ register int failed = 0; -+ -+ if (((rxdRail)->rxd_main->rxd_failed != EP4_STATE_ACTIVE)) failed |= (1 << 0); -+ if (((rxdRail)->rxd_main->rxd_done != EP4_STATE_PRIVATE)) failed |= (1 << 1); -+ -+ if (sdram_assert) -+ { -+ if (((elan4_sdram_readq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType)) >> 32) != 0)) failed |= (1 << 2); -+ if (((int) (elan4_sdram_readq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType)) >> 32) != -32)) failed |= (1 << 3); -+ } -+ -+ if (failed) -+ { -+ printk ("__ep4_rxd_assert_private: failed=%x rxdRail=%p %s - %d\n", failed, rxdRail, file, line); -+ -+ ep_debugf (DBG_DEBUG, "__ep4_rxd_assert_private: failed=%x rxdRail=%p %s - %d\n", failed, rxdRail, file, line); -+ ep4rcvr_display_rxd (&di_ep_debug, &rxdRail->rxd_generic); -+ -+ (rxdRail)->rxd_main->rxd_failed = EP4_STATE_ACTIVE; -+ (rxdRail)->rxd_main->rxd_done = EP4_STATE_PRIVATE; -+ -+ if (sdram_assert) -+ { -+ elan4_sdram_writew (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType) + 4, 0); -+ elan4_sdram_writew (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType) + 4, -32); -+ } -+ -+ EP_ASSFAIL (RCVR_TO_RAIL(rcvrRail), "__ep4_rxd_assert_private"); -+ } -+} -+ -+static __inline__ void -+__ep4_rxd_private_to_free (EP4_RXD_RAIL *rxdRail) -+{ -+ register int i; -+ -+ for (i = 0; i <= EP_MAXFRAG; i++) -+ rxdRail->rxd_main->rxd_sent[i] = EP4_STATE_FREE; -+ -+ rxdRail->rxd_main->rxd_failed = EP4_STATE_FREE; -+ rxdRail->rxd_main->rxd_done = EP4_STATE_FREE; -+} -+ -+static __inline__ void -+__ep4_rxd_force_private (EP4_RXD_RAIL *rxdRail) -+{ -+ EP4_RAIL *rail = RXD_TO_RAIL(rxdRail); -+ ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev; -+ -+ (rxdRail)->rxd_main->rxd_failed = EP4_STATE_ACTIVE; -+ (rxdRail)->rxd_main->rxd_done = EP4_STATE_PRIVATE; -+ -+ if (sdram_assert) -+ elan4_sdram_writeq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType), -+ E4_EVENT_INIT_VALUE(0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS)); -+} -+ -+#define EP4_RXD_ASSERT_FREE(rxdRail) __ep4_rxd_assert_free(rxdRail, __FILE__, __LINE__) -+#define EP4_RXD_ASSERT_PENDING(rxdRail) __ep4_rxd_assert_pending(rxdRail, __FILE__, __LINE__) -+#define EP4_RXD_ASSERT_PRIVATE(rxdRail) __ep4_rxd_assert_private(rxdRail, __FILE__, __LINE__) -+#define EP4_RXD_PRIVATE_TO_FREE(rxdRail) __ep4_rxd_private_to_free(rxdRail) -+#define EP4_RXD_FORCE_PRIVATE(rxdRail) __ep4_rxd_force_private(rxdRail) -+ -+static int -+alloc_rxd_block (EP4_RCVR_RAIL *rcvrRail) -+{ -+ EP4_RAIL *rail = RCVR_TO_RAIL (rcvrRail); -+ ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev; -+ EP4_RXD_RAIL_BLOCK *blk; -+ EP4_RXD_RAIL_MAIN *rxdMain; -+ EP_ADDR rxdMainAddr; -+ sdramaddr_t rxdElan; -+ EP_ADDR rxdElanAddr; -+ EP4_RXD_RAIL *rxdRail; -+ unsigned long flags; -+ int i, j; -+ -+ KMEM_ZALLOC (blk, EP4_RXD_RAIL_BLOCK 
*, sizeof (EP4_RXD_RAIL_BLOCK), 1);
-+
-+    if (blk == NULL)
-+	return 0;
-+
-+    if ((rxdElan = ep_alloc_elan (&rail->r_generic, EP4_RXD_RAIL_ELAN_SIZE * EP4_NUM_RXD_PER_BLOCK, 0, &rxdElanAddr)) == (sdramaddr_t) 0)
-+    {
-+	KMEM_FREE (blk, sizeof (EP4_RXD_RAIL_BLOCK));
-+	return 0;
-+    }
-+
-+    if ((rxdMain = ep_alloc_main (&rail->r_generic, EP4_RXD_RAIL_MAIN_SIZE * EP4_NUM_RXD_PER_BLOCK, 0, &rxdMainAddr)) == (EP4_RXD_RAIL_MAIN *) NULL)
-+    {
-+	ep_free_elan (&rail->r_generic, rxdElanAddr, EP4_RXD_RAIL_ELAN_SIZE * EP4_NUM_RXD_PER_BLOCK);
-+	KMEM_FREE (blk, sizeof (EP4_RXD_RAIL_BLOCK));
-+	return 0;
-+    }
-+
-+    if (ep4_reserve_dma_retries (rail, EP4_NUM_RXD_PER_BLOCK, 0) != 0)
-+    {
-+	ep_free_main (&rail->r_generic, blk->blk_rxds[0].rxd_main_addr, EP4_RXD_RAIL_MAIN_SIZE * EP4_NUM_RXD_PER_BLOCK);
-+	ep_free_elan (&rail->r_generic, rxdElanAddr, EP4_RXD_RAIL_ELAN_SIZE * EP4_NUM_RXD_PER_BLOCK);
-+	KMEM_FREE (blk, sizeof (EP4_RXD_RAIL_BLOCK));
-+
-+	return 0;
-+    }
-+
-+    for (rxdRail = &blk->blk_rxds[0], i = 0; i < EP4_NUM_RXD_PER_BLOCK; i++, rxdRail++)
-+    {
-+	rxdRail->rxd_generic.RcvrRail = &rcvrRail->rcvr_generic;
-+	rxdRail->rxd_elan             = rxdElan;
-+	rxdRail->rxd_elan_addr        = rxdElanAddr;
-+	rxdRail->rxd_main             = rxdMain;
-+	rxdRail->rxd_main_addr        = rxdMainAddr;
-+
-+	/* reserve 128 bytes of "event" cq space for the chained STEN packets */
-+	if ((rxdRail->rxd_ecq = ep4_get_ecq (rail, EP4_ECQ_EVENT, EP4_RXD_STEN_CMD_NDWORDS)) == NULL)
-+	    goto failed;
-+
-+	/* allocate a single word of "setevent" command space */
-+	if ((rxdRail->rxd_scq = ep4_get_ecq (rail, EP4_ECQ_SINGLE, 1)) == NULL)
-+	{
-+	    ep4_put_ecq (rail, rxdRail->rxd_ecq, EP4_RXD_STEN_CMD_NDWORDS);
-+	    goto failed;
-+	}
-+
-+	/* initialise the completion events */
-+	for (j = 0; j <= EP_MAXFRAG; j++)
-+	    rxdMain->rxd_sent[j] = EP4_STATE_FREE;
-+
-+	rxdMain->rxd_done   = EP4_STATE_FREE;
-+	rxdMain->rxd_failed = EP4_STATE_FREE;
-+
-+	/* initialise the scq for the thread */
-+	rxdMain->rxd_scq = rxdRail->rxd_scq->ecq_addr;
-+
-+	/* initialise the "start" event to copy the first STEN packet into the command queue */
-+	elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CountAndType),
-+			    E4_EVENT_INIT_VALUE(0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_START_CMD_NDWORDS));
-+	elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CopySource),
-+			    rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0]));
-+	elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CopyDest),
-+			    rxdRail->rxd_ecq->ecq_addr);
-+
-+	/* initialise the "chain" events to copy the next STEN packet into the command queue */
-+	for (j = 0; j < EP_MAXFRAG; j++)
-+	{
-+	    elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[j].ev_CountAndType),
-+				E4_EVENT_INIT_VALUE(0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS));
-+	    elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[j].ev_CopySource),
-+				rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j+1]));
-+	    elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[j].ev_CopyDest),
-+				rxdRail->rxd_ecq->ecq_addr);
-+	}
-+
-+	/* initialise the portions of the sten packets which don't change */
-+	for (j = 0; j < EP_MAXFRAG+1; j++)
-+	{
-+	    if (j < EP_MAXFRAG)
-+		elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_dma_dstEvent),
-+				    rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[j]));
-+	    else
-+		elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, 
rxd_sten[j].c_dma_dstEvent), -+ rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_done)); -+ -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_ok_guard), -+ GUARD_CMD | GUARD_CHANNEL (1) | GUARD_TEST(0, PACK_OK) | GUARD_RESET (EP4_RXD_STEN_RETRYCOUNT)); -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_ok_write_cmd), -+ WRITE_DWORD_CMD | (rxdMainAddr + offsetof (EP4_RXD_RAIL_MAIN, rxd_sent[j]))); -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_ok_write_value), -+ EP4_STATE_FINISHED); -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_fail_guard), -+ GUARD_CMD | GUARD_CHANNEL (1) | GUARD_TEST(0, RESTART_COUNT_ZERO) | GUARD_RESET (EP4_RXD_STEN_RETRYCOUNT)); -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_fail_setevent), -+ SET_EVENT_CMD | (rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed))); -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_nop_cmd), -+ NOP_CMD); -+ } -+ -+ /* register a main interrupt cookie */ -+ ep4_register_intcookie (rail, &rxdRail->rxd_intcookie, rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_done), -+ rxd_interrupt, rxdRail); -+ -+ /* initialise the command stream for the done event */ -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done_cmd.c_write_cmd), -+ WRITE_DWORD_CMD | (rxdMainAddr + offsetof (EP4_RXD_RAIL_MAIN, rxd_done))); -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done_cmd.c_write_value), -+ EP4_STATE_FINISHED); -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done_cmd.c_intr_cmd), -+ INTERRUPT_CMD | (rxdRail->rxd_intcookie.int_val << E4_MAIN_INT_SHIFT)); -+ -+ /* initialise the command stream for the fail event */ -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed_cmd.c_write_cmd), -+ WRITE_DWORD_CMD | (rxdMainAddr + offsetof (EP4_RXD_RAIL_MAIN, rxd_failed))); -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed_cmd.c_write_value), -+ EP4_STATE_FAILED); -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed_cmd.c_intr_cmd), -+ INTERRUPT_CMD | (rxdRail->rxd_intcookie.int_val << E4_MAIN_INT_SHIFT)); -+ -+ /* initialise the done and fail events */ -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType), -+ E4_EVENT_INIT_VALUE(0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS)); -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CopySource), -+ rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_done_cmd)); -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CopyDest), -+ rxdRail->rxd_ecq->ecq_addr); -+ -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType), -+ E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS)); -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CopySource), -+ rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed_cmd)); -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CopyDest), -+ rxdRail->rxd_ecq->ecq_addr); -+ -+ /* initialise the pointer to the main memory portion */ -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_main), -+ rxdMainAddr); -+ -+ /* move onto next descriptor */ -+ rxdElan += EP4_RXD_RAIL_ELAN_SIZE; -+ rxdElanAddr += EP4_RXD_RAIL_ELAN_SIZE; -+ 
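/* the main memory descriptor advances in step with the elan descriptor */ -+ 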
rxdMain = (EP4_RXD_RAIL_MAIN *) ((unsigned long) rxdMain + EP4_RXD_RAIL_MAIN_SIZE); -+ rxdMainAddr += EP4_RXD_RAIL_MAIN_SIZE; -+ } -+ -+ spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags); -+ -+ list_add (&blk->blk_link, &rcvrRail->rcvr_blocklist); -+ -+ rcvrRail->rcvr_totalcount += EP4_NUM_RXD_PER_BLOCK; -+ rcvrRail->rcvr_freecount += EP4_NUM_RXD_PER_BLOCK; -+ -+ for (i = 0; i < EP4_NUM_RXD_PER_BLOCK; i++) -+ list_add (&blk->blk_rxds[i].rxd_generic.Link, &rcvrRail->rcvr_freelist); -+ -+ spin_unlock_irqrestore (&rcvrRail->rcvr_freelock, flags); -+ -+ return 1; -+ -+ failed: -+ while (--i >= 0) -+ { -+ rxdRail--; -+ -+ ep4_put_ecq (rail, rxdRail->rxd_ecq, EP4_RXD_STEN_CMD_NDWORDS); -+ ep4_put_ecq (rail, rxdRail->rxd_scq, 1); -+ -+ ep4_deregister_intcookie (rail, &rxdRail->rxd_intcookie); -+ } -+ -+ ep4_release_dma_retries (rail, EP4_NUM_RXD_PER_BLOCK); -+ -+ ep_free_main (&rail->r_generic, blk->blk_rxds[0].rxd_main_addr, EP4_RXD_RAIL_MAIN_SIZE * EP4_NUM_RXD_PER_BLOCK); -+ ep_free_elan (&rail->r_generic, blk->blk_rxds[0].rxd_elan_addr, EP4_RXD_RAIL_ELAN_SIZE * EP4_NUM_RXD_PER_BLOCK); -+ KMEM_FREE (blk, sizeof (EP4_RXD_RAIL_BLOCK)); -+ -+ return 0; -+} -+ -+ -+static void -+free_rxd_block (EP4_RCVR_RAIL *rcvrRail, EP4_RXD_RAIL_BLOCK *blk) -+{ -+ EP4_RAIL *rail = RCVR_TO_RAIL (rcvrRail); -+ EP4_RXD_RAIL *rxdRail; -+ unsigned long flags; -+ int i; -+ -+ spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags); -+ -+ list_del (&blk->blk_link); -+ -+ rcvrRail->rcvr_totalcount -= EP4_NUM_RXD_PER_BLOCK; -+ -+ for (rxdRail = &blk->blk_rxds[0], i = 0; i < EP4_NUM_RXD_PER_BLOCK; i++, rxdRail++) -+ { -+ rcvrRail->rcvr_freecount--; -+ -+ ep4_put_ecq (rail, rxdRail->rxd_ecq, EP4_RXD_STEN_CMD_NDWORDS); -+ ep4_put_ecq (rail, rxdRail->rxd_scq, 1); -+ -+ ep4_deregister_intcookie (rail, &rxdRail->rxd_intcookie); -+ -+ list_del (&rxdRail->rxd_generic.Link); -+ } -+ spin_unlock_irqrestore (&rcvrRail->rcvr_freelock, flags); -+ -+ ep4_release_dma_retries (rail, EP4_NUM_RXD_PER_BLOCK); -+ -+ ep_free_main (&rail->r_generic, blk->blk_rxds[0].rxd_main_addr, EP4_RXD_RAIL_MAIN_SIZE * EP4_NUM_RXD_PER_BLOCK); -+ ep_free_elan (&rail->r_generic, blk->blk_rxds[0].rxd_elan_addr, EP4_RXD_RAIL_ELAN_SIZE * EP4_NUM_RXD_PER_BLOCK); -+ -+ KMEM_FREE (blk, sizeof (EP4_RXD_RAIL_BLOCK)); -+} -+ -+static EP4_RXD_RAIL * -+get_rxd_rail (EP4_RCVR_RAIL *rcvrRail) -+{ -+ EP_COMMS_SUBSYS *subsys = RCVR_TO_SUBSYS(rcvrRail); -+ EP4_RXD_RAIL *rxdRail; -+ unsigned long flags; -+ int low_on_rxds; -+ -+ spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags); -+ -+ if (list_empty (&rcvrRail->rcvr_freelist)) -+ rxdRail = NULL; -+ else -+ { -+ rxdRail = list_entry (rcvrRail->rcvr_freelist.next, EP4_RXD_RAIL, rxd_generic.Link); -+ -+ EP4_RXD_ASSERT_FREE(rxdRail); -+ -+ list_del (&rxdRail->rxd_generic.Link); -+ -+ rcvrRail->rcvr_freecount--; -+ } -+ /* Wake up the descriptor primer thread if there aren't many left */ -+ low_on_rxds = (rcvrRail->rcvr_freecount < ep_rxd_lowat); -+ -+ spin_unlock_irqrestore (&rcvrRail->rcvr_freelock, flags); -+ -+ if (low_on_rxds) -+ ep_kthread_schedule (&subsys->Thread, lbolt); -+ -+ return (rxdRail); -+} -+ -+static void -+free_rxd_rail (EP4_RCVR_RAIL *rcvrRail, EP4_RXD_RAIL *rxdRail) -+{ -+ unsigned long flags; -+ -+ EP4_RXD_ASSERT_FREE(rxdRail); -+ -+ spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags); -+ -+ list_add (&rxdRail->rxd_generic.Link, &rcvrRail->rcvr_freelist); -+ -+ rcvrRail->rcvr_freecount++; -+ -+ if (rcvrRail->rcvr_freewaiting) -+ { -+ rcvrRail->rcvr_freewaiting--; -+ kcondvar_wakeupall (&rcvrRail->rcvr_freesleep, 
&rcvrRail->rcvr_freelock); -+ } -+ -+ spin_unlock_irqrestore (&rcvrRail->rcvr_freelock, flags); -+} -+ -+static void -+bind_rxd_rail (EP_RXD *rxd, EP4_RXD_RAIL *rxdRail) -+{ -+ EP4_RAIL *rail = RCVR_TO_RAIL (rxdRail->rxd_generic.RcvrRail); -+ -+ ASSERT (SPINLOCK_HELD (&rxd->Rcvr->Lock)); -+ -+ EPRINTF3 (DBG_RCVR, "%s: bind_rxd_rail: rxd=%p rxdRail=%p\n", rail->r_generic.Name, rxd, rxdRail); -+ -+ elan4_sdram_writeq (rail->r_ctxt.ctxt_dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_rxd), rxd->NmdMain.nmd_addr); /* PCI write */ -+ -+ rxd->RxdRail = &rxdRail->rxd_generic; -+ rxdRail->rxd_generic.Rxd = rxd; -+} -+ -+static void -+unbind_rxd_rail (EP_RXD *rxd, EP4_RXD_RAIL *rxdRail) -+{ -+ EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail; -+ -+ ASSERT (SPINLOCK_HELD (&rxd->Rcvr->Lock)); -+ ASSERT (rxd->RxdRail == &rxdRail->rxd_generic && rxdRail->rxd_generic.Rxd == rxd); -+ -+ EP4_RXD_ASSERT_PRIVATE (rxdRail); -+ -+ EPRINTF3 (DBG_RCVR, "%s: unbind_rxd_rail: rxd=%p rxdRail=%p\n", RCVR_TO_RAIL(rcvrRail)->r_generic.Name, rxd, rxdRail); -+ -+ rxd->RxdRail = NULL; -+ rxdRail->rxd_generic.Rxd = NULL; -+ -+ if (rcvrRail->rcvr_cleanup_waiting) -+ kcondvar_wakeupall (&rcvrRail->rcvr_cleanup_sleep, &rxd->Rcvr->Lock); -+ rcvrRail->rcvr_cleanup_waiting = 0; -+ -+ EP4_RXD_PRIVATE_TO_FREE (rxdRail); -+} -+ -+ -+static void -+rcvr_stall_interrupt (EP4_RAIL *rail, void *arg) -+{ -+ EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) arg; -+ EP_RCVR *rcvr = rcvrRail->rcvr_generic.Rcvr; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&rcvr->Lock, flags); -+ -+ EPRINTF1 (DBG_RCVR, "rcvr_stall_interrupt: rcvrRail %p thread halted\n", rcvrRail); -+ -+ rcvrRail->rcvr_thread_halted = 1; -+ -+ kcondvar_wakeupall (&rcvrRail->rcvr_cleanup_sleep, &rcvr->Lock); -+ -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+} -+ -+static void -+rcvr_stall_haltop (ELAN4_DEV *dev, void *arg) -+{ -+ EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) arg; -+ EP4_COMMS_RAIL *commsRail = RCVR_TO_COMMS(rcvrRail); -+ EP_RCVR *rcvr = rcvrRail->rcvr_generic.Rcvr; -+ sdramaddr_t qdesc = ((EP4_COMMS_RAIL *) commsRail)->r_descs + (rcvr->Service * EP_QUEUE_DESC_SIZE); -+ E4_uint64 qbptr = elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_bptr)); -+ -+ /* Mark the queue as full by writing the fptr */ -+ if (qbptr == (rcvrRail->rcvr_slots_addr + EP_INPUTQ_SIZE * (rcvr->InputQueueEntries-1))) -+ elan4_sdram_writeq (dev, qdesc + offsetof (E4_InputQueue, q_fptr), rcvrRail->rcvr_slots_addr); -+ else -+ elan4_sdram_writeq (dev, qdesc + offsetof (E4_InputQueue, q_fptr), qbptr + EP_INPUTQ_SIZE); -+ -+ /* Notify the thread that it should stall after processing any outstanding envelopes */ -+ elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_stall_intcookie), -+ rcvrRail->rcvr_stall_intcookie.int_val); -+ -+ /* Issue a setevent to the queue event to wake the thread up */ -+ ep4_set_event_cmd (rcvrRail->rcvr_resched, rcvrRail->rcvr_elan_addr + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qevent)); -+} -+ -+static void -+rxd_interrupt (EP4_RAIL *rail, void *arg) -+{ -+ EP4_RXD_RAIL *rxdRail = (EP4_RXD_RAIL *) arg; -+ EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail; -+ EP_RCVR *rcvr = rcvrRail->rcvr_generic.Rcvr; -+ EP4_RXD_RAIL_MAIN *rxdMain = rxdRail->rxd_main; -+ unsigned long delay = 1; -+ EP_RXD *rxd; -+ EP_ENVELOPE *env; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&rcvr->Lock, flags); -+ -+ for (;;) -+ { -+ if (rxdMain->rxd_done == EP4_STATE_FINISHED || rxdMain->rxd_failed 
== EP4_STATE_FAILED) -+ break; -+ -+ /* The write to rxd_done could be held up in the PCI bridge even though -+ * we've seen the interrupt cookie. Unlike elan3, there is no possibility -+ * of spurious interrupts since we flush the command queues on node -+ * disconnection and via the txcallback mechanism */ -+ mb(); -+ -+ if (delay > EP4_EVENT_FIRING_TLIMIT) -+ { -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+ -+ EP_ASSFAIL (RCVR_TO_RAIL(rcvrRail), "rxd_interrupt - not finished\n"); -+ return; -+ } -+ DELAY(delay); -+ delay <<= 1; -+ } -+ -+ if (rxdMain->rxd_done != EP4_STATE_FINISHED) -+ { -+ EPRINTF8 (DBG_RETRY, "%s: rxd_interrupt: rxdRail %p retry: done=%d failed=%d NodeId=%d XID=%08x.%08x.%016llx\n", -+ rail->r_generic.Name, rxdRail, (int)rxdMain->rxd_done, (int)rxdMain->rxd_failed, rxdRail->rxd_generic.Rxd->RxdMain->Envelope.NodeId, -+ rxdRail->rxd_generic.Rxd->RxdMain->Envelope.Xid.Generation, rxdRail->rxd_generic.Rxd->RxdMain->Envelope.Xid.Handle, -+ (long long)rxdRail->rxd_generic.Rxd->RxdMain->Envelope.Xid.Unique); -+ -+ spin_lock (&rcvrRail->rcvr_retrylock); -+ -+ rxdRail->rxd_retry_time = lbolt + EP_RETRY_LOW_PRI_TIME; /* XXXX backoff ? */ -+ -+ list_add_tail (&rxdRail->rxd_retry_link, &rcvrRail->rcvr_retrylist); -+ -+ ep_kthread_schedule (&rail->r_retry_thread, rxdRail->rxd_retry_time); -+ spin_unlock (&rcvrRail->rcvr_retrylock); -+ -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+ return; -+ } -+ -+ rxd = rxdRail->rxd_generic.Rxd; -+ env = &rxd->RxdMain->Envelope; -+ -+ /* -+ * Note, since the thread will have sent the remote dma packet before copying -+ * the envelope, we must check that it has completed doing this; we do this -+ * by acquiring the spinlock against the thread which it only drops once it's -+ * completed. -+ */ -+ if (rxd->RxdMain->Len == EP_RXD_PENDING) -+ { -+ EP4_SPINENTER (rail->r_ctxt.ctxt_dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), -+ &rcvrRail->rcvr_main->rcvr_thread_lock); -+ -+ EP4_SPINEXIT (rail->r_ctxt.ctxt_dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), -+ &rcvrRail->rcvr_main->rcvr_thread_lock); -+ -+ EP4_ASSERT (rail, env->Version == EP_ENVELOPE_VERSION && rxd->RxdMain->Len != EP_RXD_PENDING); -+ } -+ -+ EPRINTF8 (DBG_RCVR, "%s: rxd_interrupt: rxd %p finished from %d XID %08x.%08x.%016llx len %d attr %x\n", rail->r_generic.Name, -+ rxd, rxd->RxdMain->Envelope.NodeId, rxd->RxdMain->Envelope.Xid.Generation, rxd->RxdMain->Envelope.Xid.Handle, -+ (long long)rxd->RxdMain->Envelope.Xid.Unique, rxd->RxdMain->Len, rxd->RxdMain->Envelope.Attr); -+ -+ rxdMain->rxd_done = EP4_STATE_PRIVATE; -+ rxd->Data.nmd_attr = EP_RAIL2RAILMASK (rail->r_generic.Number); -+ -+ switch (rxd->State) -+ { -+ case EP_RXD_RECEIVE_ACTIVE: -+ if (rxd->RxdMain->Len >= 0 && EP_IS_RPC(env->Attr)) -+ rxd->State = EP_RXD_RPC_IN_PROGRESS; -+ else -+ { -+ rxd->State = EP_RXD_COMPLETED; -+ -+ /* remove from active list */ -+ list_del (&rxd->Link); -+ -+ unbind_rxd_rail (rxd, rxdRail); -+ free_rxd_rail (rcvrRail, rxdRail); -+ } -+ -+ if (rxd->RxdMain->Len >= 0) { -+ INC_STAT(rcvrRail->rcvr_generic.stats,rx); -+ ADD_STAT(rcvrRail->rcvr_generic.stats,rx_len,rxd->RxdMain->Len); -+ INC_STAT(rail->r_generic.Stats,rx); -+ ADD_STAT(rail->r_generic.Stats,rx_len,rxd->RxdMain->Len); -+ } -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+ ep_rxd_received (rxd); -+ -+ break; -+ -+ case EP_RXD_PUT_ACTIVE: -+ case EP_RXD_GET_ACTIVE: -+ rxd->State = EP_RXD_RPC_IN_PROGRESS; -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+ -+ rxd->Handler 
(rxd); -+ break; -+ -+ case EP_RXD_COMPLETE_ACTIVE: -+ rxd->State = EP_RXD_COMPLETED; -+ -+ /* remove from active list */ -+ list_del (&rxd->Link); -+ -+ unbind_rxd_rail (rxd, rxdRail); -+ free_rxd_rail (rcvrRail, rxdRail); -+ -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+ -+ rxd->Handler(rxd); -+ break; -+ -+ default: -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+ -+ printk ("%s: rxd_interrupt: rxd %p in invalid state %d\n", rail->r_generic.Name, rxd, rxd->State); -+ /* NOTREACHED */ -+ } -+} -+ -+static void -+ep4rcvr_flush_filtering (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail) -+{ -+ EP4_COMMS_RAIL *commsRail = RCVR_TO_COMMS(rcvrRail); -+ EP4_RAIL *rail = RCVR_TO_RAIL(rcvrRail); -+ ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev; -+ sdramaddr_t qdesc = commsRail->r_descs + (rcvr->Service * EP_QUEUE_DESC_SIZE); -+ E4_Addr qbase = rcvrRail->rcvr_slots_addr; -+ E4_Addr qlast = qbase + EP_INPUTQ_SIZE * (rcvr->InputQueueEntries-1); -+ E4_uint64 qfptr, qbptr; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&rcvr->Lock, flags); -+ EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock); -+ -+ /* zip down the input queue and invalidate any envelope we find to a node which is locally passivated */ -+ qfptr = elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_fptr)); -+ qbptr = elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_bptr)); -+ -+ while (qfptr != qbptr) -+ { -+ unsigned int nodeId = elan4_sdram_readl (dev, rcvrRail->rcvr_slots + (qfptr - qbase) + offsetof (EP_ENVELOPE, NodeId)); -+ -+ EPRINTF3 (DBG_DISCON, "%s: ep4rcvr_flush_filtering: nodeId=%d State=%d\n", rail->r_generic.Name, nodeId, rail->r_generic.Nodes[nodeId].State); -+ -+ if (rail->r_generic.Nodes[nodeId].State == EP_NODE_LOCAL_PASSIVATE) -+ elan4_sdram_writel (dev, rcvrRail->rcvr_slots + (qfptr - qbase) + offsetof (EP_ENVELOPE, Version), 0); -+ -+ if (qfptr != qlast) -+ qfptr += EP_INPUTQ_SIZE; -+ else -+ qfptr = qbase; -+ } -+ -+ /* Insert a setevent command into the thread's command queue -+ * to ensure that all sten packets have completed */ -+ elan4_guard (rcvrRail->rcvr_ecq->ecq_cq, GUARD_ALL_CHANNELS); -+ ep4comms_flush_setevent (commsRail, rcvrRail->rcvr_ecq->ecq_cq); -+ -+ EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock); -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+} -+ -+static void -+ep4rcvr_flush_flushing (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail) -+{ -+ EP4_RAIL *rail = RCVR_TO_RAIL (rcvrRail); -+ ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev; -+ struct list_head *el, *nel; -+ struct list_head rxdList; -+ unsigned long flags; -+ -+ INIT_LIST_HEAD (&rxdList); -+ -+ /* remove any sten packets which are retrying to nodes which are being passivated */ -+ spin_lock_irqsave (&rcvrRail->rcvr_retrylock, flags); -+ list_for_each_safe (el, nel, &rcvrRail->rcvr_retrylist) { -+ EP4_RXD_RAIL *rxdRail = list_entry (el, EP4_RXD_RAIL, rxd_retry_link); -+ EP_ENVELOPE *env = &rxdRail->rxd_generic.Rxd->RxdMain->Envelope; -+ EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[env->NodeId]; -+ -+ if (nodeRail->State == EP_NODE_LOCAL_PASSIVATE) -+ { -+ EPRINTF2 (DBG_XMTR, "%s: ep4rcvr_flush_flushing: removing rxdRail %p from retry list\n", rail->r_generic.Name, rxdRail); -+ -+ list_del (&rxdRail->rxd_retry_link); -+ } -+ } -+ spin_unlock_irqrestore (&rcvrRail->rcvr_retrylock, flags); -+ -+ spin_lock_irqsave (&rcvr->Lock, flags); -+ EP4_SPINENTER (dev, rcvrRail->rcvr_elan + 
offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock); -+ -+ list_for_each_safe (el, nel, &rcvr->ActiveDescList) { -+ EP_RXD *rxd = list_entry (el, EP_RXD, Link); -+ EP4_RXD_RAIL *rxdRail = (EP4_RXD_RAIL *) rxd->RxdRail; -+ EP_ENVELOPE *env = &rxd->RxdMain->Envelope; -+ EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[env->NodeId]; -+ -+ if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL (rxdRail, rcvrRail) || nodeRail->State != EP_NODE_LOCAL_PASSIVATE) -+ continue; -+ -+ EPRINTF6 (DBG_DISCON, "%s: ep4rcvr_flush_flushing: rcvr %p rxd %p state %d elan node %d state %d\n", -+ rail->r_generic.Name, rcvr, rxd, (int)rxdRail->rxd_main->rxd_done, env->NodeId, rxd->State); -+ -+ switch (rxd->State) -+ { -+ case EP_RXD_FREE: -+ printk ("ep4rcvr_flush_flushing: rxd state is free but bound to a rail\n"); -+ break; -+ -+ case EP_RXD_RECEIVE_ACTIVE: -+ if (rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE) /* incomplete message receive */ -+ { -+ EPRINTF4 (DBG_RCVR, "%s: ep4rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - passive\n", -+ rail->r_generic.Name, rcvr, rxd, env->NodeId); -+ -+ nodeRail->MessageState |= EP_NODE_PASSIVE_MESSAGES; -+ continue; -+ } -+ break; -+ -+ default: -+ EP4_ASSERT (rail, EP_IS_RPC(env->Attr)); -+ -+ if (rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE) /* incomplete RPC */ -+ { -+ EPRINTF4 (DBG_RCVR, "%s: ep4rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - active\n", -+ rail->r_generic.Name, rcvr, rxd, env->NodeId); -+ -+ EP_INVALIDATE_XID (rxd->MsgXid); /* Ignore any previous NMD map responses */ -+ -+ nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES; -+ continue; -+ } -+ break; -+ -+ case EP_RXD_BEEN_ABORTED: -+ printk ("ep4rcvr_flush_flushing: rxd state is aborted but bound to a rail\n"); -+ break; -+ } -+ -+ EPRINTF4 (DBG_RCVR, "%s: ep4rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - finished\n", -+ rail->r_generic.Name, rcvr, rxd, env->NodeId); -+ } -+ -+ EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock); -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+} -+ -+void -+ep4rcvr_flush_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail) -+{ -+ EP4_RAIL *rail = RCVR_TO_RAIL(rcvrRail); -+ -+ switch (rail->r_generic.CallbackStep) -+ { -+ case EP_CB_FLUSH_FILTERING: -+ ep4rcvr_flush_filtering (rcvr, rcvrRail); -+ break; -+ -+ case EP_CB_FLUSH_FLUSHING: -+ ep4rcvr_flush_flushing (rcvr, rcvrRail); -+ break; -+ } -+} -+ -+void -+ep4rcvr_failover_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail) -+{ -+ EP_COMMS_SUBSYS *subsys = rcvr->Subsys; -+ EP4_RAIL *rail = RCVR_TO_RAIL (rcvrRail); -+ ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev; -+ struct list_head *el, *nel; -+ unsigned long flags; -+#if SUPPORT_RAIL_FAILOVER -+ EP_SYS *sys = subsys->Subsys.Sys; -+#endif -+ -+ spin_lock_irqsave (&rcvr->Lock, flags); -+ EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock); -+ -+ list_for_each_safe (el, nel, &rcvr->ActiveDescList) { -+ EP_RXD *rxd = list_entry (el, EP_RXD, Link); -+ EP4_RXD_RAIL *rxdRail = (EP4_RXD_RAIL *) rxd->RxdRail; -+ EP_ENVELOPE *env = &rxd->RxdMain->Envelope; -+ EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[env->NodeId]; -+#if SUPPORT_RAIL_FAILOVER -+ EP_NODE *node = &sys->Nodes[env->NodeId]; -+ EP_MANAGER_MSG_BODY msgBody; -+#endif -+ -+ if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || nodeRail->State != EP_NODE_PASSIVATED) -+ continue; -+ -+ 
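/* each rxd still bound to this rail is handled according to its state below: an incomplete message receive is simply unbound so the epcomms thread can requeue it on another rail, whereas an incomplete RPC must negotiate failover with the transmitting node via an EP_MANAGER_MSG_TYPE_FAILOVER_REQUEST message */ -+ 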
EPRINTF5 (DBG_FAILOVER, "%s: ep4rcvr_failover_callback: rcvr %p rxd %p elan node %d state %d\n", -+ rail->r_generic.Name, rcvr, rxd, env->NodeId, (int)rxdRail->rxd_main->rxd_done); -+ -+ switch (rxd->State) -+ { -+ case EP_RXD_FREE: -+ printk ("ep4rcvr_failover_callback: rxd state is free but bound to a rail\n"); -+ break; -+ -+ case EP_RXD_RECEIVE_ACTIVE: -+ if (rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE) /* incomplete message receive */ -+ { -+ EPRINTF4 (DBG_FAILOVER, "%s: ep4rcvr_failover_callback: rcvr %p rxd %p nodeId %d - unbind\n", rail->r_generic.Name, rcvr, rxd, env->NodeId); -+ -+ EP4_RXD_FORCE_PRIVATE(rxdRail); -+ -+ unbind_rxd_rail (rxd, rxdRail); -+ -+ free_rxd_rail (rcvrRail, rxdRail); -+ -+ /* epcomms thread will requeue on different rail */ -+ ep_kthread_schedule (&subsys->Thread, lbolt); -+ continue; -+ } -+ break; -+ -+ default: -+ EP4_ASSERT (rail, EP_IS_RPC(env->Attr)); -+ -+#if SUPPORT_RAIL_FAILOVER -+ /* XXXX - no rail failover for now .... */ -+ if (rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE && !EP_IS_NO_FAILOVER(env->Attr)) /* incomplete RPC, which can be failed over */ -+ { -+ EPRINTF6 (DBG_FAILOVER, "%s: ep4rcvr_failover_callback: rxd %p State %d Xid %llx MsgXid %llx nodeId %d - failover\n", -+ rail->r_generic.Name, rxd, rxd->State, (long long)env->Xid.Unique, (long long)rxd->MsgXid.Unique, env->NodeId); -+ -+ if (EP_XID_INVALID(rxd->MsgXid)) -+ rxd->MsgXid = ep_xid_cache_alloc (sys, &rcvr->XidCache); -+ -+ /* XXXX maybe only send the message if the node failover retry is now ? */ -+ msgBody.Failover.Xid = env->Xid; -+ msgBody.Failover.Railmask = node->ConnectedRails; -+ -+ ep_send_message (&rail->r_generic, env->NodeId, EP_MANAGER_MSG_TYPE_FAILOVER_REQUEST, rxd->MsgXid, &msgBody); -+ -+ nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES; -+ continue; -+ } -+#endif -+ break; -+ -+ case EP_RXD_BEEN_ABORTED: -+ printk ("ep4rcvr_failover_callback: rxd state is aborted but bound to a rail\n"); -+ break; -+ } -+ EPRINTF3 (DBG_FAILOVER, "%s: ep4rcvr_failover_callback: rxd %p nodeId %d - finished\n", rail->r_generic.Name, rxd, env->NodeId); -+ } -+ -+ EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock); -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+} -+ -+void -+ep4rcvr_disconnect_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail) -+{ -+ EP4_RAIL *rail = RCVR_TO_RAIL (rcvrRail); -+ ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev; -+ struct list_head *el, *nel; -+ struct list_head rxdList; -+ unsigned long flags; -+ -+ INIT_LIST_HEAD (&rxdList); -+ -+ spin_lock_irqsave (&rcvr->Lock, flags); -+ EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock); -+ -+ list_for_each_safe (el, nel, &rcvr->ActiveDescList) { -+ EP_RXD *rxd = list_entry (el, EP_RXD, Link); -+ EP4_RXD_RAIL *rxdRail = (EP4_RXD_RAIL *) rxd->RxdRail; -+ EP_ENVELOPE *env = &rxd->RxdMain->Envelope; -+ EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[env->NodeId]; -+ -+ if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || nodeRail->State != EP_NODE_DISCONNECTING) -+ continue; -+ -+ EPRINTF5 (DBG_DISCON, "%s: ep4rcvr_disconnect_callback: rcvr %p rxd %p elan node %d state %x\n", rail->r_generic.Name, rcvr, rxd, env->NodeId, rxd->State); -+ -+ switch (rxd->State) -+ { -+ case EP_RXD_FREE: -+ printk ("ep4rcvr_disconnect_callback: rxd state is free but bound to a rail\n"); -+ break; -+ -+ case EP_RXD_RECEIVE_ACTIVE: -+ if 
(rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE) /* incomplete message receive */ -+ { -+ EPRINTF4 (DBG_RCVR, "%s: ep4rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - unbind\n", rail->r_generic.Name, rcvr, rxd, env->NodeId); -+ -+ EP4_RXD_FORCE_PRIVATE (rxdRail); -+ -+ unbind_rxd_rail (rxd, rxdRail); -+ free_rxd_rail (rcvrRail, rxdRail); -+ -+ /* remark it as pending if it was partially received */ -+ rxd->RxdMain->Len = EP_RXD_PENDING; -+ -+ /* epcomms thread will requeue on different rail */ -+ ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt); -+ continue; -+ } -+ break; -+ -+ default: -+ if (rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE || rxdRail->rxd_main->rxd_done == EP4_STATE_PRIVATE) /* incomplete RPC */ -+ { -+ EPRINTF5 (DBG_RCVR, "%s: ep4rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d state %x - not able to failover\n", -+ rail->r_generic.Name, rcvr, rxd, env->NodeId, rxd->State); -+ -+ EP4_RXD_FORCE_PRIVATE (rxdRail); -+ -+ unbind_rxd_rail (rxd, rxdRail); -+ free_rxd_rail (rcvrRail, rxdRail); -+ -+ /* Ignore any previous NMD/failover responses */ -+ EP_INVALIDATE_XID (rxd->MsgXid); -+ -+ /* Remove from active list */ -+ list_del (&rxd->Link); -+ -+ if (rxd->State == EP_RXD_RPC_IN_PROGRESS) /* owned by user .... */ -+ rxd->State = EP_RXD_BEEN_ABORTED; -+ else /* queue for completion */ -+ { -+ rxd->RxdMain->Len = EP_CONN_RESET; /* ensure ep_rxd_status() fails */ -+ list_add_tail (&rxd->Link, &rxdList); -+ } -+ continue; -+ } -+ break; -+ -+ case EP_RXD_BEEN_ABORTED: -+ printk ("ep4rcvr_disconnect_callback: rxd state is aborted but bound to a rail\n"); -+ break; -+ } -+ -+ printk ("%s: ep4rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - finished\n", -+ rail->r_generic.Name, rcvr, rxd, env->NodeId); -+ EPRINTF4 (DBG_RCVR, "%s: ep4rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - finished\n", -+ rail->r_generic.Name, rcvr, rxd, env->NodeId); -+ ep4rcvr_display_rxd (&di_ep_debug, &rxdRail->rxd_generic); -+ } -+ -+ EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock); -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+ -+ while (! 
list_empty (&rxdList)) -+ { -+ EP_RXD *rxd = list_entry (rxdList.next, EP_RXD, Link); -+ -+ list_del (&rxd->Link); -+ -+ rxd->Handler (rxd); -+ } -+} -+ -+void -+ep4rcvr_neterr_flush (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies) -+{ -+ EP4_COMMS_RAIL *commsRail = RCVR_TO_COMMS(rcvrRail); -+ EP4_RAIL *rail = RCVR_TO_RAIL (rcvrRail); -+ ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&rcvr->Lock, flags); -+ EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock); -+ -+ /* Insert a setevent command into the thread's command queue -+ * to ensure that all sten packets have completed */ -+ elan4_guard (rcvrRail->rcvr_ecq->ecq_cq, GUARD_ALL_CHANNELS); -+ ep4comms_flush_setevent (commsRail, rcvrRail->rcvr_ecq->ecq_cq); -+ -+ EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock); -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+} -+ -+void -+ep4rcvr_neterr_check (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies) -+{ -+ EP4_RAIL *rail = RCVR_TO_RAIL (rcvrRail); -+ ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev; -+ struct list_head *el; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&rcvr->Lock, flags); -+ EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock); -+ -+ list_for_each (el, &rcvr->ActiveDescList) { -+ EP_RXD *rxd = list_entry (el, EP_RXD, Link); -+ EP4_RXD_RAIL *rxdRail = (EP4_RXD_RAIL *) rxd->RxdRail; -+ EP_ENVELOPE *env = &rxd->RxdMain->Envelope; -+ -+ if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || env->NodeId != nodeId) -+ continue; -+ -+ if (rxd->State == EP_RXD_RECEIVE_ACTIVE || rxd->State == EP_RXD_GET_ACTIVE) -+ { -+ EP_NETERR_COOKIE cookie; -+ unsigned int first, this; -+ -+ if (rxd->State == EP_RXD_RECEIVE_ACTIVE) -+ first = (EP_MAXFRAG+1) - (( EP_IS_MULTICAST(env->Attr) ? 1 : 0) + (env->nFrags == 0 ? 1 : env->nFrags)); -+ else -+ first = (EP_MAXFRAG+1) - rxd->nFrags; -+ -+ for (this = first; this < (EP_MAXFRAG+1); this++) -+ if (rxdRail->rxd_main->rxd_sent[this] == EP4_STATE_ACTIVE) -+ break; -+ -+ if (this > first) -+ { -+ /* Look at the last completed STEN packet and if its neterr cookie matches, then change -+ * the rxd to look the same as if the sten packet had failed and then schedule it for retry */ -+ cookie = elan4_sdram_readq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[--this].c_cookie)); -+ -+ if (cookie == cookies[0] || cookie == cookies[1]) -+ { -+ EP_NETERR_COOKIE ncookie = ep4_neterr_cookie (rail, env->NodeId) | EP4_COOKIE_STEN; -+ -+ EPRINTF6 (DBG_NETWORK_ERROR, "%s: ep4rcvr_neterr_check: cookie <%lld%s%s%s%s> matches rxd %p rxdRail %p this %d new cookie <%lld%s%s%s%s>\n", -+ rail->r_generic.Name, EP4_COOKIE_STRING(cookie), rxd, rxdRail, this, EP4_COOKIE_STRING(ncookie)); -+ -+ printk ("%s: ep4rcvr_neterr_check: cookie <%lld%s%s%s%s> matches rxd %p rxdRail %p this %d new cookie <%lld%s%s%s%s>\n", -+ rail->r_generic.Name, EP4_COOKIE_STRING(cookie), rxd, rxdRail, this, EP4_COOKIE_STRING(ncookie)); -+ -+ /* Allocate a new cookie for this sten packet, since this message could be received more than once. -+ * If the second arrives after we've successfully sent the response and the packet completes, then we -+ * could try and requeue it after the next sten packet got nacked. 
*/ -+ elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[this].c_cookie), ncookie); -+ -+ rxdRail->rxd_main->rxd_sent[this] = EP4_STATE_ACTIVE; -+ rxdRail->rxd_main->rxd_failed = EP4_STATE_FAILED; -+ -+ spin_lock (&rcvrRail->rcvr_retrylock); -+ -+ EP4_ASSERT (rail, rxdRail->rxd_retry_time == 0); -+ -+ rxdRail->rxd_retry_time = lbolt + EP_RETRY_LOW_PRI_TIME; -+ -+ list_add_tail (&rxdRail->rxd_retry_link, &rcvrRail->rcvr_retrylist); -+ -+ ep_kthread_schedule (&rail->r_retry_thread, rxdRail->rxd_retry_time); -+ -+ spin_unlock (&rcvrRail->rcvr_retrylock); -+ } -+ } -+ } -+ } -+ EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock); -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+} -+ -+int -+ep4rcvr_queue_rxd (EP_RXD *rxd, EP_RCVR_RAIL *r) -+{ -+ EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) r; -+ EP4_RAIL *rail = RCVR_TO_RAIL (rcvrRail); -+ ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev; -+ EP4_RXD_RAIL *rxdRail; -+ register int i; -+ -+ ASSERT (SPINLOCK_HELD(&rxd->Rcvr->Lock)); -+ -+ if ((rxdRail = get_rxd_rail (rcvrRail)) == NULL) -+ return 0; -+ -+ /* Flush the Elan TLB if mappings have changed */ -+ ep_perrail_dvma_sync (&rail->r_generic); -+ -+ EPRINTF6 (DBG_RCVR, "%s: ep4rcvr_queue_rxd: rcvr %p rxd %p rxdRail %p buffer %x len %x\n", -+ rail->r_generic.Name, rxd->Rcvr, rxd, rxdRail, rxd->Data.nmd_addr, rxd->Data.nmd_len); -+ -+ /* bind the rxdRail and rxd together */ -+ bind_rxd_rail (rxd, rxdRail); -+ -+ elan4_sdram_writel (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_buffer.nmd_addr), rxd->Data.nmd_addr); /* PCI write */ -+ elan4_sdram_writel (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_buffer.nmd_len), rxd->Data.nmd_len); /* PCI write */ -+ elan4_sdram_writel (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_buffer.nmd_attr), rxd->Data.nmd_attr); /* PCI write */ -+ -+ /* Mark as active */ -+ elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType), -+ E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS)); -+ -+ for (i = 0; i <= EP_MAXFRAG; i++) -+ rxdRail->rxd_main->rxd_sent[i] = EP4_STATE_ACTIVE; -+ -+ rxdRail->rxd_main->rxd_failed = EP4_STATE_ACTIVE; -+ rxdRail->rxd_main->rxd_done = EP4_STATE_ACTIVE; -+ -+ elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0]) + 0x00, /* %r0 */ -+ ep_symbol (&rail->r_threadcode, "c_queue_rxd")); -+ elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0]) + 0x10, /* %r2 */ -+ rcvrRail->rcvr_elan_addr); -+ elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0]) + 0x18, /* %r3 */ -+ rxdRail->rxd_elan_addr); -+ -+ elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CountAndType), -+ E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_START_CMD_NDWORDS)); -+ -+ ep4_set_event_cmd (rxdRail->rxd_scq, rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_start)); -+ -+ return 1; -+} -+ -+void -+ep4rcvr_rpc_put (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags) -+{ -+ EP4_RXD_RAIL *rxdRail = (EP4_RXD_RAIL *) rxd->RxdRail; -+ EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail; -+ EP4_RAIL *rail = RCVR_TO_RAIL (rcvrRail); -+ ELAN4_DEV *dev = RCVR_TO_DEV (rcvrRail); -+ sdramaddr_t rxdElan = rxdRail->rxd_elan; -+ EP_ENVELOPE *env = &rxd->RxdMain->Envelope; -+ unsigned long first 
= (EP_MAXFRAG+1) - nFrags; -+ EP4_RXD_DMA_CMD cmd; -+ register int i, len; -+ -+ EP4_ASSERT (rail, rxd->State == EP_RXD_PUT_ACTIVE); -+ EP4_ASSERT (rail, rxdRail->rxd_main->rxd_done == EP4_STATE_PRIVATE); -+ EP4_SDRAM_ASSERT (rail, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType), -+ E4_EVENT_INIT_VALUE (0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS)); -+ -+ /* Flush the Elan TLB if mappings have changed */ -+ ep_perrail_dvma_sync (&rail->r_generic); -+ -+ /* Generate the DMA chain to put the data */ -+ for (i = 0, len = 0; i < nFrags; i++, len += local->nmd_len, local++, remote++) -+ { -+ cmd.c_dma_typeSize = RUN_DMA_CMD | E4_DMA_TYPE_SIZE(local->nmd_len, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT); -+ cmd.c_dma_cookie = ep4_neterr_cookie (rail, env->NodeId) | EP4_COOKIE_DMA; -+ cmd.c_dma_vproc = EP_VP_DATA(env->NodeId); -+ cmd.c_dma_srcAddr = local->nmd_addr; -+ cmd.c_dma_dstAddr = remote->nmd_addr; -+ if (i == (nFrags-1)) -+ cmd.c_dma_srcEvent = rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_done); -+ else -+ cmd.c_dma_srcEvent = rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i]); -+ cmd.c_dma_dstEvent = 0; -+ cmd.c_nop_cmd = NOP_CMD; -+ -+ EPRINTF7 (DBG_RCVR, "%s: ep4rcvr_rpc_put: rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x\n", -+ rail->r_generic.Name, rxd, (long long)env->Xid.Unique, i, local->nmd_addr, remote->nmd_addr, local->nmd_len); -+ -+ elan4_sdram_copyq_to_sdram (dev, &cmd, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i]), sizeof (EP4_RXD_DMA_CMD)); -+ } -+ -+ /* Initialise the event chain */ -+ for (i = 0; i < nFrags-1; i++) -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i]), -+ E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_DMA_CMD_NDWORDS)); -+ -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done), -+ E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS)); -+ -+ for (i = 0; i <= EP_MAXFRAG; i++) -+ rxdRail->rxd_main->rxd_sent[i] = EP4_STATE_ACTIVE; -+ -+ rxdRail->rxd_main->rxd_failed = EP4_STATE_ACTIVE; -+ rxdRail->rxd_main->rxd_done = EP4_STATE_ACTIVE; -+ -+ /* Initialise the previous event to start the whole chain off */ -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]), -+ E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_DMA_CMD_NDWORDS)); -+ -+ EP4_ASSERT (rail, rail->r_generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->r_generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE); -+ -+ /* finally issue the setevent to start the whole chain */ -+ ep4_set_event_cmd (rxdRail->rxd_scq, rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1])); -+ -+ BucketStat (rxd->Rcvr->Subsys, RPCPut, len); -+} -+ -+void -+ep4rcvr_rpc_get (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags) -+{ -+ EP4_RXD_RAIL *rxdRail = (EP4_RXD_RAIL *) rxd->RxdRail; -+ EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail; -+ EP4_RAIL *rail = RCVR_TO_RAIL (rcvrRail); -+ ELAN4_DEV *dev = RCVR_TO_DEV (rcvrRail); -+ sdramaddr_t rxdElan = rxdRail->rxd_elan; -+ EP_ENVELOPE *env = &rxd->RxdMain->Envelope; -+ unsigned long first = (EP_MAXFRAG+1) - nFrags; -+ register int i, len; -+ -+ EP4_ASSERT (rail, rxd->State == EP_RXD_GET_ACTIVE); -+ EP4_ASSERT (rail, rxdRail->rxd_main->rxd_done == EP4_STATE_PRIVATE); -+ EP4_SDRAM_ASSERT (rail, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, 
rxd_done.ev_CountAndType), -+ E4_EVENT_INIT_VALUE (0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS)); -+ -+ /* Flush the Elan TLB if mappings have changed */ -+ ep_perrail_dvma_sync (&rail->r_generic); -+ -+ /* Generate the DMA chain to get the data */ -+ for (i = 0, len = 0; i < nFrags; i++, len += local->nmd_len, local++, remote++) -+ { -+ EPRINTF7 (DBG_RCVR, "%s: ep4rcvr_rpc_get: rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x\n", -+ rail->r_generic.Name, rxd, (long long)env->Xid.Unique, i, remote->nmd_addr, local->nmd_addr, remote->nmd_len); -+ -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_open), -+ OPEN_STEN_PKT_CMD | OPEN_PACKET(0, PACK_OK | RESTART_COUNT_ZERO, EP_VP_DATA(env->NodeId))); -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_trans), -+ SEND_TRANS_CMD | ((TR_REMOTEDMA | TR_WAIT_FOR_EOP) << 16)); -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_cookie), -+ ep4_neterr_cookie (rail, env->NodeId) | EP4_COOKIE_STEN); -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_typeSize), -+ E4_DMA_TYPE_SIZE (local->nmd_len, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT)); -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_cookie), -+ ep4_neterr_cookie (rail, env->NodeId) | EP4_COOKIE_DMA); -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_vproc), -+ EP_VP_DATA (rail->r_generic.Position.pos_nodeid)); -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_srcAddr), -+ remote->nmd_addr); -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_dstAddr), -+ local->nmd_addr); -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_srcEvent), -+ 0); -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_dstEvent), -+ i == (nFrags-1) ? 
rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_done) : -+ rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i])); -+ } -+ -+ /* Initialise the event chain */ -+ for (i = 0; i < nFrags-1; i++) -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i]), -+ E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS)); -+ -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done), -+ E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS)); -+ -+ for (i = 0; i <= EP_MAXFRAG; i++) -+ rxdRail->rxd_main->rxd_sent[i] = EP4_STATE_ACTIVE; -+ -+ rxdRail->rxd_main->rxd_failed = EP4_STATE_ACTIVE; -+ rxdRail->rxd_main->rxd_done = EP4_STATE_ACTIVE; -+ -+ /* Initialise the previous event to start the whole chain off */ -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]), -+ E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS)); -+ -+ EP4_ASSERT (rail, rail->r_generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->r_generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE); -+ -+ /* finally issue the setevent to start the whole chain */ -+ ep4_set_event_cmd (rxdRail->rxd_scq, rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1])); -+ -+ BucketStat (rxd->Rcvr->Subsys, RPCPut, len); -+} -+ -+void -+ep4rcvr_rpc_complete (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags) -+{ -+ EP4_RXD_RAIL *rxdRail = (EP4_RXD_RAIL *) rxd->RxdRail; -+ EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail; -+ EP4_RAIL *rail = RCVR_TO_RAIL (rcvrRail); -+ ELAN4_DEV *dev = RCVR_TO_DEV (rcvrRail); -+ sdramaddr_t rxdElan = rxdRail->rxd_elan; -+ EP_ENVELOPE *env = &rxd->RxdMain->Envelope; -+ unsigned long first = (EP_MAXFRAG+1) - nFrags - 1; -+ EP4_RXD_DMA_CMD cmd; -+ register int i, len; -+ -+ EP4_ASSERT (rail, rxd->State == EP_RXD_COMPLETE_ACTIVE); -+ EP4_ASSERT (rail, rxdRail->rxd_main->rxd_done == EP4_STATE_PRIVATE); -+ EP4_SDRAM_ASSERT (rail, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType), -+ E4_EVENT_INIT_VALUE (0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS)); -+ -+ /* Flush the Elan TLB if mappings have changed */ -+ ep_perrail_dvma_sync (&rail->r_generic); -+ -+ /* Generate the DMA chain to put the data */ -+ for (i = 0, len = 0; i < nFrags; i++, len += local->nmd_len, local++, remote++) -+ { -+ cmd.c_dma_typeSize = RUN_DMA_CMD | E4_DMA_TYPE_SIZE(local->nmd_len, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT); -+ cmd.c_dma_cookie = ep4_neterr_cookie (rail, env->NodeId) | EP4_COOKIE_DMA; -+ cmd.c_dma_vproc = EP_VP_DATA(env->NodeId); -+ cmd.c_dma_srcAddr = local->nmd_addr; -+ cmd.c_dma_dstAddr = remote->nmd_addr; -+ cmd.c_dma_srcEvent = rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i]); -+ cmd.c_dma_dstEvent = 0; -+ cmd.c_nop_cmd = NOP_CMD; -+ -+ EPRINTF7 (DBG_RCVR, "%s: ep4rcvr_rpc_complete: rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x\n", -+ rail->r_generic.Name, rxd, (long long)env->Xid.Unique, i, local->nmd_addr, remote->nmd_addr, local->nmd_len); -+ -+ elan4_sdram_copyq_to_sdram (dev, &cmd, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i]), sizeof (EP4_RXD_DMA_CMD)); -+ } -+ -+ /* Initialise the status block dma */ -+ cmd.c_dma_typeSize = RUN_DMA_CMD | E4_DMA_TYPE_SIZE(EP_STATUSBLK_SIZE, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT); -+ cmd.c_dma_cookie = ep4_neterr_cookie (rail, 
env->NodeId) | EP4_COOKIE_DMA; -+ cmd.c_dma_vproc = EP_VP_DATA(env->NodeId); -+ cmd.c_dma_srcAddr = rxd->NmdMain.nmd_addr + offsetof (EP_RXD_MAIN, StatusBlk); -+ cmd.c_dma_dstAddr = env->TxdMain.nmd_addr + offsetof (EP_TXD_MAIN, StatusBlk); -+ cmd.c_dma_srcEvent = rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_done); -+ cmd.c_dma_dstEvent = env->TxdRail + offsetof (EP4_TXD_RAIL_ELAN, txd_done); -+ cmd.c_nop_cmd = NOP_CMD; -+ -+ EPRINTF6 (DBG_RCVR, "%s: ep4rcvr_rpc_complete: rxd %p [XID=%llx] statusblk source=%08x dest=%08x len=%llx\n", -+ rail->r_generic.Name, rxd, (long long)env->Xid.Unique, (int) cmd.c_dma_srcAddr, (int) cmd.c_dma_dstAddr, (long long)EP_STATUSBLK_SIZE); -+ -+ elan4_sdram_copyq_to_sdram (dev, &cmd, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[EP_MAXFRAG]), sizeof (EP4_RXD_DMA_CMD)); -+ -+ /* Initialise the event chain */ -+ for (i = 0; i < nFrags; i++) -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i]), -+ E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_DMA_CMD_NDWORDS)); -+ -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done), -+ E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS)); -+ -+ for (i = 0; i <= EP_MAXFRAG; i++) -+ rxdRail->rxd_main->rxd_sent[i] = EP4_STATE_ACTIVE; -+ -+ rxdRail->rxd_main->rxd_failed = EP4_STATE_ACTIVE; -+ rxdRail->rxd_main->rxd_done = EP4_STATE_ACTIVE; -+ -+ /* Initialise the previous event to start the whole chain off */ -+ elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]), -+ E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_DMA_CMD_NDWORDS)); -+ -+ EP4_ASSERT (rail, rail->r_generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->r_generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE); -+ -+ /* finally issue the setevent to start the whole chain */ -+ ep4_set_event_cmd (rxdRail->rxd_scq, rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1])); -+ -+ BucketStat (rxd->Rcvr->Subsys, CompleteRPC, len); -+} -+ -+EP_RXD * -+ep4rcvr_steal_rxd (EP_RCVR_RAIL *r) -+{ -+ /* XXXX - TBD */ -+ return NULL; -+} -+ -+long -+ep4rcvr_check (EP_RCVR_RAIL *r, long nextRunTime) -+{ -+ EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) r; -+ EP4_RAIL *rail = RCVR_TO_RAIL (rcvrRail); -+ -+ if (rcvrRail->rcvr_freecount < ep_rxd_lowat && !alloc_rxd_block (rcvrRail)) -+ { -+ EPRINTF1 (DBG_RCVR,"%s: failed to grow rxd rail pool\n", rail->r_generic.Name); -+ -+ if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME)) -+ nextRunTime = lbolt + RESOURCE_RETRY_TIME; -+ } -+ -+ return nextRunTime; -+} -+ -+unsigned long -+ep4rcvr_retry (EP4_RAIL *rail, void *arg, unsigned long nextRunTime) -+{ -+ EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) arg; -+ ELAN4_DEV *dev = RCVR_TO_DEV(rcvrRail); -+ unsigned long flags; -+ -+ spin_lock_irqsave (&rcvrRail->rcvr_retrylock, flags); -+ while (! list_empty (&rcvrRail->rcvr_retrylist)) -+ { -+ EP4_RXD_RAIL *rxdRail = list_entry (rcvrRail->rcvr_retrylist.next, EP4_RXD_RAIL, rxd_retry_link); -+ EP_ENVELOPE *env = &rxdRail->rxd_generic.Rxd->RxdMain->Envelope; -+ unsigned int first = (EP_MAXFRAG+1) - ((env->Attr & EP_MULTICAST ? 1 : 0) + (env->nFrags == 0 ? 
1 : env->nFrags)); -+ -+ if (BEFORE (lbolt, rxdRail->rxd_retry_time)) -+ { -+ if (nextRunTime == 0 || AFTER (nextRunTime, rxdRail->rxd_retry_time)) -+ nextRunTime = rxdRail->rxd_retry_time; -+ -+ break; -+ } -+ -+ list_del (&rxdRail->rxd_retry_link); -+ rxdRail->rxd_retry_time = 0; -+ -+ /* determine which sten packet to resubmit */ -+ for (; first < (EP_MAXFRAG+1); first++) -+ if (rxdRail->rxd_main->rxd_sent[first] == EP4_STATE_ACTIVE) -+ break; -+ -+ EPRINTF3 (DBG_RETRY, "%s: ep4rcvr_retry: rxdRail %p, reissuing sten[%d]\n", rail->r_generic.Name, rxdRail, first); -+ -+ /* re-initialise the fail event */ -+ elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType), -+ E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS)); -+ -+ rxdRail->rxd_main->rxd_failed = EP4_STATE_ACTIVE; -+ -+ /* re-initialise the chain event to resubmit this sten packet */ -+ elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first-1].ev_CountAndType), -+ E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS)); -+ -+ /* finally issue the setevent to start the chain again */ -+ ep4_set_event_cmd (rxdRail->rxd_scq, rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1])); -+ } -+ spin_unlock_irqrestore (&rcvrRail->rcvr_retrylock, flags); -+ -+ return nextRunTime; -+} -+ -+void -+ep4rcvr_add_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *commsRail) -+{ -+ EP4_RAIL *rail = (EP4_RAIL *) commsRail->Rail; -+ ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev; -+ sdramaddr_t qdescs = ((EP4_COMMS_RAIL *) commsRail)->r_descs; -+ EP4_RCVR_RAIL *rcvrRail; -+ E4_InputQueue qdesc; -+ E4_ThreadRegs tregs; -+ sdramaddr_t stack; -+ unsigned long flags; -+ -+ KMEM_ZALLOC (rcvrRail, EP4_RCVR_RAIL *, sizeof (EP4_RCVR_RAIL), 1); -+ -+ spin_lock_init (&rcvrRail->rcvr_freelock); -+ INIT_LIST_HEAD (&rcvrRail->rcvr_freelist); -+ INIT_LIST_HEAD (&rcvrRail->rcvr_blocklist); -+ -+ kcondvar_init (&rcvrRail->rcvr_cleanup_sleep); -+ kcondvar_init (&rcvrRail->rcvr_freesleep); -+ -+ INIT_LIST_HEAD (&rcvrRail->rcvr_retrylist); -+ spin_lock_init (&rcvrRail->rcvr_retrylock); -+ -+ rcvrRail->rcvr_generic.CommsRail = commsRail; -+ rcvrRail->rcvr_generic.Rcvr = rcvr; -+ -+ rcvrRail->rcvr_main = ep_alloc_main (&rail->r_generic, sizeof (EP4_RCVR_RAIL_MAIN), 0, &rcvrRail->rcvr_main_addr); -+ rcvrRail->rcvr_elan = ep_alloc_elan (&rail->r_generic, sizeof (EP4_RCVR_RAIL_ELAN), 0, &rcvrRail->rcvr_elan_addr); -+ rcvrRail->rcvr_slots = ep_alloc_elan (&rail->r_generic, EP_INPUTQ_SIZE * rcvr->InputQueueEntries, 0, &rcvrRail->rcvr_slots_addr); -+ stack = ep_alloc_elan (&rail->r_generic, EP4_STACK_SIZE, 0, &rcvrRail->rcvr_stack); -+ -+ /* allocate a command queue for the thread to use, plus space for it to wait/reschedule */ -+ rcvrRail->rcvr_ecq = ep4_alloc_ecq (rail, CQ_Size64K); -+ rcvrRail->rcvr_resched = ep4_get_ecq (rail, EP4_ECQ_ATOMIC, 8); -+ -+ ep4_register_intcookie (rail, &rcvrRail->rcvr_stall_intcookie, rcvrRail->rcvr_elan_addr + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_stall_intcookie), -+ rcvr_stall_interrupt, rcvrRail); -+ -+ /* Initialise the elan portion */ -+ elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qevent.ev_CountAndType), 0); -+ elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_halt.ev_CountAndType), 0); -+ elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), 0); -+ elan4_sdram_writeq (dev, 
rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_pending_tailp), -+ rcvrRail->rcvr_elan_addr + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_pending_head)); -+ elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_pending_head), 0); -+ elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_stall_intcookie), 0); -+ elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qbase), rcvrRail->rcvr_slots_addr); -+ elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qlast), -+ rcvrRail->rcvr_slots_addr + EP_INPUTQ_SIZE * (rcvr->InputQueueEntries-1)); -+ -+ /* Initialise the main memory portion */ -+ rcvrRail->rcvr_main->rcvr_thread_lock = 0; -+ -+ /* Install our retry handler */ -+ rcvrRail->rcvr_retryops.op_func = ep4rcvr_retry; -+ rcvrRail->rcvr_retryops.op_arg = rcvrRail; -+ -+ ep4_add_retry_ops (rail, &rcvrRail->rcvr_retryops); -+ -+ /* Update the queue descriptor */ -+ qdesc.q_bptr = rcvrRail->rcvr_slots_addr; -+ qdesc.q_fptr = rcvrRail->rcvr_slots_addr; -+ qdesc.q_control = E4_InputQueueControl (rcvrRail->rcvr_slots_addr, rcvrRail->rcvr_slots_addr + (EP_INPUTQ_SIZE * (rcvr->InputQueueEntries-1)), EP_INPUTQ_SIZE); -+ qdesc.q_event = rcvrRail->rcvr_elan_addr + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qevent); -+ -+ ep4_write_qdesc (rail, qdescs + (rcvr->Service * EP_QUEUE_DESC_SIZE), &qdesc); -+ -+ spin_lock_irqsave (&rcvr->Lock, flags); -+ rcvr->Rails[rail->r_generic.Number] = &rcvrRail->rcvr_generic; -+ rcvr->RailMask |= EP_RAIL2RAILMASK (rail->r_generic.Number); -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+ -+ { -+ sdramaddr_t stackTop = stack + EP4_STACK_SIZE; -+ E4_Addr stackTopAddr = rcvrRail->rcvr_stack + EP4_STACK_SIZE; -+ -+ ep4_init_thread (rail, &tregs, stackTop, stackTopAddr, ep_symbol (&rail->r_threadcode, "ep4comms_rcvr"), 6, -+ (E4_uint64) rail->r_elan_addr, (E4_uint64) rcvrRail->rcvr_elan_addr, (E4_uint64) rcvrRail->rcvr_main_addr, -+ (E4_uint64) EP_MSGQ_ADDR(rcvr->Service), (E4_uint64) rcvrRail->rcvr_ecq->ecq_addr, (E4_uint64) rcvrRail->rcvr_resched->ecq_addr); -+ } -+ -+ /* Issue the command to the thread's private command queue */ -+ elan4_run_thread_cmd (rcvrRail->rcvr_ecq->ecq_cq, &tregs); -+ -+ ep_procfs_rcvr_add_rail(&(rcvrRail->rcvr_generic)); -+} -+ -+void -+ep4rcvr_del_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *commsRail) -+{ -+ EP4_RAIL *rail = (EP4_RAIL *) commsRail->Rail; -+ EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) rcvr->Rails[rail->r_generic.Number]; -+ ELAN4_HALTOP haltop; -+ struct list_head *el, *nel; -+ unsigned long flags; -+ -+ ep_procfs_rcvr_del_rail(&(rcvrRail->rcvr_generic)); -+ -+ /* Run a halt operation to mark the input queue as full and -+ * request the thread to halt */ -+ haltop.op_mask = INT_DiscardingHighPri | INT_TProcHalted; -+ haltop.op_function = rcvr_stall_haltop; -+ haltop.op_arg = rcvrRail; -+ -+ elan4_queue_haltop (rail->r_ctxt.ctxt_dev, &haltop); -+ -+ /* Wait for the thread to tell us it's processed the input queue */ -+ spin_lock_irqsave (&rcvr->Lock, flags); -+ while (! 
rcvrRail->rcvr_thread_halted) -+ kcondvar_wait (&rcvrRail->rcvr_cleanup_sleep, &rcvr->Lock, &flags); -+ rcvrRail->rcvr_thread_halted = 0; -+ -+ /* flag the rail as no longer available */ -+ rcvr->RailMask &= ~EP_RAIL2RAILMASK (rail->r_generic.Number); -+ -+ /* wait for all active communications to terminate */ -+ for (;;) -+ { -+ int mustWait = 0; -+ -+ list_for_each (el, &rcvr->ActiveDescList) { -+ EP_RXD *rxd = list_entry (el, EP_RXD, Link); -+ EP4_RXD_RAIL *rxdRail = (EP4_RXD_RAIL *) rxd->RxdRail; -+ -+ if (rxdRail && RXD_BOUND2RAIL (rxdRail, rcvrRail) && rxd->RxdMain->Len != EP_RXD_PENDING) -+ { -+ mustWait++; -+ break; -+ } -+ } -+ -+ if (! mustWait) -+ break; -+ -+ rcvrRail->rcvr_cleanup_waiting++; -+ kcondvar_wait (&rcvrRail->rcvr_cleanup_sleep, &rcvr->Lock, &flags); -+ } -+ -+ /* at this point all rxd's in the list that are bound to the deleting rail are pending */ -+ list_for_each_safe (el, nel, &rcvr->ActiveDescList) { -+ EP_RXD *rxd = list_entry (el, EP_RXD, Link); -+ EP4_RXD_RAIL *rxdRail = (EP4_RXD_RAIL *) rxd->RxdRail; -+ -+ if (rxdRail && RXD_BOUND2RAIL (rxdRail, rcvrRail)) -+ { -+ EP4_RXD_ASSERT_PENDING (rxdRail); -+ EP4_RXD_FORCE_PRIVATE (rxdRail); -+ -+ unbind_rxd_rail (rxd, rxdRail); -+ free_rxd_rail (rcvrRail, rxdRail); -+ } -+ } -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+ -+ /* wait for all rxd's for this rail to become free */ -+ spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags); -+ while (rcvrRail->rcvr_freecount != rcvrRail->rcvr_totalcount) -+ { -+ rcvrRail->rcvr_freewaiting++; -+ kcondvar_wait (&rcvrRail->rcvr_freesleep, &rcvrRail->rcvr_freelock, &flags); -+ } -+ spin_unlock_irqrestore (&rcvrRail->rcvr_freelock, flags); -+ -+ /* can now remove the rail as it can no longer be used */ -+ spin_lock_irqsave (&rcvr->Lock, flags); -+ rcvr->Rails[rail->r_generic.Number] = NULL; -+ spin_unlock_irqrestore (&rcvr->Lock, flags); -+ -+ /* all the rxd's associated with DescBlocks must be in the FreeDescList */ -+ ASSERT (rcvrRail->rcvr_totalcount == rcvrRail->rcvr_freecount); -+ -+ /* run through the DescBlockList deleting them */ -+ while (!list_empty (&rcvrRail->rcvr_blocklist)) -+ free_rxd_block (rcvrRail, list_entry(rcvrRail->rcvr_blocklist.next, EP4_RXD_RAIL_BLOCK , blk_link)); -+ -+ /* it had better be empty after that */ -+ ASSERT ((rcvrRail->rcvr_totalcount == 0) && (rcvrRail->rcvr_totalcount == rcvrRail->rcvr_freecount)); -+ -+ ep4_remove_retry_ops (rail, &rcvrRail->rcvr_retryops); -+ -+ ep4_deregister_intcookie (rail, &rcvrRail->rcvr_stall_intcookie); -+ -+ ep4_put_ecq (rail, rcvrRail->rcvr_resched, 8); -+ ep4_free_ecq (rail, rcvrRail->rcvr_ecq); -+ -+ ep_free_elan (&rail->r_generic, rcvrRail->rcvr_stack, EP4_STACK_SIZE); -+ ep_free_elan (&rail->r_generic, rcvrRail->rcvr_slots_addr, EP_INPUTQ_SIZE * rcvr->InputQueueEntries); -+ ep_free_elan (&rail->r_generic, rcvrRail->rcvr_elan_addr, sizeof (EP4_RCVR_RAIL_ELAN)); -+ ep_free_main (&rail->r_generic, rcvrRail->rcvr_main_addr, sizeof (EP4_RCVR_RAIL_MAIN)); -+ -+ KMEM_FREE (rcvrRail, sizeof (EP4_RCVR_RAIL)); -+} -+ -+void -+ep4rcvr_display_rxd (DisplayInfo *di, EP_RXD_RAIL *r) -+{ -+ EP4_RXD_RAIL *rxdRail = (EP4_RXD_RAIL *) r; -+ sdramaddr_t rxdElan = rxdRail->rxd_elan; -+ EP4_RAIL *rail = RCVR_TO_RAIL (rxdRail->rxd_generic.RcvrRail); -+ ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev; -+ int i; -+ -+ (di->func)(di->arg, " Rail %d rxd %p elan %lx(%x) main %p(%x) ecq %d scq %d debug %llx\n", rail->r_generic.Number, -+ rxdRail, rxdRail->rxd_elan, rxdRail->rxd_elan_addr, rxdRail->rxd_main, rxdRail->rxd_main_addr, -+ 
elan4_cq2num(rxdRail->rxd_ecq->ecq_cq), elan4_cq2num(rxdRail->rxd_scq->ecq_cq), -+ elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_debug))); -+ (di->func)(di->arg, " start %016llx %016llx %016llx [%016llx %016llx]\n", -+ elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CountAndType)), -+ elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_Params[0])), -+ elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_Params[1])), -+ elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0].c_cookie)), -+ elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0].c_dma_cookie))); -+ -+ for (i = 0; i < EP_MAXFRAG; i++) -+ (di->func)(di->arg, " chain[%d] %016llx %016llx %016llx [%016llx %016llx]\n", i, -+ elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[i].ev_CountAndType)), -+ elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[i].ev_Params[0])), -+ elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[i].ev_Params[1])), -+ elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[i+1].c_cookie)), -+ elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[i+1].c_dma_cookie))); -+ (di->func)(di->arg, " done %016llx %016llx %016llx -> %016llx\n", -+ elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType)), -+ elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_Params[0])), -+ elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_Params[1])), -+ rxdRail->rxd_main->rxd_done); -+ (di->func)(di->arg, " fail %016llx %016llx %016llx -> %016llx\n", -+ elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType)), -+ elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_Params[0])), -+ elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_Params[1])), -+ rxdRail->rxd_main->rxd_failed); -+ (di->func)(di->arg, " next %016llx queued %016llx main %016llx\n", -+ elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_next)), -+ elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_queued)), -+ elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_main))); -+ (di->func)(di->arg, " sent %016llx %016llx %016llx %016llx %016llx\n", -+ rxdRail->rxd_main->rxd_sent[0], rxdRail->rxd_main->rxd_sent[1], rxdRail->rxd_main->rxd_sent[2], -+ rxdRail->rxd_main->rxd_sent[3], rxdRail->rxd_main->rxd_sent[4]); -+} -+ -+void -+ep4rcvr_display_rcvr (DisplayInfo *di, EP_RCVR_RAIL *r) -+{ -+ EP_RCVR *rcvr = r->Rcvr; -+ EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) r; -+ EP4_COMMS_RAIL *commsRail = RCVR_TO_COMMS(rcvrRail); -+ EP4_RAIL *rail = RCVR_TO_RAIL (rcvrRail); -+ ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev; -+ sdramaddr_t rcvrElan = rcvrRail->rcvr_elan; -+ sdramaddr_t qdesc = commsRail->r_descs + (rcvr->Service * EP_QUEUE_DESC_SIZE); -+ sdramaddr_t event = rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qevent); -+ unsigned int freeCount = 0; -+ unsigned int blockCount = 0; -+ struct list_head *el; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags); -+ list_for_each (el, &rcvrRail->rcvr_freelist) -+ freeCount++; -+ list_for_each (el, &rcvrRail->rcvr_blocklist) -+ blockCount++; -+ spin_unlock_irqrestore(&rcvrRail->rcvr_freelock, flags); -+ -+ (di->func)(di->arg, " Rail %d elan %lx(%x) main %p(%x) ecq 
%d resched %d debug %llx\n", -+ rail->r_generic.Number, rcvrRail->rcvr_elan, rcvrRail->rcvr_elan_addr, -+ rcvrRail->rcvr_main, rcvrRail->rcvr_main_addr, elan4_cq2num(rcvrRail->rcvr_ecq->ecq_cq), -+ elan4_cq2num (rcvrRail->rcvr_resched->ecq_cq), -+ elan4_sdram_readq (dev, rcvrElan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_debug))); -+ (di->func)(di->arg, " free %d (%d) total %d blocks %d\n", -+ rcvrRail->rcvr_freecount, freeCount, rcvrRail->rcvr_totalcount, blockCount); -+ (di->func)(di->arg, " spinlock %016llx %016llx\n", rcvrRail->rcvr_main->rcvr_thread_lock, -+ elan4_sdram_readq (dev, rcvrElan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock))); -+ (di->func)(di->arg, " queue: bptr %016llx fptr %016llx control %016llx (base %lx %x)\n", -+ elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_bptr)), -+ elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_fptr)), -+ elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_control)), -+ rcvrRail->rcvr_slots, rcvrRail->rcvr_slots_addr); -+ (di->func)(di->arg, " event %016llx %016llx %016llx\n", -+ elan4_sdram_readq (dev, event + offsetof (E4_Event32, ev_CountAndType)), -+ elan4_sdram_readq (dev, event + offsetof (E4_Event32, ev_Params[0])), -+ elan4_sdram_readq (dev, event + offsetof (E4_Event32, ev_Params[1]))); -+ (di->func)(di->arg, " pending_tailp %016llx pending_head %016llx\n", -+ elan4_sdram_readq (dev, rcvrElan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_pending_tailp)), -+ elan4_sdram_readq (dev, rcvrElan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_pending_head))); -+} -+ -+void -+ep4rcvr_fillout_rail_stats(EP_RCVR_RAIL *rcvr_rail, char *str) { -+ /* no stats here yet */ -+ /* EP4_RCVR_RAIL * ep4rcvr_rail = (EP4_RCVR_RAIL *) rcvr_rail; */ -+} -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/epcommsTx.c linux-2.6.9/drivers/net/qsnet/ep/epcommsTx.c ---- clean/drivers/net/qsnet/ep/epcommsTx.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/epcommsTx.c 2005-09-02 07:04:02.000000000 -0400 -@@ -0,0 +1,919 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: epcommsTx.c,v 1.30.2.2 2005/09/02 11:04:02 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $" -+/* $Source: /cvs/master/quadrics/epmod/epcommsTx.c,v $*/ -+ -+#include -+ -+#include -+#include -+#include -+ -+#include "cm.h" -+#include "debug.h" -+ -+unsigned int ep_txd_lowat = 5; -+ -+static int -+AllocateTxdBlock (EP_XMTR *xmtr, EP_ATTRIBUTE attr, EP_TXD **txdp) -+{ -+ EP_TXD_BLOCK *blk; -+ EP_TXD *txd; -+ EP_TXD_MAIN *pTxdMain; -+ int i; -+ unsigned long flags; -+ -+ EPRINTF1 (DBG_XMTR, "AllocateTxdBlock: xmtr=%p\n", xmtr); -+ -+ KMEM_ZALLOC (blk, EP_TXD_BLOCK *, sizeof (EP_TXD_BLOCK), ! 
(attr & EP_NO_SLEEP));
-+
-+    if (blk == NULL)
-+        return -ENOMEM;
-+
-+    if ((pTxdMain = ep_shared_alloc_main (xmtr->Subsys->Subsys.Sys, EP_TXD_MAIN_SIZE * EP_NUM_TXD_PER_BLOCK, attr, &blk->NmdMain)) == (sdramaddr_t) 0)
-+    {
-+        KMEM_FREE (blk, sizeof (EP_TXD_BLOCK));
-+        return -ENOMEM;
-+    }
-+
-+    for (txd = &blk->Txd[0], i = 0; i < EP_NUM_TXD_PER_BLOCK; i++, txd++)
-+    {
-+        txd->Xmtr    = xmtr;
-+        txd->TxdMain = pTxdMain;
-+
-+        ep_nmd_subset (&txd->NmdMain, &blk->NmdMain, (i * EP_TXD_MAIN_SIZE), EP_TXD_MAIN_SIZE);
-+
-+        /* move onto next descriptor */
-+        pTxdMain = (EP_TXD_MAIN *) ((unsigned long) pTxdMain + EP_TXD_MAIN_SIZE);
-+    }
-+
-+    spin_lock_irqsave (&xmtr->FreeDescLock, flags);
-+
-+    list_add (&blk->Link, &xmtr->DescBlockList);
-+    xmtr->TotalDescCount += EP_NUM_TXD_PER_BLOCK;
-+
-+    for (i = txdp ? 1 : 0; i < EP_NUM_TXD_PER_BLOCK; i++)
-+    {
-+        list_add (&blk->Txd[i].Link, &xmtr->FreeDescList);
-+
-+        xmtr->FreeDescCount++;
-+
-+        if (xmtr->FreeDescWanted)
-+        {
-+            xmtr->FreeDescWanted--;
-+            kcondvar_wakeupone (&xmtr->FreeDescSleep, &xmtr->FreeDescLock);
-+        }
-+    }
-+    spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
-+
-+    if (txdp)
-+        *txdp = &blk->Txd[0];
-+
-+    return 0;
-+}
-+
-+static void
-+FreeTxdBlock (EP_XMTR *xmtr, EP_TXD_BLOCK *blk)
-+{
-+    unsigned long flags;
-+
-+    spin_lock_irqsave (&xmtr->FreeDescLock, flags);
-+    list_del (&blk->Link);
-+
-+    xmtr->TotalDescCount -= EP_NUM_TXD_PER_BLOCK;
-+    xmtr->FreeDescCount  -= EP_NUM_TXD_PER_BLOCK;
-+    spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
-+
-+    ep_shared_free_main (xmtr->Subsys->Subsys.Sys, &blk->NmdMain);
-+    KMEM_FREE (blk, sizeof (EP_TXD_BLOCK));
-+}
-+
-+static EP_TXD *
-+GetTxd (EP_XMTR *xmtr, EP_ATTRIBUTE attr)
-+{
-+    EP_COMMS_SUBSYS *subsys = xmtr->Subsys;
-+    EP_TXD *txd;
-+    int low_on_txds;
-+    unsigned long flags;
-+
-+    spin_lock_irqsave (&xmtr->FreeDescLock, flags);
-+
-+    while (list_empty (&xmtr->FreeDescList))
-+    {
-+        if (! (attr & EP_NO_ALLOC))
-+        {
-+            spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
-+
-+            if (AllocateTxdBlock (xmtr, attr, &txd) == ESUCCESS)
-+                return (txd);
-+
-+            spin_lock_irqsave (&xmtr->FreeDescLock, flags);
-+        }
-+
-+        if (attr & EP_NO_SLEEP)
-+        {
-+            spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
-+
-+            return (NULL);
-+        }
-+
-+        xmtr->FreeDescWanted++;
-+        kcondvar_wait (&xmtr->FreeDescSleep, &xmtr->FreeDescLock, &flags);
-+    }
-+
-+    txd = list_entry (xmtr->FreeDescList.next, EP_TXD, Link);
-+
-+    list_del (&txd->Link);
-+
-+    /* Wakeup the descriptor primer thread if there aren't many left */
-+    low_on_txds = (--xmtr->FreeDescCount < ep_txd_lowat);
-+
-+    spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
-+
-+    if (low_on_txds)
-+        ep_kthread_schedule (&subsys->Thread, lbolt);
-+
-+    return (txd);
-+}
-+
-+void
-+FreeTxd (EP_XMTR *xmtr, EP_TXD *txd)
-+{
-+    unsigned long flags;
-+
-+    spin_lock_irqsave (&xmtr->FreeDescLock, flags);
-+
-+    list_add (&txd->Link, &xmtr->FreeDescList);
-+
-+    xmtr->FreeDescCount++;
-+
-+    if (xmtr->FreeDescWanted)                   /* someone is waiting for a transmit */
-+    {                                           /* descriptor, so wake them up */
-+        xmtr->FreeDescWanted--;
-+        kcondvar_wakeupone (&xmtr->FreeDescSleep, &xmtr->FreeDescLock);
-+    }
-+
-+    spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
-+}
-+
-+int
-+TxdShouldStabalise (EP_TXD_RAIL *txdRail, EP_RAIL *rail)
-+{
-+    EP_TXD *txd = txdRail->Txd;
-+    EP_XMTR *xmtr = txd->Xmtr;
-+    EP_ATTRIBUTE attr = txd->Envelope.Attr;
-+    int stabilise;
-+    extern int txd_stabilise;
-+
-+    switch (EP_ATTR2TYPE (attr))
-+    {
-+    case EP_TYPE_SVC_INDICATOR:                 /* is the rail in the current service indicator rail mask */
-+        if ((txd_stabilise & 4) == 0)
-+            return 0;
-+
-+        stabilise = (ep_xmtr_svc_indicator_railmask (xmtr, EP_ATTR2DATA (attr), txd->NodeId) & EP_RAIL2RAILMASK (rail->Number)) == 0;
-+        break;
-+
-+    case EP_TYPE_TIMEOUT:
-+        if ((txd_stabilise & 2) == 0)
-+            return 0;
-+
-+        stabilise = AFTER(lbolt, txdRail->Txd->TimeStamp + EP_ATTR2DATA(attr));
-+        break;
-+
-+    default:
-+        if ((txd_stabilise & 1) == 0)
-+            return 0;
-+
-+        stabilise = AFTER(lbolt, txdRail->Txd->TimeStamp + EP_DEFAULT_TIMEOUT);
-+        break;
-+    }
-+
-+    if (stabilise)
-+    {
-+        txd->Envelope.Attr = EP_SET_TXD_STABALISING(txd->Envelope.Attr);
-+        txd->RetryTime     = lbolt;
-+
-+        ep_kthread_schedule (&xmtr->Subsys->Thread, lbolt);
-+    }
-+
-+    return stabilise;
-+}
-+
-+void ep_xmtr_txd_stat(EP_XMTR *xmtr, EP_TXD *txd)
-+{
-+    int f;
-+    unsigned long size;
-+    EP_TXD_RAIL *txdRail = txd->TxdRail;
-+
-+    size = 0;
-+    for (f=0; f < txd->Envelope.nFrags; f++)
-+        size += txd->Envelope.Frags[f].nmd_len;
-+
-+    INC_STAT(xmtr->stats,tx);
-+    ADD_STAT(xmtr->stats,tx_len, size);
-+
-+    if ((txdRail != NULL) && (txdRail->XmtrRail != NULL)) {
-+        INC_STAT(txdRail->XmtrRail->stats,tx);
-+        ADD_STAT(txdRail->XmtrRail->stats,tx_len, size);
-+
-+        if ((txdRail->XmtrRail->CommsRail != NULL) && (txdRail->XmtrRail->CommsRail->Rail != NULL)) {
-+            INC_STAT(txdRail->XmtrRail->CommsRail->Rail->Stats,tx);
-+            ADD_STAT(txdRail->XmtrRail->CommsRail->Rail->Stats,tx_len, size);
-+        }
-+    }
-+}
-+
-+static int
-+PollActiveTransmitList (EP_XMTR *xmtr, int flag)
-+{
-+    struct list_head *el, *nel;
-+    struct list_head list;
-+    unsigned long flags;
-+    int count;
-+
-+    INIT_LIST_HEAD (&list);
-+
-+    spin_lock_irqsave (&xmtr->Lock, flags);
-+    list_for_each_safe (el, nel, &xmtr->ActiveDescList) {
-+        EP_TXD *txd = list_entry (el, EP_TXD, Link);
-+        EP_TXD_RAIL *txdRail = txd->TxdRail;
-+
-+        if (txdRail == NULL)
-+            continue;
-+
-+        ASSERT (txdRail->Txd == txd);
-+
-+        if
(EP_XMTR_OP (txdRail->XmtrRail,PollTxd) (txdRail->XmtrRail, txdRail, flag)) -+ { -+ list_del (&txd->Link); /* remove from active transmit list */ -+ list_add_tail (&txd->Link, &list); /* and add to list to call handlers */ -+ } -+ } -+ -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+ -+ for (count = 0; !list_empty (&list); count++) -+ { -+ EP_TXD *txd = list_entry (list.next, EP_TXD, Link); -+ -+ list_del (&txd->Link); -+ -+ txd->Handler (txd, txd->Arg, EP_SUCCESS); -+ -+ FreeTxd (xmtr, txd); -+ } -+ return (count); -+} -+ -+static inline void -+DoTransmit (EP_XMTR *xmtr, EP_TXD *txd) -+{ -+ EP_RAILMASK nmdRailMask = ep_nmd2railmask (txd->Envelope.Frags, txd->Envelope.nFrags); -+ EP_XMTR_RAIL *xmtrRail; -+ unsigned long flags; -+ int rnum; -+ -+ spin_lock_irqsave (&xmtr->Lock, flags); -+ -+ if (EP_IS_SVC_INDICATOR(txd->Envelope.Attr)) -+ nmdRailMask = nmdRailMask & ep_xmtr_svc_indicator_railmask(xmtr, EP_ATTR2DATA(txd->Envelope.Attr), txd->NodeId); -+ -+ if (EP_IS_PREFRAIL_SET(txd->Envelope.Attr)) -+ rnum = EP_ATTR2PREFRAIL(txd->Envelope.Attr); -+ else -+ rnum = ep_xmtr_prefrail (xmtr, nmdRailMask, txd->NodeId); -+ -+ if (rnum < 0 || !(nmdRailMask & EP_RAIL2RAILMASK(rnum))) -+ xmtrRail = NULL; -+ else -+ xmtrRail = xmtr->Rails[rnum]; -+ -+ /* Allocate the XID while holding the xmtr->Lock from our XID cache */ -+ txd->Envelope.Xid = ep_xid_cache_alloc (xmtr->Subsys->Subsys.Sys, &xmtr->XidCache); -+ -+ EPRINTF7 (DBG_XMTR, "ep: transmit txd %p to %d/%d: Xid %llx nFrags %d [%08x.%d]\n", -+ txd, txd->NodeId, txd->Service, (long long) txd->Envelope.Xid.Unique, -+ txd->Envelope.nFrags, txd->Envelope.Frags[0].nmd_addr, txd->Envelope.Frags[0].nmd_len); -+ -+ /* Store time transmit started to timeout if not received */ -+ txd->TimeStamp = lbolt; -+ -+ /* Initialise the retry backoff */ -+ txd->Backoff.type = EP_BACKOFF_FREE; -+ -+ list_add_tail (&txd->Link, &xmtr->ActiveDescList); -+ -+ if (xmtrRail == NULL || !EP_XMTR_OP(xmtrRail,BindTxd) (txd, xmtrRail, EP_TXD_PHASE_ACTIVE)) -+ ep_kthread_schedule (&xmtr->Subsys->Thread, lbolt); -+ -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+ -+ if (EP_IS_NO_INTERRUPT(txd->Envelope.Attr)) -+ PollActiveTransmitList (xmtr, POLL_TX_LIST); -+} -+ -+EP_STATUS -+ep_transmit_message (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr, -+ EP_TXH *handler, void *arg, EP_PAYLOAD *payload, EP_NMD *nmd, int nFrags) -+{ -+ EP_TXD *txd; -+ int i, len; -+ -+ if (nFrags > EP_MAXFRAG || service > EP_MSG_NSVC) -+ return (EP_EINVAL); -+ -+ if ((txd = GetTxd (xmtr, attr)) == NULL) -+ return (EP_ENOMEM); -+ -+ txd->Handler = handler; -+ txd->Arg = arg; -+ txd->Service = service; -+ txd->NodeId = (unsigned short) dest; -+ -+ /* Initialise the envelope */ -+ txd->Envelope.Version = EP_ENVELOPE_VERSION; -+ txd->Envelope.Attr = EP_CLEAR_LOCAL_ATTR(attr); -+ txd->Envelope.Range = EP_RANGE (dest, dest); -+ txd->Envelope.TxdMain = txd->NmdMain; -+ txd->Envelope.nFrags = nFrags; -+ -+ for (i = len = 0; i < nFrags; len += nmd[i].nmd_len, i++) -+ txd->Envelope.Frags[i] = nmd[i]; -+ -+ if (payload) -+ { -+ txd->Envelope.Attr = EP_SET_HAS_PAYLOAD(txd->Envelope.Attr); -+ -+ bcopy (payload, &txd->Payload, sizeof (EP_PAYLOAD)); -+ } -+ -+ DoTransmit (xmtr, txd); -+ -+ BucketStat (xmtr->Subsys, DataXmit, len); -+ -+ return (EP_SUCCESS); -+} -+ -+EP_STATUS -+ep_multicast_message (EP_XMTR *xmtr, unsigned int destLo, unsigned int destHi, bitmap_t *bitmap, EP_SERVICE service, -+ EP_ATTRIBUTE attr, EP_TXH *handler, void *arg, EP_PAYLOAD *payload, EP_NMD *nmd, int nFrags) -+{ 
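-+    /* A destLo/destHi of -1 defaults to the EP_MAX_NODES-aligned range that
-+     * contains this node.  The destination bitmap is built below from the
-+     * connected node set (or the service indicator bitmap), intersected with
-+     * any caller-supplied bitmap, and the transmit is initially targeted at
-+     * the lowest node that remains set. */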
-+ EP_SYS *sys = xmtr->Subsys->Subsys.Sys; -+ EP_TXD *txd; -+ int nnodes; -+ int i, len; -+ unsigned long flags; -+ -+ if (nFrags > EP_MAXFRAG || service > EP_MSG_NSVC) -+ return (EP_EINVAL); -+ -+ if (destLo == -1) -+ destLo = sys->Position.pos_nodeid & ~(EP_MAX_NODES-1); -+ -+ if (destHi == -1 && (destHi = ((sys->Position.pos_nodeid + EP_MAX_NODES) & ~(EP_MAX_NODES-1)) - 1) >= sys->Position.pos_nodes) -+ destHi = sys->Position.pos_nodes-1; -+ -+ nnodes = (destHi-destLo+1); -+ -+ if ((txd = GetTxd (xmtr, attr)) == NULL) -+ return (EP_ENOMEM); -+ -+ txd->Handler = handler; -+ txd->Arg = arg; -+ txd->Service = service; -+ -+ /* Initialise the envelope */ -+ txd->Envelope.Version = EP_ENVELOPE_VERSION; -+ txd->Envelope.Attr = EP_SET_MULTICAST(EP_CLEAR_LOCAL_ATTR(attr)); -+ txd->Envelope.Range = EP_RANGE (destLo, destHi); -+ txd->Envelope.TxdMain = txd->NmdMain; -+ txd->Envelope.nFrags = nFrags; -+ -+ for (i = len = 0; i < nFrags; len += nmd[i].nmd_len, i++) -+ txd->Envelope.Frags[i] = nmd[i]; -+ -+ if (payload) -+ { -+ txd->Envelope.Attr = EP_SET_HAS_PAYLOAD(txd->Envelope.Attr); -+ -+ bcopy (payload, &txd->Payload, sizeof (EP_PAYLOAD)); -+ } -+ -+ spin_lock_irqsave (&sys->NodeLock, flags); -+ if (EP_IS_SVC_INDICATOR(attr)) -+ ep_xmtr_svc_indicator_bitmap(xmtr, EP_ATTR2DATA(attr), txd->TxdMain->Bitmap, destLo, nnodes); -+ else -+ bt_subset (statemap_tobitmap(sys->NodeSet), txd->TxdMain->Bitmap, destLo, nnodes); -+ spin_unlock_irqrestore (&sys->NodeLock, flags); -+ -+ if (bitmap != NULL) /* bitmap supplied, so intersect it with */ -+ bt_intersect (txd->TxdMain->Bitmap, bitmap, nnodes); /* the current node set map */ -+ -+ if ((attr & EP_NOT_MYSELF) && destLo <= sys->Position.pos_nodeid && sys->Position.pos_nodeid <= destHi) -+ BT_CLEAR (txd->TxdMain->Bitmap, (sys->Position.pos_nodeid-destLo)); /* clear myself if not wanted */ -+ -+ if ((i = bt_lowbit (txd->TxdMain->Bitmap, nnodes)) < 0) -+ { -+ FreeTxd (xmtr, txd); -+ return (EP_NODE_DOWN); -+ } -+ -+ txd->NodeId = (unsigned short) i; -+ -+ DoTransmit (xmtr, txd); -+ -+ BucketStat (xmtr->Subsys, McastXmit, len); -+ -+ return (EP_SUCCESS); -+} -+ -+EP_STATUS -+ep_transmit_rpc (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr, -+ EP_TXH *handler, void *arg, EP_PAYLOAD *payload, EP_NMD *nmd, int nFrags) -+{ -+ EP_TXD *txd; -+ int i, len; -+ -+ if (nFrags > EP_MAXFRAG || service > EP_MSG_NSVC) -+ return (EP_EINVAL); -+ -+ if ((txd = GetTxd (xmtr, attr)) == NULL) -+ return (EP_ENOMEM); -+ -+ txd->Handler = handler; -+ txd->Arg = arg; -+ txd->Service = service; -+ txd->NodeId = dest; -+ -+ /* Initialise the envelope */ -+ txd->Envelope.Version = EP_ENVELOPE_VERSION; -+ txd->Envelope.Attr = EP_SET_RPC(EP_CLEAR_LOCAL_ATTR(attr)); -+ txd->Envelope.Range = EP_RANGE (dest, dest); -+ txd->Envelope.TxdMain = txd->NmdMain; -+ txd->Envelope.nFrags = nFrags; -+ -+ for (i = len = 0; i < nFrags; len += nmd[i].nmd_len, i++) -+ txd->Envelope.Frags[i] = nmd[i]; -+ -+ if (payload) -+ { -+ txd->Envelope.Attr = EP_SET_HAS_PAYLOAD(txd->Envelope.Attr); -+ -+ bcopy (payload, &txd->Payload, sizeof (EP_PAYLOAD)); -+ } -+ -+ DoTransmit (xmtr, txd); -+ -+ BucketStat (xmtr->Subsys, RPCXmit, len); -+ -+ return (EP_SUCCESS); -+} -+ -+EP_STATUS -+ep_multicast_forward (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr, EP_TXH *handler, void *arg, -+ EP_ENVELOPE *env, EP_PAYLOAD *payload, bitmap_t *bitmap, EP_NMD *nmd, int nFrags) -+{ -+ EP_TXD *txd; -+ int i, len; -+ -+ if (nFrags > EP_MAXFRAG || service > EP_MSG_NSVC) -+ return 
(EP_EINVAL); -+ -+ if ((txd = GetTxd (xmtr, attr)) == NULL) -+ return (EP_ENOMEM); -+ -+ txd->Handler = handler; -+ txd->Arg = arg; -+ txd->Service = service; -+ txd->NodeId = (unsigned short) dest; -+ -+ /* Initialise the envelope */ -+ txd->Envelope.Version = EP_ENVELOPE_VERSION; -+ txd->Envelope.Attr = EP_SET_MULTICAST(EP_CLEAR_LOCAL_ATTR(attr)); -+ txd->Envelope.Range = env->Range; -+ txd->Envelope.TxdMain = txd->NmdMain; -+ txd->Envelope.nFrags = nFrags; -+ -+ for (i = len = 0; i < nFrags; len += nmd[i].nmd_len, i++) -+ txd->Envelope.Frags[i] = nmd[i]; -+ -+ bt_copy (bitmap, txd->TxdMain->Bitmap, EP_RANGE_HIGH(env->Range) - EP_RANGE_LOW(env->Range) + 1); -+ -+ if (payload) -+ { -+ txd->Envelope.Attr = EP_SET_HAS_PAYLOAD(txd->Envelope.Attr); -+ -+ bcopy (payload, &txd->Payload, sizeof (EP_PAYLOAD)); -+ } -+ -+ DoTransmit (xmtr, txd); -+ -+ BucketStat (xmtr->Subsys, McastXmit, len); -+ -+ return (EP_SUCCESS); -+} -+ -+int -+ep_poll_transmits (EP_XMTR *xmtr) -+{ -+ return (PollActiveTransmitList (xmtr, POLL_TX_LIST)); -+} -+ -+int -+ep_enable_txcallbacks (EP_XMTR *xmtr) -+{ -+ return (PollActiveTransmitList (xmtr, ENABLE_TX_CALLBACK)); -+} -+ -+int -+ep_disable_txcallbacks (EP_XMTR *xmtr) -+{ -+ return (PollActiveTransmitList (xmtr, DISABLE_TX_CALLBACK)); -+} -+ -+/* functions for accessing fields of txds */ -+int ep_txd_node(EP_TXD *txd) { return (txd->NodeId); } -+EP_STATUSBLK *ep_txd_statusblk(EP_TXD *txd) { return (&txd->TxdMain->StatusBlk); } -+ -+void -+ep_xmtr_xid_msg_handler (void *arg, EP_MANAGER_MSG *msg) -+{ -+ EP_XMTR *xmtr = (EP_XMTR *) arg; -+ EP_SYS *sys = xmtr->Subsys->Subsys.Sys; -+ struct list_head *el,*nel; -+ unsigned long flags; -+ -+ switch (msg->Hdr.Type) -+ { -+ case EP_MANAGER_MSG_TYPE_FAILOVER_REQUEST: -+ spin_lock_irqsave (&xmtr->Lock, flags); -+ list_for_each (el, &xmtr->ActiveDescList) { -+ EP_TXD *txd = list_entry (el, EP_TXD, Link); -+ EP_TXD_RAIL *txdRail = txd->TxdRail; -+ -+ if (txdRail != NULL && EP_XIDS_MATCH (msg->Body.Failover.Xid, txd->Envelope.Xid)) -+ { -+ EP_XMTR_RAIL *xmtrRail = txdRail->XmtrRail; -+ EP_RAIL *rail = xmtrRail->CommsRail->Rail; -+ EP_MANAGER_MSG_BODY msgBody; -+ int rnum; -+ -+ if (! (msg->Body.Failover.Railmask & EP_RAIL2RAILMASK (rail->Number))) -+ { -+ /* Need to failover this txd to a different rail, select a rail from -+ * the set that she has asked us to use and which is connected to her -+ * on this transmitter. If there are no such rails, then in all probability -+ * we're offline on all common rails and eventually she will see we have no -+ * rails in common and abort the receive. 
*/ -+ if ((rnum = ep_xmtr_prefrail (xmtr, msg->Body.Failover.Railmask, txd->NodeId)) < 0) -+ ep_debugf (DBG_XMTR, "%s: ep_xmtr_xid_msg_handler: FAILOVER_REQUEST but can't determine rail (%04x,%04x,%d,%04x)\n", -+ rail->Name, msg->Body.Failover.Railmask, xmtr->RailMask, txd->NodeId, sys->Nodes[txd->NodeId].ConnectedRails); -+ else -+ { -+ EP_XMTR_RAIL *nXmtrRail = xmtr->Rails[rnum]; -+ -+ EPRINTF4 (DBG_XMTR, "%s: ep_xmtr_xid_msg_handler: FAILOVER_REQUEST txd=%p XID=%llx-> rail %d\n", rail->Name, txd, (long long) txd->Envelope.Xid.Unique, rnum); -+ -+ /* Bind the txd rail onto the new rail - it doesn't matter if we fail -+ * as it will remain bound to the original rail */ -+ (void) EP_XMTR_OP (nXmtrRail, BindTxd) (txd, nXmtrRail, EP_TXD_PHASE_PASSIVE); -+ } -+ } -+ -+ /* Send a failover response including an envelope update */ -+ msgBody.FailoverTxd.Rail = rail->Number; -+ msgBody.FailoverTxd.Xid = txd->Envelope.Xid; -+ msgBody.FailoverTxd.TxdRail = txd->Envelope.TxdRail; -+ -+ ep_send_message (rail, msg->Hdr.NodeId, EP_MANAGER_MSG_TYPE_FAILOVER_RESPONSE, msg->Hdr.Xid, &msgBody); -+ } -+ } -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+ break; -+ -+ case EP_MANAGER_MSG_TYPE_GET_NODE_STATE_RESPONSE: { -+ int txd_has_not_sent_envelope = 0; -+ EP_TXD *txd = NULL; -+ EP_TXD_RAIL *txdRail = NULL; -+ -+ if (msg->Body.NodeState.NetworkErrorState != 0) -+ ep_kthread_schedule (&xmtr->Subsys->Thread, lbolt + MESSAGE_RETRY_TIME); -+ else -+ { -+ spin_lock_irqsave (&xmtr->Lock, flags); -+ list_for_each_safe (el, nel, &xmtr->ActiveDescList) { -+ -+ txd = list_entry (el, EP_TXD, Link); -+ txdRail = txd->TxdRail; -+ -+ if (txdRail != NULL && EP_XIDS_MATCH (msg->Hdr.Xid, txd->Envelope.Xid)) { -+ txd_has_not_sent_envelope = EP_XMTR_OP(txdRail->XmtrRail,CheckTxdState)(txd); -+ break; -+ } -+ } -+ -+ if (txd_has_not_sent_envelope) { -+ EPRINTF2 (DBG_STABILISE, "ep_xmtr_xid_msg_handler: GET_NODE_STATE_RESPONSE txd=%p XID=%llx not sent envelope\n", -+ txd, (long long) txd->Envelope.Xid.Unique); -+ -+ /* at this point it has finished stabalising */ -+ txd->Envelope.Attr = EP_CLEAR_TXD_STABALISING(txd->Envelope.Attr); -+ -+ /* store railmask into txd if not a service indicator or timeout */ -+ if (EP_IS_NO_TYPE(txd->Envelope.Attr)) -+ txd->Envelope.Attr = EP_SET_DATA(txd->Envelope.Attr, EP_TYPE_RAILMASK, msg->Body.NodeState.Railmask); -+ -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+ -+ /* TXD is now no longer bound to a rail , so let ep_check_xmtr() handle it */ -+ ep_kthread_schedule (&xmtr->Subsys->Thread, lbolt); -+ } -+ else -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+ } -+ break; -+ } -+ default: -+ panic ("ep_xmtr_xid_msg_handler: XID match but invalid message type\n"); -+ } -+} -+ -+EP_XMTR * -+ep_alloc_xmtr (EP_SYS *sys) -+{ -+ EP_COMMS_SUBSYS *subsys; -+ EP_XMTR *xmtr; -+ struct list_head *el; -+ -+ if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (sys, EPCOMMS_SUBSYS_NAME)) == NULL) -+ return (NULL); -+ -+ KMEM_ZALLOC (xmtr, EP_XMTR *, sizeof (EP_XMTR), 1); -+ -+ if (xmtr == NULL) -+ return (NULL); -+ -+ xmtr->Subsys = subsys; -+ -+ spin_lock_init (&xmtr->Lock); -+ INIT_LIST_HEAD (&xmtr->ActiveDescList); -+ -+ kcondvar_init (&xmtr->FreeDescSleep); -+ spin_lock_init (&xmtr->FreeDescLock); -+ INIT_LIST_HEAD (&xmtr->FreeDescList); -+ INIT_LIST_HEAD (&xmtr->DescBlockList); -+ -+ ep_xid_cache_init (sys, &xmtr->XidCache); -+ -+ xmtr->XidCache.MessageHandler = ep_xmtr_xid_msg_handler; -+ xmtr->XidCache.Arg = xmtr; -+ -+ kmutex_lock (&subsys->Lock); -+ list_add_tail (&xmtr->Link, 
&subsys->Transmitters); -+ -+ ep_procfs_xmtr_add(xmtr); -+ -+ /* Now add all rails which are already started */ -+ list_for_each (el, &subsys->Rails) { -+ EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link); -+ -+ EP_RAIL_OP(commsRail, Xmtr.AddRail) (xmtr, commsRail); -+ } -+ kmutex_unlock (&subsys->Lock); -+ -+ ep_mod_inc_usecount(); -+ -+ return (xmtr); -+} -+ -+void -+ep_free_xmtr (EP_XMTR *xmtr) -+{ -+ EP_COMMS_SUBSYS *subsys = xmtr->Subsys; -+ EP_SYS *sys = subsys->Subsys.Sys; -+ struct list_head *el; -+ -+ kmutex_lock (&subsys->Lock); -+ list_for_each (el, &subsys->Rails) { -+ EP_COMMS_RAIL *rail = list_entry (el, EP_COMMS_RAIL, Link); -+ -+ EP_RAIL_OP(rail,Xmtr.DelRail) (xmtr, rail); -+ } -+ -+ list_del (&xmtr->Link); -+ kmutex_unlock (&subsys->Lock); -+ -+ /* all the desc's must be free */ -+ ASSERT(xmtr->FreeDescCount == xmtr->TotalDescCount); -+ -+ /* delete the descs */ -+ while (!list_empty (&xmtr->DescBlockList)) -+ FreeTxdBlock( xmtr, list_entry(xmtr->DescBlockList.next, EP_TXD_BLOCK , Link)); -+ -+ /* they had better all be gone now */ -+ ASSERT((xmtr->FreeDescCount == 0) && (xmtr->TotalDescCount == 0)); -+ -+ ep_procfs_xmtr_del(xmtr); -+ -+ ep_xid_cache_destroy (sys, &xmtr->XidCache); -+ -+ spin_lock_destroy (&xmtr->Lock); -+ KMEM_FREE (xmtr, sizeof (EP_XMTR)); -+ -+ ep_mod_dec_usecount(); -+} -+ -+long -+ep_check_xmtr (EP_XMTR *xmtr, long nextRunTime) -+{ -+ EP_COMMS_SUBSYS *subsys = xmtr->Subsys; -+ EP_SYS *sys = subsys->Subsys.Sys; -+ struct list_head *el, *nel; -+ struct list_head txdList; -+ unsigned long flags; -+ int timed_out=0; -+ int i; -+ EP_MANAGER_MSG_BODY body; -+ -+ INIT_LIST_HEAD (&txdList); -+ -+ /* See if we have any txd's which need to be bound to a rail */ -+ spin_lock_irqsave (&xmtr->Lock, flags); -+ list_for_each_safe (el, nel, &xmtr->ActiveDescList) { -+ EP_TXD *txd = list_entry (el, EP_TXD, Link); -+ EP_NODE *node = &sys->Nodes[txd->NodeId]; -+ EP_RAILMASK nodeRails = node->ConnectedRails & xmtr->RailMask; -+ EP_ENVELOPE *env = &txd->Envelope; -+ -+ if (EP_IS_TXD_STABALISING(txd->Envelope.Attr)) -+ { -+ ASSERT(txd->TxdRail != NULL); -+ -+ if (AFTER (lbolt, txd->RetryTime)) -+ { -+ EPRINTF6 (DBG_STABILISE, "ep_check_xmtr txd=%p txdRail=%p send get node state to %d Xid=%08x.%08x.%016llx\n", -+ txd, txd->TxdRail, txd->NodeId, env->Xid.Generation, env->Xid.Handle, (long long)env->Xid.Unique); -+ -+ body.Service = txd->Service; -+ if (ep_send_message ( txd->TxdRail->XmtrRail->CommsRail->Rail, txd->NodeId, EP_MANAGER_MSG_TYPE_GET_NODE_STATE, env->Xid, &body) == 0) -+ txd->RetryTime = lbolt + (MESSAGE_RETRY_TIME << ep_backoff (&txd->Backoff, EP_BACKOFF_STABILISE)); -+ else -+ txd->RetryTime = lbolt + MSGBUSY_RETRY_TIME; -+ } -+ -+ ep_kthread_schedule (&subsys->Thread, txd->RetryTime); -+ continue; -+ } -+ -+ if (txd->TxdRail != NULL) -+ continue; -+ -+ switch (EP_ATTR2TYPE(txd->Envelope.Attr)) -+ { -+ case EP_TYPE_SVC_INDICATOR: -+ { -+ EP_RAILMASK rmask=0; -+ struct list_head *tmp; -+ -+ list_for_each (tmp, &subsys->Rails) { -+ EP_COMMS_RAIL *commsRail = list_entry (tmp, EP_COMMS_RAIL, Link); -+ if ( cm_svc_indicator_is_set(commsRail->Rail, EP_ATTR2DATA(txd->Envelope.Attr), txd->NodeId)) -+ rmask |= EP_RAIL2RAILMASK(commsRail->Rail->Number); -+ } -+ nodeRails &= rmask; -+ break; -+ } -+ case EP_TYPE_TIMEOUT: -+ timed_out = AFTER(lbolt, txd->TimeStamp + EP_ATTR2DATA(txd->Envelope.Attr)) ? 
(1) : (0);
-+            break;
-+        case EP_TYPE_RAILMASK:
-+            nodeRails &= EP_ATTR2DATA(txd->Envelope.Attr);
-+            break;
-+        default:
-+            timed_out = AFTER(lbolt, txd->TimeStamp + EP_DEFAULT_TIMEOUT) ? (1) : (0);
-+            break;
-+        }
-+
-+        if (nodeRails == 0 || timed_out || (EP_IS_NO_FAILOVER(env->Attr) && EP_IS_PREFRAIL_SET(env->Attr) &&
-+            (nodeRails & EP_RAIL2RAILMASK(EP_ATTR2PREFRAIL(env->Attr))) == 0))
-+        {
-+            EPRINTF5 (timed_out ? DBG_STABILISE : DBG_XMTR, "ep_check_xmtr: txd=%p XID=%llx to %d no rails connected or cannot failover (nodeRails=0x%x,timed_out=%d)\n",
-+                      txd, (long long) env->Xid.Unique, txd->NodeId, nodeRails, timed_out);
-+
-+            list_del (&txd->Link);
-+            list_add_tail (&txd->Link, &txdList);
-+        }
-+        else
-+        {
-+            EP_XMTR_RAIL *xmtrRail;
-+            int i, len, rnum;
-+
-+            if (EP_IS_PREFRAIL_SET(env->Attr) && (nodeRails & EP_RAIL2RAILMASK(EP_ATTR2PREFRAIL(env->Attr))))
-+                rnum = EP_ATTR2PREFRAIL(env->Attr);
-+            else
-+                rnum = ep_pickRail (nodeRails);
-+
-+            EPRINTF3 (DBG_XMTR, "ep_check_xmtr: txd=%p XID=%llx mapping NMDs onto rail %d\n", txd, (long long) env->Xid.Unique, rnum);
-+
-+            for (i = len = 0; i < env->nFrags; len += env->Frags[i].nmd_len, i++)
-+                ep_nmd_map_rails (sys, &env->Frags[i], nodeRails);
-+
-+            if ((xmtrRail = xmtr->Rails[rnum]) == NULL ||
-+                !EP_XMTR_OP(xmtrRail,BindTxd) (txd, xmtrRail, EP_TXD_PHASE_ACTIVE))
-+                ep_kthread_schedule (&subsys->Thread, lbolt + RESOURCE_RETRY_TIME);
-+        }
-+    }
-+    spin_unlock_irqrestore (&xmtr->Lock, flags);
-+
-+    while (! list_empty (&txdList))
-+    {
-+        EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link);
-+        list_del (&txd->Link);
-+
-+        txd->Handler (txd, txd->Arg, EP_NODE_DOWN);
-+        FreeTxd (xmtr, txd);
-+    }
-+
-+    /* Check to see if we're low on txds */
-+    if (xmtr->FreeDescCount < ep_txd_lowat)
-+        AllocateTxdBlock (xmtr, 0, NULL);
-+
-+    /* Then check each rail */
-+    for (i = 0; i < EP_MAX_RAILS; i++)
-+        if (xmtr->RailMask & (1 << i))
-+            nextRunTime = EP_XMTR_OP (xmtr->Rails[i],Check) (xmtr->Rails[i], nextRunTime);
-+    return (nextRunTime);
-+}
-+
-+void
-+ep_display_txd (DisplayInfo *di, EP_TXD *txd)
-+{
-+    EP_ENVELOPE *env = &txd->Envelope;
-+    EP_TXD_RAIL *txdRail = txd->TxdRail;
-+
-+    (di->func)(di->arg, "TXD: %p Version=%x Attr=%x Xid=%08x.%08x.%016llx\n", txd,
-+               env->Version, env->Attr, env->Xid.Generation, env->Xid.Handle, (long long) env->Xid.Unique);
-+    (di->func)(di->arg, " NodeId=%d Range=%d.%d TxdRail=%x TxdMain=%x.%x.%x nFrags=%d\n",
-+               env->NodeId, EP_RANGE_LOW(env->Range), EP_RANGE_HIGH(env->Range), env->TxdRail,
-+               env->TxdMain.nmd_addr, env->TxdMain.nmd_len, env->TxdMain.nmd_attr, env->nFrags);
-+    (di->func)(di->arg, " Frag[0] %08x.%08x.%08x\n", env->Frags[0].nmd_addr, env->Frags[0].nmd_len, env->Frags[0].nmd_attr);
-+    (di->func)(di->arg, " Frag[1] %08x.%08x.%08x\n", env->Frags[1].nmd_addr, env->Frags[1].nmd_len, env->Frags[1].nmd_attr);
-+    (di->func)(di->arg, " Frag[2] %08x.%08x.%08x\n", env->Frags[2].nmd_addr, env->Frags[2].nmd_len, env->Frags[2].nmd_attr);
-+    (di->func)(di->arg, " Frag[3] %08x.%08x.%08x\n", env->Frags[3].nmd_addr, env->Frags[3].nmd_len, env->Frags[3].nmd_attr);
-+
-+    if (txdRail != NULL) EP_XMTR_OP (txdRail->XmtrRail, DisplayTxd) (di, txdRail);
-+}
-+
-+void
-+ep_display_xmtr (DisplayInfo *di, EP_XMTR *xmtr)
-+{
-+    int freeCount = 0;
-+    int activeCount = 0;
-+    struct list_head *el;
-+    int i;
-+    unsigned long flags;
-+
-+    spin_lock_irqsave (&xmtr->FreeDescLock, flags);
-+    list_for_each (el, &xmtr->FreeDescList)
-+        freeCount++;
-+    spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
-+
-+    spin_lock_irqsave (&xmtr->Lock,
flags); -+ list_for_each (el, &xmtr->ActiveDescList) -+ activeCount++; -+ -+ (di->func)(di->arg, "ep_display_xmtr: xmtr=%p Free=%d Active=%d\n", xmtr, freeCount, activeCount); -+ for (i = 0; i < EP_MAX_RAILS; i++) -+ if (xmtr->Rails[i]) EP_XMTR_OP (xmtr->Rails[i], DisplayXmtr) (di, xmtr->Rails[i]); -+ -+ list_for_each (el,&xmtr->ActiveDescList) -+ ep_display_txd (di, list_entry (el, EP_TXD, Link)); -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+} -+ -+void -+ep_xmtr_fillout_stats(EP_XMTR *xmtr, char *str) -+{ -+ sprintf(str+strlen(str),"Tx %lu %lu /sec\n", GET_STAT_TOTAL(xmtr->stats,tx), GET_STAT_PER_SEC(xmtr->stats,tx) ); -+ sprintf(str+strlen(str),"MBytes %lu %lu Mbytes/sec\n", GET_STAT_TOTAL(xmtr->stats,tx_len) / (1024*1024), GET_STAT_PER_SEC(xmtr->stats,tx_len) / (1024*1024)); -+} -+ -+void -+ep_xmtr_rail_fillout_stats(EP_XMTR_RAIL *xmtr_rail, char *str) -+{ -+ sprintf(str+strlen(str),"Tx %lu %lu /sec\n", GET_STAT_TOTAL(xmtr_rail->stats,tx), GET_STAT_PER_SEC(xmtr_rail->stats,tx) ); -+ sprintf(str+strlen(str),"MBytes %lu %lu Mbytes/sec\n", GET_STAT_TOTAL(xmtr_rail->stats,tx_len) / (1024*1024), GET_STAT_PER_SEC(xmtr_rail->stats,tx_len) / (1024*1024)); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/epcommsTx_elan3.c linux-2.6.9/drivers/net/qsnet/ep/epcommsTx_elan3.c ---- clean/drivers/net/qsnet/ep/epcommsTx_elan3.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/epcommsTx_elan3.c 2004-11-12 05:55:03.000000000 -0500 -@@ -0,0 +1,1173 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: epcommsTx_elan3.c,v 1.19 2004/11/12 10:55:03 mike Exp $" -+/* $Source: /cvs/master/quadrics/epmod/epcommsTx_elan3.c,v $ */ -+ -+#include -+ -+#include -+#include -+#include -+ -+#include "kcomm_vp.h" -+#include "kcomm_elan3.h" -+#include "epcomms_elan3.h" -+#include "debug.h" -+ -+#define XMTR_TO_RAIL(xmtrRail) ((EP3_RAIL *) ((EP_XMTR_RAIL *) xmtrRail)->CommsRail->Rail) -+#define XMTR_TO_DEV(xmtrRail) (XMTR_TO_RAIL(xmtrRail)->Device) -+#define XMTR_TO_SUBSYS(xmtrRail) (((EP_XMTR_RAIL *) xmtrRail)->Xmtr->Subsys) -+ -+static void TxEnveEvent (EP3_RAIL *rail, void *arg); -+static void TxEnveRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status); -+static void TxEnveVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma); -+ -+static EP3_COOKIE_OPS EnveCookieOps = -+{ -+ TxEnveEvent, -+ TxEnveRetry, -+ NULL, /* DmaCancelled */ -+ TxEnveVerify -+}; -+ -+static void TxDataEvent (EP3_RAIL *rail, void *arg); -+static void TxDataRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status); -+static void TxDataVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma); -+ -+static EP3_COOKIE_OPS DataCookieOps = -+{ -+ TxDataEvent, -+ TxDataRetry, -+ NULL, /* DmaCancelled */ -+ TxDataVerify -+}; -+ -+static void TxDoneEvent (EP3_RAIL *dev, void *arg); -+static void TxDoneRetry (EP3_RAIL *dev, void *arg, E3_DMA_BE *dma, int status); -+static void TxDoneVerify (EP3_RAIL *dev, void *arg, E3_DMA_BE *dma); -+ -+static EP3_COOKIE_OPS DoneCookieOps = -+{ -+ TxDoneEvent, -+ TxDoneRetry, -+ NULL, /* DmaCancelled */ -+ TxDoneVerify, -+} ; -+ -+static int -+AllocateTxdRailBlock (EP3_XMTR_RAIL *xmtrRail) -+{ -+ EP3_RAIL *rail = XMTR_TO_RAIL (xmtrRail); -+ ELAN3_DEV *dev = rail->Device; -+ EP3_TXD_RAIL_BLOCK *blk; -+ EP3_TXD_RAIL *txdRail; -+ sdramaddr_t pTxdElan; -+ 
EP3_TXD_RAIL_MAIN *pTxdMain; -+ E3_Addr pTxdElanAddr; -+ E3_Addr pTxdMainAddr; -+ E3_BlockCopyEvent event; -+ int i; -+ unsigned long flags; -+ -+ KMEM_ZALLOC (blk, EP3_TXD_RAIL_BLOCK *, sizeof (EP3_TXD_RAIL_BLOCK), 1); -+ -+ if (blk == NULL) -+ return 0; -+ -+ if ((pTxdElan = ep_alloc_elan (&rail->Generic, EP3_TXD_RAIL_ELAN_SIZE * EP3_NUM_TXD_PER_BLOCK, 0, &pTxdElanAddr)) == (sdramaddr_t) 0) -+ { -+ KMEM_FREE (blk, sizeof (EP3_TXD_RAIL_BLOCK)); -+ return 0; -+ } -+ -+ if ((pTxdMain = ep_alloc_main (&rail->Generic, EP3_TXD_RAIL_MAIN_SIZE * EP3_NUM_TXD_PER_BLOCK, 0, &pTxdMainAddr)) == (EP3_TXD_RAIL_MAIN *) NULL) -+ { -+ ep_free_elan (&rail->Generic, pTxdElanAddr, EP3_TXD_RAIL_ELAN_SIZE * EP3_NUM_TXD_PER_BLOCK); -+ KMEM_FREE (blk, sizeof (EP3_TXD_RAIL_BLOCK)); -+ return 0; -+ } -+ -+ if (ReserveDmaRetries (rail, EP3_NUM_TXD_PER_BLOCK, 0) != ESUCCESS) -+ { -+ ep_free_main (&rail->Generic, pTxdMainAddr, EP3_TXD_RAIL_MAIN_SIZE * EP3_NUM_TXD_PER_BLOCK); -+ ep_free_elan (&rail->Generic, pTxdElanAddr, EP3_TXD_RAIL_ELAN_SIZE * EP3_NUM_TXD_PER_BLOCK); -+ KMEM_FREE (blk, sizeof (EP3_TXD_RAIL_BLOCK)); -+ return 0; -+ } -+ -+ for (txdRail = &blk->Txd[0], i = 0; i < EP3_NUM_TXD_PER_BLOCK; i++, txdRail++) -+ { -+ txdRail->Generic.XmtrRail = &xmtrRail->Generic; -+ txdRail->TxdElan = pTxdElan; -+ txdRail->TxdElanAddr = pTxdElanAddr; -+ txdRail->TxdMain = pTxdMain; -+ txdRail->TxdMainAddr = pTxdMainAddr; -+ -+ RegisterCookie (&rail->CookieTable, &txdRail->EnveCookie, pTxdElanAddr + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent), &EnveCookieOps, (void *) txdRail); -+ RegisterCookie (&rail->CookieTable, &txdRail->DataCookie, pTxdElanAddr + offsetof (EP3_TXD_RAIL_ELAN, DataEvent), &DataCookieOps, (void *) txdRail); -+ RegisterCookie (&rail->CookieTable, &txdRail->DoneCookie, pTxdElanAddr + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent), &DoneCookieOps, (void *) txdRail); -+ -+ EP3_INIT_COPY_EVENT (event, txdRail->EnveCookie, pTxdMainAddr + offsetof (EP3_TXD_RAIL_MAIN, EnveEvent), 0); -+ elan3_sdram_copyl_to_sdram (dev, &event, pTxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent), sizeof (E3_BlockCopyEvent)); -+ -+ EP3_INIT_COPY_EVENT (event, txdRail->DataCookie, pTxdMainAddr + offsetof (EP3_TXD_RAIL_MAIN, DataEvent), 0); -+ elan3_sdram_copyl_to_sdram (dev, &event, pTxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent), sizeof (E3_BlockCopyEvent)); -+ -+ EP3_INIT_COPY_EVENT (event, txdRail->DoneCookie, pTxdMainAddr + offsetof (EP3_TXD_RAIL_MAIN, DoneEvent), 0); -+ elan3_sdram_copyl_to_sdram (dev, &event, pTxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent), sizeof (E3_BlockCopyEvent)); -+ -+ pTxdMain->EnveEvent = EP3_EVENT_FREE; -+ pTxdMain->DataEvent = EP3_EVENT_FREE; -+ pTxdMain->DoneEvent = EP3_EVENT_FREE; -+ -+ /* move onto next descriptor */ -+ pTxdElan += EP3_TXD_RAIL_ELAN_SIZE; -+ pTxdElanAddr += EP3_TXD_RAIL_ELAN_SIZE; -+ pTxdMain = (EP3_TXD_RAIL_MAIN *) ((unsigned long) pTxdMain + EP3_TXD_RAIL_MAIN_SIZE); -+ pTxdMainAddr += EP3_TXD_RAIL_MAIN_SIZE; -+ } -+ -+ spin_lock_irqsave (&xmtrRail->FreeDescLock, flags); -+ -+ list_add (&blk->Link, &xmtrRail->DescBlockList); -+ xmtrRail->TotalDescCount += EP3_NUM_TXD_PER_BLOCK; -+ xmtrRail->FreeDescCount += EP3_NUM_TXD_PER_BLOCK; -+ -+ for (i = 0; i < EP3_NUM_TXD_PER_BLOCK; i++) -+ list_add (&blk->Txd[i].Generic.Link, &xmtrRail->FreeDescList); -+ -+ spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags); -+ -+ return 1; -+} -+ -+static void -+FreeTxdRailBlock (EP3_XMTR_RAIL *xmtrRail, EP3_TXD_RAIL_BLOCK *blk) -+{ -+ EP3_RAIL *rail = XMTR_TO_RAIL(xmtrRail); -+ EP3_TXD_RAIL *txdRail; -+ 
unsigned long flags; -+ int i; -+ -+ spin_lock_irqsave (&xmtrRail->FreeDescLock, flags); -+ -+ list_del (&blk->Link); -+ -+ xmtrRail->TotalDescCount -= EP3_NUM_TXD_PER_BLOCK; -+ -+ for (txdRail = &blk->Txd[0], i = 0; i < EP3_NUM_TXD_PER_BLOCK; i++, txdRail++) -+ { -+ xmtrRail->FreeDescCount--; -+ -+ list_del (&txdRail->Generic.Link); -+ -+ DeregisterCookie (&rail->CookieTable, &txdRail->EnveCookie); -+ DeregisterCookie (&rail->CookieTable, &txdRail->DataCookie); -+ DeregisterCookie (&rail->CookieTable, &txdRail->DoneCookie); -+ } -+ -+ spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags); -+ -+ ReleaseDmaRetries (rail, EP3_NUM_TXD_PER_BLOCK); -+ -+ ep_free_main (&rail->Generic, blk->Txd[0].TxdMainAddr, EP3_TXD_RAIL_MAIN_SIZE * EP3_NUM_TXD_PER_BLOCK); -+ ep_free_elan (&rail->Generic, blk->Txd[0].TxdElanAddr, EP3_TXD_RAIL_ELAN_SIZE * EP3_NUM_TXD_PER_BLOCK); -+ KMEM_FREE (blk, sizeof (EP3_TXD_RAIL_BLOCK)); -+} -+ -+static EP3_TXD_RAIL * -+GetTxdRail (EP3_XMTR_RAIL *xmtrRail) -+{ -+ EP_COMMS_SUBSYS *subsys = xmtrRail->Generic.Xmtr->Subsys; -+ EP3_TXD_RAIL *txdRail; -+ int low_on_txds; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&xmtrRail->FreeDescLock, flags); -+ -+ if (list_empty (&xmtrRail->FreeDescList)) -+ txdRail = NULL; -+ else -+ { -+ txdRail = list_entry (xmtrRail->FreeDescList.next, EP3_TXD_RAIL, Generic.Link); -+ -+#if defined(DEBUG) -+ { -+ EP_RAIL *rail = xmtrRail->Generic.CommsRail->Rail; -+ ELAN3_DEV *dev = ((EP3_RAIL *) rail)->Device; -+ -+ EP_ASSERT (rail, txdRail->TxdMain->EnveEvent == EP3_EVENT_FREE); -+ EP_ASSERT (rail, txdRail->TxdMain->DataEvent == EP3_EVENT_FREE); -+ EP_ASSERT (rail, txdRail->TxdMain->DoneEvent == EP3_EVENT_FREE); -+ EP_ASSERT (rail, SDRAM_ASSERT(elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)) == 0)); -+ EP_ASSERT (rail, SDRAM_ASSERT(elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) == 0)); -+ EP_ASSERT (rail, SDRAM_ASSERT(elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0)); -+ } -+#endif -+ -+ list_del (&txdRail->Generic.Link); -+ -+ xmtrRail->FreeDescCount--; -+ } -+ /* Wakeup the descriptor primer thread if there's not many left */ -+ low_on_txds = (xmtrRail->FreeDescCount < ep_txd_lowat); -+ -+ spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags); -+ -+ if (low_on_txds) -+ ep_kthread_schedule (&subsys->Thread, lbolt); -+ -+ return (txdRail); -+} -+ -+static void -+FreeTxdRail (EP3_XMTR_RAIL *xmtrRail, EP3_TXD_RAIL *txdRail) -+{ -+ unsigned long flags; -+ -+#if defined(DEBUG_ASSERT) -+ { -+ EP_RAIL *rail = xmtrRail->Generic.CommsRail->Rail; -+ ELAN3_DEV *dev = ((EP3_RAIL *) rail)->Device; -+ -+ EP_ASSERT (rail, txdRail->Generic.XmtrRail == &xmtrRail->Generic); -+ -+ EP_ASSERT (rail, txdRail->TxdMain->EnveEvent == EP3_EVENT_PRIVATE); -+ EP_ASSERT (rail, txdRail->TxdMain->DataEvent == EP3_EVENT_PRIVATE); -+ EP_ASSERT (rail, txdRail->TxdMain->DoneEvent == EP3_EVENT_PRIVATE); -+ EP_ASSERT (rail, SDRAM_ASSERT (elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)) == 0)); -+ EP_ASSERT (rail, SDRAM_ASSERT (elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) == 0)); -+ EP_ASSERT (rail, SDRAM_ASSERT (elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0)); -+ -+ txdRail->TxdMain->EnveEvent = EP3_EVENT_FREE; -+ txdRail->TxdMain->DataEvent = EP3_EVENT_FREE; -+ txdRail->TxdMain->DoneEvent = 
EP3_EVENT_FREE; -+ } -+#endif -+ -+ spin_lock_irqsave (&xmtrRail->FreeDescLock, flags); -+ -+ list_add (&txdRail->Generic.Link, &xmtrRail->FreeDescList); -+ -+ xmtrRail->FreeDescCount++; -+ -+ if (xmtrRail->FreeDescWaiting) -+ { -+ xmtrRail->FreeDescWaiting--; -+ kcondvar_wakeupall (&xmtrRail->FreeDescSleep, &xmtrRail->FreeDescLock); -+ } -+ -+ spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags); -+} -+ -+static void -+BindTxdToRail (EP_TXD *txd, EP3_TXD_RAIL *txdRail) -+{ -+ ASSERT (SPINLOCK_HELD (&txd->Xmtr->Lock)); -+ -+ EPRINTF6 (DBG_XMTR, "%s: BindTxdToRail: txd=%p txdRail=%p XID=%08x.%08x.%016llx\n", -+ XMTR_TO_RAIL(txdRail->Generic.XmtrRail)->Generic.Name, txd, txdRail, -+ txd->Envelope.Xid.Generation, txd->Envelope.Xid.Handle, (long long) txd->Envelope.Xid.Unique); -+ -+ txd->TxdRail = &txdRail->Generic; -+ txdRail->Generic.Txd = txd; -+} -+ -+static void -+UnbindTxdFromRail (EP_TXD *txd, EP3_TXD_RAIL *txdRail) -+{ -+ ASSERT (SPINLOCK_HELD (&txd->Xmtr->Lock)); -+ ASSERT (txd->TxdRail == &txdRail->Generic && txdRail->Generic.Txd == txd); -+ -+ EPRINTF6 (DBG_XMTR, "%s: UnbindTxdToRail: txd=%p txdRail=%p XID=%08x.%08x.%016llx\n", -+ XMTR_TO_RAIL(txdRail->Generic.XmtrRail)->Generic.Name, txd, txdRail, -+ txd->Envelope.Xid.Generation, txd->Envelope.Xid.Handle, (long long) txd->Envelope.Xid.Unique); -+ txd->TxdRail = NULL; -+ txdRail->Generic.Txd = NULL; -+} -+ -+/* -+ * TxEnveEvent: arg == EP_TXD -+ * Called when envelope delivered -+ */ -+static void -+TxEnveEvent (EP3_RAIL *rail, void *arg) -+{ -+ panic ("TxEnveEvent"); -+} -+ -+/* -+ * TxEnveRetry: arg == EP3_TXD_RAIL -+ * Called on retry of dma of large message envelope. -+ */ -+static void -+TxEnveRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status) -+{ -+ EP3_TXD_RAIL *txdRail = (EP3_TXD_RAIL *) arg; -+ EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail; -+ -+ EPRINTF3 (DBG_XMTR, "%s: TxEnveRetry: xmtr %p txd %p\n", rail->Generic.Name, xmtrRail, txdRail); -+ -+ EP_ASSERT (&rail->Generic, txdRail->TxdMain->EnveEvent == EP3_EVENT_ACTIVE); -+ EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)) == 1)); /* PCI read */ -+ EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_WRITE && EP_VP_TO_NODE(dma->s.dma_destVProc) == txdRail->Generic.Txd->NodeId); -+ -+ if (! TxdShouldStabalise (&txdRail->Generic, &rail->Generic)) -+ QueueDmaForRetry (rail, dma, EP_RETRY_LOW_PRI_RETRY + ep_backoff (&txdRail->Backoff, EP_BACKOFF_ENVELOPE)); -+ else -+ QueueDmaForRetry (rail, dma, EP_RETRY_STABALISING); /* place dma on stabilising list for neterr fixup */ -+} -+ -+static void -+TxEnveVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma) -+{ -+ EP3_TXD_RAIL *txdRail = (EP3_TXD_RAIL *) arg; -+ -+ EP_ASSERT (&rail->Generic, txdRail->TxdMain->EnveEvent == EP3_EVENT_ACTIVE); -+ EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)) == 1)); /* PCI read */ -+ EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_WRITE && EP_VP_TO_NODE(dma->s.dma_destVProc) == txdRail->Generic.Txd->NodeId); -+} -+ -+/* -+ * TxDataEvent: arg == EP3_TXD -+ * Called on completion of a large transmit. 
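-+ * Note: the event may already have been collected by polling
-+ * (ep3xmtr_poll_txd); in that case the done flags read as
-+ * EP3_EVENT_PRIVATE below and the interrupt is simply ignored.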
-+ */
-+static void
-+TxDataEvent (EP3_RAIL *rail, void *arg)
-+{
-+    EP3_TXD_RAIL *txdRail = (EP3_TXD_RAIL *) arg;
-+    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
-+    EP_XMTR *xmtr = xmtrRail->Generic.Xmtr;
-+    EP3_TXD_RAIL_MAIN *txdMain = txdRail->TxdMain;
-+    sdramaddr_t txdElan = txdRail->TxdElan;
-+    int delay = 1;
-+    EP_TXD *txd;
-+    unsigned long flags;
-+
-+    spin_lock_irqsave (&xmtr->Lock, flags);
-+    for (;;)
-+    {
-+        if (EP3_EVENT_FIRED (txdRail->DataCookie, txdMain->DataEvent))
-+            break;
-+
-+        if (EP3_EVENT_FIRING (rail->Device, txdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent), txdRail->DataCookie, txdMain->DataEvent)) /* PCI read */
-+        {
-+            if (delay > EP3_EVENT_FIRING_TLIMIT)
-+                panic ("TxDataEvent: events set but block copy not completed\n");
-+            DELAY(delay);
-+            delay <<= 1;
-+        }
-+        else
-+        {
-+            EPRINTF3 (DBG_XMTR, "%s: TxDataEvent: xmtr %p txd %p previously collected by polling\n",
-+                      rail->Generic.Name, xmtrRail, txdRail);
-+            spin_unlock_irqrestore (&xmtr->Lock, flags);
-+            return;
-+        }
-+        mb();
-+    }
-+
-+    if ((txd = txdRail->Generic.Txd) == NULL ||          /* If there is no txd, or if the descriptor is marked */
-+        !(EP_IS_INTERRUPT_ENABLED(txd->Envelope.Attr)) || /* as no interrupt, or has been reused as an RPC, */
-+        (EP_IS_RPC(txd->Envelope.Attr)))                 /* then we were either called as a result of a previous */
-+    {                                                    /* tx which was completed by polling or as a result */
-+        spin_unlock_irqrestore (&xmtr->Lock, flags);     /* of an EnableTxCallBack/DisableTxCallback */
-+
-+        EPRINTF4 (DBG_XMTR, "%s: TxDataEvent: xmtr %p txd %p recycled (%x)\n",
-+                  rail->Generic.Name, xmtr, txd, txd ? txd->Envelope.Attr : 0);
-+        return;
-+    }
-+
-+    ASSERT (EP3_EVENT_FIRED (txdRail->EnveCookie, txdMain->EnveEvent));
-+
-+    EPRINTF5 (DBG_XMTR, "%s: TxDataEvent: xmtrRail=%p txdRail=%p tx=%p XID=%llx\n",
-+              rail->Generic.Name, xmtrRail, txdRail, txd, (long long) txd->Envelope.Xid.Unique);
-+
-+    ep_xmtr_txd_stat(xmtr,txd);
-+
-+    /* remove from active transmit lists */
-+    list_del (&txd->Link);
-+
-+    UnbindTxdFromRail (txd, txdRail);
-+
-+    /* clear the done flags for next time round */
-+    txdMain->EnveEvent = EP3_EVENT_PRIVATE;
-+    txdMain->DataEvent = EP3_EVENT_PRIVATE;
-+    txdMain->DoneEvent = EP3_EVENT_PRIVATE;
-+
-+    FreeTxdRail (xmtrRail, txdRail);
-+
-+    spin_unlock_irqrestore (&xmtr->Lock, flags);
-+
-+    txd->Handler (txd, txd->Arg, EP_SUCCESS);
-+
-+    FreeTxd (xmtr, txd);
-+}
-+
-+/*
-+ * TxDataRetry: arg == EP3_TXD
-+ * Called on retry of remote "put" dma of large transmit data.
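-+ * The dma is requeued on the low priority retry list with an
-+ * exponential backoff (EP_BACKOFF_DATA).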
-+ */ -+static void -+TxDataRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status) -+{ -+ EP3_TXD_RAIL *txdRail = (EP3_TXD_RAIL *) arg; -+ EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail; -+ EP_TXD *txd = txdRail->Generic.Txd; -+ -+ EP_ASSERT (&rail->Generic, ((txdRail->TxdMain->DataEvent == EP3_EVENT_ACTIVE && -+ SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) >= 1)) || /* PCI read */ -+ (EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent) && -+ SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) == 0)))); /* PCI read */ -+ EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_WRITE && EP_VP_TO_NODE(dma->s.dma_destVProc) == txd->NodeId); -+ -+ EPRINTF5 (DBG_XMTR, "%s: TxDataRetry: xmtrRail=%p txdRail=%p txd=%p XID=%llx\n", -+ rail->Generic.Name, xmtrRail, txdRail, txd, (long long) txd->Envelope.Xid.Unique); -+ -+ QueueDmaForRetry (rail, dma, EP_RETRY_LOW_PRI_RETRY + ep_backoff (&txdRail->Backoff, EP_BACKOFF_DATA)); -+} -+ -+static void -+TxDataVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma) -+{ -+ EP3_TXD_RAIL *txdRail = (EP3_TXD_RAIL *) arg; -+ EP_TXD *txd = txdRail->Generic.Txd; -+ -+ EP_ASSERT (&rail->Generic, ((txdRail->TxdMain->DataEvent == EP3_EVENT_ACTIVE && -+ SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) >= 1)) || /* PCI read */ -+ (EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent) && -+ SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) == 0)))); /* PCI read */ -+ EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_WRITE && EP_VP_TO_NODE(dma->s.dma_destVProc) == txd->NodeId); -+} -+ -+/* -+ * TxDoneEvent: arg == EP3_TXD -+ * Called on completion of a RPC. 
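-+ * Both the done and the data events must have fired before the
-+ * txd is completed and handed back to its handler.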
-+ */ -+static void -+TxDoneEvent (EP3_RAIL *rail, void *arg) -+{ -+ EP3_TXD_RAIL *txdRail = (EP3_TXD_RAIL *) arg; -+ EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail; -+ EP_XMTR *xmtr = xmtrRail->Generic.Xmtr; -+ int delay = 1; -+ EP_TXD *txd; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&xmtr->Lock, flags); -+ -+ for (;;) -+ { -+ if (EP3_EVENT_FIRED (txdRail->DoneCookie, txdRail->TxdMain->DoneEvent) && -+ EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent)) -+ break; -+ -+ if (EP3_EVENT_FIRING (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent), txdRail->DoneCookie, txdRail->TxdMain->DoneEvent) && -+ EP3_EVENT_FIRING (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent), txdRail->DataCookie, txdRail->TxdMain->DataEvent)) -+ { -+ if (delay > EP3_EVENT_FIRING_TLIMIT) -+ panic ("TxDoneEvent: events set but block copy not completed\n"); -+ DELAY(delay); -+ delay <<= 1; -+ } -+ else -+ { -+ EPRINTF3 (DBG_XMTR, "%s: TxDoneEvent: xmtr %p txdRail %p previously collecting by polling\n", -+ rail->Generic.Name, xmtr, txdRail); -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+ return; -+ } -+ mb(); -+ } -+ -+ if ((txd = txdRail->Generic.Txd) == NULL || /* If there is no txd, or if the descriptor is marked */ -+ !(EP_IS_INTERRUPT_ENABLED(txd->Envelope.Attr) || EP_IS_RPC(txd->Envelope.Attr))) /* marked as no interrupt, or been reused as an transmit, */ -+ { /* then we were either called as a result of a previous */ -+ spin_unlock_irqrestore (&xmtr->Lock, flags); /* tx which was completed by polling or as a result */ -+ /* of a EnableTxCallBack/DisableTxCallback */ -+ -+ EPRINTF4 (DBG_XMTR, "%s: TxDoneEvent: xmtr %p txd %p recyled (%x)\n", -+ rail->Generic.Name, xmtr, txd, txd ? txd->Envelope.Attr : 0); -+ return; -+ } -+ -+ EPRINTF5 (DBG_XMTR, "%s: TxDoneEvent: xmtrRail=%p txdRail=%p txd=%p XID=%llx\n", -+ rail->Generic.Name, xmtrRail, txdRail, txd, (long long) txd->Envelope.Xid.Unique); -+ -+ ep_xmtr_txd_stat(xmtr,txd); -+ -+ /* remove from active transmit list */ -+ list_del (&txd->Link); -+ -+ UnbindTxdFromRail (txd, txdRail); -+ -+ /* clear the done flags for next time round */ -+ txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE; -+ txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE; -+ txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE; -+ -+ FreeTxdRail (xmtrRail, txdRail); -+ -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+ -+ if (txd->Handler) -+ txd->Handler (txd, txd->Arg, EP_SUCCESS); -+ -+ FreeTxd (xmtr, txd); -+} -+ -+/* -+ * TxDoneRetry: arg == EP3_TXD -+ */ -+static void -+TxDoneRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status) -+{ -+ panic ("TxDoneRetry"); -+} -+ -+static void -+TxDoneVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma) -+{ -+ panic ("TxDoneVerify"); -+} -+ -+static void -+EnableTransmitCallback (EP_TXD *txd, EP3_TXD_RAIL *txdRail) -+{ -+ ELAN3_DEV *dev = XMTR_TO_RAIL(txdRail->Generic.XmtrRail)->Device; -+ -+ EPRINTF3 (DBG_XMTR, "%s: EnableTransmitCallback: txd %p txdRail %p\n", XMTR_TO_RAIL (txdRail->Generic.XmtrRail)->Generic.Name, txd, txdRail); -+ -+ txd->Envelope.Attr = EP_SET_INTERRUPT_ENABLED(txd->Envelope.Attr); -+ -+ elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Type), EV_TYPE_BCOPY); -+ -+ if (EP_IS_RPC(txd->Envelope.Attr)) -+ { -+ elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Type), EV_TYPE_BCOPY); -+ elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Type), 
EV_TYPE_BCOPY | EV_TYPE_EVIRQ | txdRail->DoneCookie.Cookie); -+ } -+ else -+ { -+ elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Type), EV_TYPE_BCOPY | EV_TYPE_EVIRQ | txdRail->DataCookie.Cookie); -+ elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Type), EV_TYPE_BCOPY); -+ } -+} -+ -+static void -+DisableTransmitCallback (EP_TXD *txd, EP3_TXD_RAIL *txdRail) -+{ -+ ELAN3_DEV *dev = XMTR_TO_RAIL(txdRail->Generic.XmtrRail)->Device; -+ -+ EPRINTF3 (DBG_XMTR, "%s: DisableTransmitCallback: txd %p txdRail %p\n", XMTR_TO_RAIL (txdRail->Generic.XmtrRail)->Generic.Name, txd, txdRail); -+ -+ txd->Envelope.Attr = EP_CLEAR_INTERRUPT_ENABLED(txd->Envelope.Attr); -+ -+ elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Type), EV_TYPE_BCOPY); -+ elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Type), EV_TYPE_BCOPY); -+ elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Type), EV_TYPE_BCOPY); -+} -+ -+static void -+InitialiseTxdRail (EP_TXD *txd, EP3_TXD_RAIL *txdRail, int phase) -+{ -+ EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail; -+ EP3_RAIL *rail = XMTR_TO_RAIL (xmtrRail); -+ -+ /* Flush the Elan TLB if mappings have changed */ -+ ep_perrail_dvma_sync (&rail->Generic); -+ -+ /* Initialise the per-rail fields in the envelope */ -+ txd->Envelope.TxdRail = txdRail->TxdElanAddr; -+ txd->Envelope.NodeId = rail->Generic.Position.pos_nodeid; -+ -+ /* Initialise the dma backoff */ -+ txdRail->Backoff.type = EP_BACKOFF_FREE; -+ -+ /* Initialise the per-rail events */ -+ switch (phase) -+ { -+ case EP_TXD_PHASE_ACTIVE: -+ elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 1); -+ elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), -+ (txd->Envelope.nFrags ? txd->Envelope.nFrags : 1) + (EP_IS_MULTICAST(txd->Envelope.Attr) ? 1 : 0)); -+ -+ txdRail->TxdMain->EnveEvent = EP3_EVENT_ACTIVE; -+ txdRail->TxdMain->DataEvent = EP3_EVENT_ACTIVE; -+ break; -+ -+ case EP_TXD_PHASE_PASSIVE: -+ ASSERT (EP_IS_RPC(txd->Envelope.Attr)); -+ -+ elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0); -+ elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0); -+ -+ txdRail->TxdMain->EnveEvent = txdRail->EnveCookie.Cookie; -+ txdRail->TxdMain->DataEvent = txdRail->DataCookie.Cookie; -+ break; -+ } -+ -+ if (! EP_IS_RPC(txd->Envelope.Attr)) -+ txdRail->TxdMain->DoneEvent = txdRail->DoneCookie.Cookie; -+ else -+ { -+ elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 1); -+ txdRail->TxdMain->DoneEvent = EP3_EVENT_ACTIVE; -+ } -+ -+ if (EP_IS_NO_INTERRUPT(txd->Envelope.Attr)) -+ DisableTransmitCallback (txd, txdRail); -+ else -+ EnableTransmitCallback (txd, txdRail); -+ -+#if ! 
defined(CONFIG_EP_NO_CHECK_SUM) -+ if ( epdebug_check_sum ) -+ txd->Envelope.CheckSum = ep_calc_check_sum( txd->Xmtr->Subsys->Subsys.Sys, &txd->Envelope, txd->Envelope.Frags, txd->Envelope.nFrags); -+ else -+#endif -+ txd->Envelope.CheckSum = 0; -+ -+ /* copy the envelope and payload if present down to sdram */ -+ elan3_sdram_copyl_to_sdram (rail->Device, &txd->Envelope, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, Envelope), EP_ENVELOPE_SIZE); -+ -+ if (EP_HAS_PAYLOAD(txd->Envelope.Attr)) -+ elan3_sdram_copyl_to_sdram (rail->Device, &txd->Payload, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, Payload), EP_PAYLOAD_SIZE); -+} -+ -+void -+ep3xmtr_flush_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail) -+{ -+ EP3_RAIL *rail = XMTR_TO_RAIL (xmtrRail); -+ struct list_head *el; -+ unsigned long flags; -+ -+ switch (rail->Generic.CallbackStep) -+ { -+ case EP_CB_FLUSH_FILTERING: -+ /* only need to acquire/release the Lock to ensure that -+ * the node state transition has been noticed. */ -+ spin_lock_irqsave (&xmtr->Lock, flags); -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+ break; -+ -+ case EP_CB_FLUSH_FLUSHING: -+ spin_lock_irqsave (&xmtr->Lock, flags); -+ -+ list_for_each (el, &xmtr->ActiveDescList) { -+ EP_TXD *txd = list_entry (el, EP_TXD, Link); -+ EP3_TXD_RAIL *txdRail = (EP3_TXD_RAIL *) txd->TxdRail; -+ EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[txd->NodeId]; -+ -+ if (!TXD_BOUND2RAIL(txdRail, xmtrRail) || nodeRail->State != EP_NODE_LOCAL_PASSIVATE) -+ continue; -+ -+ if (EP_IS_RPC(txd->Envelope.Attr)) -+ { -+ if (! EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent)) -+ nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES; -+ else if (! EP3_EVENT_FIRED (txdRail->DoneCookie, txdRail->TxdMain->DoneEvent)) -+ nodeRail->MessageState |= EP_NODE_PASSIVE_MESSAGES; -+ } -+ else -+ { -+ if (! EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent)) -+ nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES; -+ } -+ } -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+ break; -+ -+ default: -+ panic ("ep3xmtr_flush_callback: invalid callback step\n"); -+ break; -+ } -+} -+ -+void -+ep3xmtr_failover_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail) -+{ -+ EP3_RAIL *rail = XMTR_TO_RAIL (xmtrRail); -+ struct list_head txdList; -+ struct list_head *el, *nel; -+ unsigned long flags; -+#ifdef SUPPORT_RAIL_FAILOVER -+ EP_COMMS_SUBSYS *subsys = xmtr->Subsys; -+#endif -+ -+ INIT_LIST_HEAD (&txdList); -+ -+ spin_lock_irqsave (&xmtr->Lock, flags); -+ list_for_each_safe (el, nel, &xmtr->ActiveDescList) { -+ EP_TXD *txd = list_entry (el, EP_TXD, Link); -+ EP3_TXD_RAIL *txdRail = (EP3_TXD_RAIL *) txd->TxdRail; -+ EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[txd->NodeId]; -+ -+ /* Only progress relocation of txd's bound to this rail */ -+ if (!TXD_BOUND2RAIL(txdRail, xmtrRail) || nodeRail->State != EP_NODE_PASSIVATED) -+ continue; -+ -+#ifdef SUPPORT_RAIL_FAILOVER -+ /* Transmit data not been sent, so just restart on different rail */ -+ if (! 
EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent))
-+        {
-+            EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_failover_callback - xmtr %p txd %p node %d unbind and retry\n", rail->Generic.Name, xmtr, txd, txd->NodeId);
-+
-+            UnbindTxdFromRail (txd, txdRail);
-+
-+            /* clear the done flags - so that it will be ignored if an event interrupt is generated */
-+            txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
-+            txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
-+            txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
-+
-+            /* reset all events, since none of them could have been set */
-+            elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0); /* PCI write */
-+            elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0); /* PCI write */
-+            elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0); /* PCI write */
-+
-+            FreeTxdRail (xmtrRail, txdRail);
-+
-+            /* epcomms thread will restart on different rail */
-+            ep_kthread_schedule (&subsys->Thread, lbolt);
-+            continue;
-+        }
-+
-+        if (EP_IS_RPC(txd->Envelope.Attr) && !EP3_EVENT_FIRED (txdRail->DoneCookie, txdRail->TxdMain->DoneEvent))
-+        {
-+            if (EP_IS_NO_FAILOVER(txd->Envelope.Attr))
-+            {
-+                EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_failover_callback - xmtr %p txd %p node %d - not able to failover\n",
-+                          rail->Generic.Name, xmtr, txd, txd->NodeId);
-+
-+                list_del (&txd->Link);
-+                UnbindTxdFromRail (txd, txdRail);
-+
-+                /* clear the done flags - so that it will be ignored if an event interrupt is generated */
-+                txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
-+                txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
-+                txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
-+
-+                /* envelope and data events must have been set, so only clear the done event */
-+                EP_ASSERT (&rail->Generic, SDRAM_ASSERT(elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)) == 0));
-+                EP_ASSERT (&rail->Generic, SDRAM_ASSERT(elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));
-+
-+                elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0); /* PCI write */
-+
-+                FreeTxdRail (xmtrRail, txdRail);
-+
-+                list_add_tail (&txd->Link, &txdList);
-+                continue;
-+            }
-+            EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_failover_callback - xmtr %p txd %p node %d passive\n", rail->Generic.Name, xmtr, txd, txd->NodeId);
-+
-+            nodeRail->MessageState |= EP_NODE_PASSIVE_MESSAGES;
-+            continue;
-+        }
-+
-+        EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_failover_callback - xmtr %p txd %p node %d completed\n", rail->Generic.Name, xmtr, txd, txd->NodeId);
-+#endif
-+
-+    }
-+    spin_unlock_irqrestore (&xmtr->Lock, flags);
-+
-+    while (!
list_empty (&txdList)) -+ { -+ EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link); -+ -+ list_del (&txd->Link); -+ -+ txd->Handler (txd, txd->Arg, EP_CONN_RESET); -+ -+ FreeTxd (xmtr, txd); -+ } -+} -+ -+ -+void -+ep3xmtr_disconnect_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail) -+{ -+ EP3_RAIL *rail = XMTR_TO_RAIL (xmtrRail); -+ struct list_head *el, *nel; -+ struct list_head txdList; -+ unsigned long flags; -+ -+ INIT_LIST_HEAD (&txdList); -+ -+ spin_lock_irqsave (&xmtr->Lock, flags); -+ -+ list_for_each_safe (el, nel, &xmtr->ActiveDescList) { -+ EP_TXD *txd = list_entry (el, EP_TXD, Link); -+ EP3_TXD_RAIL *txdRail = (EP3_TXD_RAIL *) txd->TxdRail; -+ EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[txd->NodeId]; -+ -+ if (!TXD_BOUND2RAIL(txdRail, xmtrRail) || nodeRail->State != EP_NODE_DISCONNECTING) -+ continue; -+ -+ if (EP3_EVENT_FIRED (txdRail->EnveCookie, txdRail->TxdMain->EnveEvent) && -+ EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent) && -+ EP3_EVENT_FIRED (txdRail->DoneCookie, txdRail->TxdMain->DoneEvent)) -+ { -+ EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_disconnect_callback - xmtr %p txd %p completed to node %d\n", rail->Generic.Name, xmtr, txd, txd->NodeId); -+ continue; -+ } -+ -+ /* Remove from active list */ -+ list_del (&txd->Link); -+ -+ UnbindTxdFromRail (txd, txdRail); -+ -+ /* clear the done flags - so that it will be ignored if an event interrupt is generated */ -+ txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE; -+ txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE; -+ txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE; -+ -+ /* reset the envelope and data events, since only they could have been set */ -+ elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0); /* PCI write */ -+ elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0); /* PCI write */ -+ elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0); /* PCI write */ -+ -+ FreeTxdRail (xmtrRail, txdRail); -+ -+ EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_disconnect_callback - xmtr %p txd %p node %d not conected\n", rail->Generic.Name, xmtr, txd, txd->NodeId); -+ -+ /* add to the list of txd's which are to be completed */ -+ list_add_tail (&txd->Link, &txdList); -+ } -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+ -+ while (! 
list_empty (&txdList)) -+ { -+ EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link); -+ -+ list_del (&txd->Link); -+ -+ txd->Handler (txd, txd->Arg, EP_CONN_RESET); -+ -+ FreeTxd (xmtr, txd); -+ } -+} -+ -+int -+ep3xmtr_poll_txd (EP_XMTR_RAIL *x, EP_TXD_RAIL *t, int how) -+{ -+ EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) x; -+ EP3_TXD_RAIL *txdRail = (EP3_TXD_RAIL *) t; -+ EP_TXD *txd = txdRail->Generic.Txd; -+ -+ switch (how) -+ { -+ case ENABLE_TX_CALLBACK: -+ if (EP_IS_NO_INTERRUPT(txd->Envelope.Attr)) -+ EnableTransmitCallback (txd, txdRail); -+ break; -+ -+ case DISABLE_TX_CALLBACK: -+ if (EP_IS_NO_INTERRUPT(txd->Envelope.Attr)) -+ DisableTransmitCallback (txd, txdRail); -+ break; -+ } -+ -+ if (EP3_EVENT_FIRED (txdRail->EnveCookie, txdRail->TxdMain->EnveEvent) && -+ EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent) && -+ EP3_EVENT_FIRED (txdRail->DoneCookie, txdRail->TxdMain->DoneEvent)) -+ { -+ EPRINTF3 (DBG_XMTR, "%s: ep3xmtr_poll_txd: txd=%p XID=%llx completed\n", -+ XMTR_TO_RAIL (xmtrRail)->Generic.Name, txd, (long long) txd->Envelope.Xid.Unique); -+ -+ ep_xmtr_txd_stat(xmtrRail->Generic.Xmtr,txd); -+ -+ UnbindTxdFromRail (txd, txdRail); -+ -+ /* clear the done flags - so that it will be ignored if an event interrupt is generated */ -+ txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE; -+ txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE; -+ txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE; -+ -+ FreeTxdRail (xmtrRail, txdRail); -+ -+ return 1; -+ } -+ -+ return 0; -+} -+ -+int -+ep3xmtr_bind_txd (EP_TXD *txd, EP_XMTR_RAIL *x, unsigned int phase) -+{ -+ EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) x; -+ EP3_RAIL *rail = XMTR_TO_RAIL (xmtrRail); -+ EP3_TXD_RAIL *txdRail; -+ E3_DMA_BE dmabe; -+ -+ if ((txdRail = GetTxdRail (xmtrRail)) == NULL) -+ return 0; -+ -+ switch (phase) -+ { -+ case EP_TXD_PHASE_ACTIVE: -+ if (rail->Generic.Nodes[txd->NodeId].State != EP_NODE_CONNECTED) -+ { -+ EPRINTF2 (DBG_XMTR, "%s: TransmitTxdOnRail: node %u not connected on this rail\n", rail->Generic.Name, txd->NodeId); -+ -+ /* clear the done flags - so that it will be ignored if an event interrupt is generated */ -+ txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE; -+ txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE; -+ txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE; -+ -+ /* reset all events, since non of them could have been set */ -+ elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0); /* PCI write */ -+ elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0); /* PCI write */ -+ elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0); /* PCI write */ -+ -+ FreeTxdRail (xmtrRail, txdRail); -+ return 0; -+ } -+ -+ InitialiseTxdRail (txd, txdRail, phase); -+ -+ /* Initialise the dma descriptor */ -+ dmabe.s.dma_type = E3_DMA_TYPE (DMA_BYTE, DMA_WRITE, DMA_QUEUED, EP3_DMAFAILCOUNT); -+ dmabe.s.dma_size = (EP_HAS_PAYLOAD(txd->Envelope.Attr) ? 
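-+	    /* The envelope and payload live contiguously in the elan
-+	     * descriptor, so a payload-bearing transmit is sent as one DMA
-+	     * of EP_INPUTQ_SIZE (assumed here to span both structures),
-+	     * while a bare envelope sends only EP_ENVELOPE_SIZE: */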
EP_INPUTQ_SIZE : EP_ENVELOPE_SIZE); -+ dmabe.s.dma_source = txdRail->TxdElanAddr + offsetof (EP3_TXD_RAIL_ELAN, Envelope); -+ dmabe.s.dma_dest = (E3_Addr) 0; -+ dmabe.s.dma_destEvent = EP_MSGQ_ADDR(txd->Service); -+ dmabe.s.dma_destCookieVProc = EP_VP_DATA (txd->NodeId); -+ dmabe.s.dma_srcEvent = txdRail->TxdElanAddr + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent); -+ dmabe.s.dma_srcCookieVProc = LocalCookie (rail, txd->NodeId); -+ -+ EPRINTF8 (DBG_XMTR, "%s: TransmitTxdOnRail: txd=%p txdRail=%p @ %x XID=%llx dest=%u srcEvent=%x srcCookie=%x\n", rail->Generic.Name, -+ txd, txdRail, txdRail->TxdElanAddr, (long long) txd->Envelope.Xid.Unique, txd->NodeId, dmabe.s.dma_srcEvent, dmabe.s.dma_srcCookieVProc); -+ -+ BindTxdToRail (txd, txdRail); -+ -+ if (IssueDma (rail, &dmabe, EP_RETRY_LOW_PRI, FALSE) != ISSUE_COMMAND_OK) -+ QueueDmaForRetry (rail, &dmabe, EP_RETRY_LOW_PRI); -+ break; -+ -+ case EP_TXD_PHASE_PASSIVE: -+ InitialiseTxdRail (txd, txdRail, EP_TXD_PHASE_PASSIVE); /* initialise as passive (updated envelope) */ -+ -+ EP_XMTR_OP (txd->TxdRail->XmtrRail, UnbindTxd) (txd, EP_TXD_PHASE_PASSIVE); /* unbind from existing rail */ -+ -+ BindTxdToRail (txd, txdRail); /* and bind it to our new rail */ -+ break; -+ } -+ -+ return 1; -+} -+ -+void -+ep3xmtr_unbind_txd (EP_TXD *txd, unsigned int phase) -+{ -+ EP3_TXD_RAIL *txdRail = (EP3_TXD_RAIL *) txd->TxdRail; -+ EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail; -+ EP3_RAIL *rail = XMTR_TO_RAIL (xmtrRail); -+ -+ /* XXXX - TBD assertions on phase */ -+ -+ UnbindTxdFromRail (txd, txdRail); -+ -+ /* clear the done flags - so that it will be ignored if an event interrupt is generated */ -+ txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE; -+ txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE; -+ txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE; -+ -+ /* reset the envelope and data events, since only they could have been set */ -+ elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0); /* PCI write */ -+ elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0); /* PCI write */ -+ elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0); /* PCI write */ -+ -+ FreeTxdRail (xmtrRail, txdRail); -+} -+ -+long -+ep3xmtr_check (EP_XMTR_RAIL *x, long nextRunTime) -+{ -+ EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) x; -+ -+ if (xmtrRail->FreeDescCount < ep_txd_lowat && !AllocateTxdRailBlock(xmtrRail)) -+ { -+ EPRINTF1 (DBG_RCVR,"%s: failed to grow txd rail pool\n", XMTR_TO_RAIL(xmtrRail)->Generic.Name); -+ -+ if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME)) -+ nextRunTime = lbolt + RESOURCE_RETRY_TIME; -+ } -+ -+ return nextRunTime; -+} -+ -+void -+ep3xmtr_add_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail) -+{ -+ EP3_XMTR_RAIL *xmtrRail; -+ unsigned long flags; -+ -+ KMEM_ZALLOC (xmtrRail, EP3_XMTR_RAIL *, sizeof (EP3_XMTR_RAIL), 1); -+ -+ spin_lock_init (&xmtrRail->FreeDescLock); -+ kcondvar_init (&xmtrRail->FreeDescSleep); -+ INIT_LIST_HEAD (&xmtrRail->FreeDescList); -+ INIT_LIST_HEAD (&xmtrRail->DescBlockList); -+ -+ xmtrRail->Generic.CommsRail = commsRail; -+ xmtrRail->Generic.Xmtr = xmtr; -+ -+ spin_lock_irqsave (&xmtr->Lock, flags); -+ -+ xmtr->Rails[commsRail->Rail->Number] = &xmtrRail->Generic; -+ xmtr->RailMask |= EP_RAIL2RAILMASK(commsRail->Rail->Number); -+ -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+} -+ -+void -+ep3xmtr_del_rail (EP_XMTR *xmtr, EP_COMMS_RAIL 
*commsRail) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) commsRail->Rail; -+ EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) xmtr->Rails[commsRail->Rail->Number]; -+ unsigned long flags; -+ -+ /* rail mask set as not usable */ -+ spin_lock_irqsave (&xmtr->Lock, flags); -+ xmtr->RailMask &= ~EP_RAIL2RAILMASK (rail->Generic.Number); -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+ -+ /* wait for all txd's for this rail to become free */ -+ spin_lock_irqsave (&xmtrRail->FreeDescLock, flags); -+ while (xmtrRail->FreeDescCount != xmtrRail->TotalDescCount) -+ { -+ xmtrRail->FreeDescWaiting++; -+ kcondvar_wait (&xmtrRail->FreeDescSleep, &xmtrRail->FreeDescLock, &flags); -+ } -+ spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags); -+ -+ spin_lock_irqsave (&xmtr->Lock, flags); -+ xmtr->Rails[commsRail->Rail->Number] = NULL; -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+ -+ /* need to free up the txd's and blocks */ -+ /* all the txd's accociated with DescBlocks must be in the FreeDescList */ -+ ASSERT (xmtrRail->TotalDescCount == xmtrRail->FreeDescCount); -+ -+ /* run through the DescBlockList deleting them */ -+ while (!list_empty (&xmtrRail->DescBlockList)) -+ FreeTxdRailBlock (xmtrRail, list_entry(xmtrRail->DescBlockList.next, EP3_TXD_RAIL_BLOCK , Link)); -+ -+ /* it had better be empty after that */ -+ ASSERT ((xmtrRail->FreeDescCount == 0) && (xmtrRail->TotalDescCount == 0)); -+ -+ spin_lock_destroy (&xmtrRail->FreeDescLock); -+ kcondvar_destroy (&xmtrRail->FreeDescSleep); -+ -+ KMEM_FREE (xmtrRail, sizeof (EP3_XMTR_RAIL)); -+} -+ -+void -+ep3xmtr_display_xmtr (DisplayInfo *di, EP_XMTR_RAIL *x) -+{ -+ EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) x; -+ EP3_RAIL *rail = XMTR_TO_RAIL (xmtrRail); -+ struct list_head *el; -+ unsigned long flags; -+ int freeCount = 0; -+ -+ spin_lock_irqsave (&xmtrRail->FreeDescLock, flags); -+ list_for_each (el, &xmtrRail->FreeDescList) -+ freeCount++; -+ spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags); -+ -+ (di->func)(di->arg, " Rail=%d Free=%d Total=%d (%d)\n", -+ rail->Generic.Number, xmtrRail->FreeDescCount, xmtrRail->TotalDescCount, freeCount); -+} -+ -+void -+ep3xmtr_display_txd (DisplayInfo *di, EP_TXD_RAIL *t) -+{ -+ EP3_TXD_RAIL *txdRail = (EP3_TXD_RAIL *) t; -+ EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail; -+ EP3_TXD_RAIL_MAIN *txdMain = txdRail->TxdMain; -+ sdramaddr_t txdElan = txdRail->TxdElan; -+ EP3_RAIL *rail = (EP3_RAIL *) xmtrRail->Generic.CommsRail->Rail; -+ ELAN3_DEV *dev = rail->Device; -+ -+ (di->func)(di->arg, " EnveEvent=%x DataEvent=%x DoneEvent=%x Rail=%s\n", -+ txdMain->EnveEvent, txdMain->DataEvent, txdMain->DoneEvent, rail->Generic.Name); -+ (di->func)(di->arg, " EnveEvent=%x.%x DataEvent=%x.%x DoneEvent=%x.%x\n", -+ elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)), -+ elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Type)), -+ elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)), -+ elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Type)), -+ elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count)), -+ elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Type))); -+} -+ -+int -+ep3xmtr_check_txd_state (EP_TXD *txd) -+{ -+ EP3_TXD_RAIL *txdRail = (EP3_TXD_RAIL *) txd->TxdRail; -+ EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail; -+ EP3_RAIL *rail = XMTR_TO_RAIL (xmtrRail); -+ E3_Addr enveEvent = txdRail->TxdElanAddr + 
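-+    /* ep3xmtr_bind_txd issued the envelope DMA with this txd's EnveEvent
-+     * address as dma_srcEvent; recompute that address here so the stalled
-+     * DMA can be matched on the EP_RETRY_STABALISING queue below: */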
offsetof (EP3_TXD_RAIL_ELAN, EnveEvent);
-+    EP3_RETRY_DMA    *retry = NULL;
-+
-+    struct list_head *el;
-+    struct list_head *nel;
-+    unsigned long     flags;
-+
-+    /* check that the envelope event really has not been set */
-+    if (EP3_EVENT_FIRED (txdRail->EnveCookie, txdRail->TxdMain->EnveEvent))
-+	return (0);
-+
-+    /* remove matching dma from stalled list */
-+    spin_lock_irqsave (&rail->DmaRetryLock, flags);
-+
-+    list_for_each_safe(el, nel, &rail->DmaRetries[EP_RETRY_STABALISING]) {
-+	retry = list_entry (el, EP3_RETRY_DMA, Link);
-+
-+	if ( retry->Dma.s.dma_srcEvent == enveEvent ) {
-+	    /* remove from retry list */
-+	    list_del (&retry->Link);
-+	    break; /* there can only be one */
-+	}
-+    }
-+    ASSERT ( retry != NULL );				/* must find one in the list */
-+    ASSERT ( retry->Dma.s.dma_srcEvent == enveEvent );	/* and it had better be the matching dma */
-+
-+    /* add to free list */
-+    list_add (&retry->Link, &rail->DmaRetryFreeList);
-+
-+    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
-+
-+    UnbindTxdFromRail (txd, txdRail);
-+
-+    /* clear the done flags - so that it will be ignored if an event interrupt is generated */
-+    txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
-+    txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
-+    txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
-+
-+    /* reset the envelope and data events, since only they could have been set */
-+    elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0); /* PCI write */
-+    elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0); /* PCI write */
-+    elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0); /* PCI write */
-+
-+    FreeTxdRail (xmtrRail, txdRail);
-+
-+    return (1);
-+}
-+
-+void
-+ep3xmtr_fillout_rail_stats(EP_XMTR_RAIL *xmtr_rail, char *str) {
-+    /* no stats here yet */
-+    /* EP3_XMTR_RAIL * ep3xmtr_rail = (EP3_XMTR_RAIL *) xmtr_rail; */
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "stroustrup"
-+ * End:
-+ */
-diff -urN clean/drivers/net/qsnet/ep/epcommsTx_elan4.c linux-2.6.9/drivers/net/qsnet/ep/epcommsTx_elan4.c
---- clean/drivers/net/qsnet/ep/epcommsTx_elan4.c	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/ep/epcommsTx_elan4.c	2005-07-20 07:35:37.000000000 -0400
-@@ -0,0 +1,1389 @@
-+/*
-+ *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ *    Copyright (c) 2002-2003 by Quadrics Ltd.
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: epcommsTx_elan4.c,v 1.32.2.1 2005/07/20 11:35:37 mike Exp $" -+/* $Source: /cvs/master/quadrics/epmod/epcommsTx_elan4.c,v $ */ -+ -+#include -+ -+#include -+#include -+#include -+ -+#include "debug.h" -+#include "kcomm_vp.h" -+#include "kcomm_elan4.h" -+#include "epcomms_elan4.h" -+ -+#include -+ -+#define XMTR_TO_COMMS(xmtrRail) ((EP4_COMMS_RAIL *) ((EP_XMTR_RAIL *) xmtrRail)->CommsRail) -+#define XMTR_TO_RAIL(xmtrRail) ((EP4_RAIL *) ((EP_XMTR_RAIL *) xmtrRail)->CommsRail->Rail) -+#define XMTR_TO_DEV(xmtrRail) (XMTR_TO_RAIL(xmtrRail)->r_ctxt.ctxt_dev) -+#define XMTR_TO_SUBSYS(xmtrRail) (((EP_XMTR_RAIL *) xmtrRail)->Xmtr->Subsys) -+ -+#define TXD_TO_XMTR(txdRail) ((EP4_XMTR_RAIL *) txdRail->txd_generic.XmtrRail) -+#define TXD_TO_RAIL(txdRail) XMTR_TO_RAIL(TXD_TO_XMTR(txdRail)) -+ -+static void txd_interrupt (EP4_RAIL *rail, void *arg); -+static void poll_interrupt (EP4_RAIL *rail, void *arg); -+ -+static __inline__ int -+on_list (struct list_head *ent, struct list_head *list) -+{ -+ struct list_head *el; -+ unsigned int count = 0; -+ list_for_each (el, list) { -+ if (el == ent) -+ count++; -+ } -+ return count; -+} -+ -+static __inline__ void -+__ep4_txd_assert_free (EP4_TXD_RAIL *txdRail, const char *file, const int line) -+{ -+ EP4_XMTR_RAIL *xmtrRail = TXD_TO_XMTR (txdRail); -+ ELAN4_DEV *dev = XMTR_TO_DEV (xmtrRail); -+ register int failed = 0; -+ -+ if ((txdRail)->txd_retry_time != 0) failed |= (1 << 0); -+ if ((txdRail)->txd_main->txd_env != EP4_STATE_FREE) failed |= (1 << 1); -+ if ((txdRail)->txd_main->txd_data != EP4_STATE_FREE) failed |= (1 << 2); -+ if ((txdRail)->txd_main->txd_done != EP4_STATE_FREE) failed |= (1 << 3); -+ -+ if (sdram_assert) -+ { -+ if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType)) >> 32) != -32) failed |= (1 << 4); -+ if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType)) >> 32) != 0) failed |= (1 << 5); -+ if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType)) >> 32) != 0) failed |= (1 << 6); -+ } -+ -+ if (failed) -+ { -+ printk ("__ep4_txd_assert_free: failed=%x txdRail=%p at %s:%d\n", failed, txdRail, file, line); -+ -+ ep_debugf (DBG_DEBUG, "__ep4_txd_assert_free: failed=%x txdRail=%p at %s:%d\n", failed, txdRail, file, line); -+ ep4xmtr_display_txd (&di_ep_debug, &txdRail->txd_generic); -+ -+ (txdRail)->txd_retry_time = 0; -+ (txdRail)->txd_main->txd_env = EP4_STATE_FREE; -+ (txdRail)->txd_main->txd_data = EP4_STATE_FREE; -+ (txdRail)->txd_main->txd_done = EP4_STATE_FREE; -+ -+ if (sdram_assert) -+ { -+ elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType) + 4, -32); -+ elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType) + 4, 0); -+ elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType) + 4, 0); -+ } -+ EP_ASSFAIL (XMTR_TO_RAIL(xmtrRail), "__ep4_txd_assert_free"); -+ } -+} -+ -+static __inline__ void -+__ep4_txd_assert_finished (EP4_TXD_RAIL *txdRail, const char *file, const int line) -+{ -+ EP4_XMTR_RAIL *xmtrRail = TXD_TO_XMTR (txdRail); -+ ELAN4_DEV *dev = XMTR_TO_DEV (xmtrRail); -+ register int failed = 0; -+ -+ if ((txdRail)->txd_retry_time != 0) failed |= (1 << 0); -+ if ((txdRail)->txd_main->txd_env != EP4_STATE_FINISHED) failed 
|= (1 << 1); -+ if ((txdRail)->txd_main->txd_data != EP4_STATE_FINISHED) failed |= (1 << 2); -+ if ((txdRail)->txd_main->txd_done != EP4_STATE_FINISHED) failed |= (1 << 3); -+ -+ if (sdram_assert) -+ { -+ if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType)) >> 32) != -32) failed |= (1 << 4); -+ if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType)) >> 32) != 0) failed |= (1 << 5); -+ if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType)) >> 32) != 0) failed |= (1 << 6); -+ } -+ -+ if (failed) -+ { -+ printk ("__ep4_txd_assert_finished: failed=%x txdRail=%p at %s:%d\n", failed, txdRail, file, line); -+ -+ ep_debugf (DBG_DEBUG, "__ep4_txd_assert_finished: failed=%x txdRail=%p at %s:%d\n", failed, txdRail, file, line); -+ ep4xmtr_display_txd (&di_ep_debug, &txdRail->txd_generic); -+ -+ (txdRail)->txd_retry_time = 0; -+ (txdRail)->txd_main->txd_env = EP4_STATE_FINISHED; -+ (txdRail)->txd_main->txd_data = EP4_STATE_FINISHED; -+ (txdRail)->txd_main->txd_done = EP4_STATE_FINISHED; -+ -+ if (sdram_assert) -+ { -+ elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType) + 4, -32); -+ elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType) + 4, 0); -+ elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType) + 4, 0); -+ } -+ EP_ASSFAIL (XMTR_TO_RAIL(xmtrRail), "__ep4_txd_assert_finished"); -+ } -+} -+ -+static __inline__ int -+__ep4_txd_assfail (EP4_TXD_RAIL *txdRail, const char *expr, const char *file, const int line) -+{ -+ EP4_XMTR_RAIL *xmtrRail = TXD_TO_XMTR (txdRail); -+ -+ printk ("__ep4_txd_assfail: %s:%d '%s'\n", file, line, expr); -+ -+ ep_debugf (DBG_DEBUG, "__ep4_txd_assfail: %s:%d '%s'\n", file, line, expr); -+ ep4xmtr_display_txd (&di_ep_debug, &txdRail->txd_generic); -+ -+ EP_ASSFAIL (XMTR_TO_RAIL (xmtrRail), "__ep4_txd_assfail"); -+ -+ return 0; -+} -+ -+#define EP4_TXD_ASSERT(txdRail, EX) ((void) ((EX) || (__ep4_txd_assfail(txdRail, #EX, __FILE__, __LINE__)))) -+#define EP4_TXD_ASSERT_FREE(txdRail) __ep4_txd_assert_free(txdRail, __FILE__, __LINE__) -+#define EP4_TXD_ASSERT_FINISHED(txdRail) __ep4_txd_assert_finished(txdRail, __FILE__, __LINE__) -+ -+static int -+alloc_txd_block (EP4_XMTR_RAIL *xmtrRail) -+{ -+ EP4_RAIL *rail = XMTR_TO_RAIL(xmtrRail); -+ ELAN4_DEV *dev = XMTR_TO_DEV(xmtrRail); -+ EP4_TXD_RAIL_BLOCK *blk; -+ EP4_TXD_RAIL_MAIN *txdMain; -+ EP_ADDR txdMainAddr; -+ sdramaddr_t txdElan; -+ EP_ADDR txdElanAddr; -+ EP4_TXD_RAIL *txdRail; -+ unsigned long flags; -+ int i; -+ -+ KMEM_ZALLOC (blk, EP4_TXD_RAIL_BLOCK *, sizeof (EP4_TXD_RAIL_BLOCK), 1); -+ -+ if (blk == NULL) -+ return 0; -+ -+ if ((txdElan = ep_alloc_elan (&rail->r_generic, EP4_TXD_RAIL_ELAN_SIZE * EP4_NUM_TXD_PER_BLOCK, 0, &txdElanAddr)) == (sdramaddr_t) 0) -+ { -+ KMEM_FREE (blk, sizeof (EP4_TXD_RAIL_BLOCK)); -+ return 0; -+ } -+ -+ if ((txdMain = ep_alloc_main (&rail->r_generic, EP4_TXD_RAIL_MAIN_SIZE * EP4_NUM_TXD_PER_BLOCK, 0, &txdMainAddr)) == (EP4_TXD_RAIL_MAIN *) NULL) -+ { -+ ep_free_elan (&rail->r_generic, txdElanAddr, EP4_TXD_RAIL_ELAN_SIZE * EP4_NUM_TXD_PER_BLOCK); -+ KMEM_FREE (blk, sizeof (EP4_TXD_RAIL_BLOCK)); -+ return 0; -+ } -+ -+ if (ep4_reserve_dma_retries (rail, EP4_NUM_TXD_PER_BLOCK, 0) != 0) -+ { -+ ep_free_main (&rail->r_generic, blk->blk_txds[0].txd_main_addr, EP4_TXD_RAIL_MAIN_SIZE * 
EP4_NUM_TXD_PER_BLOCK); -+ ep_free_elan (&rail->r_generic, txdElanAddr, EP4_TXD_RAIL_ELAN_SIZE * EP4_NUM_TXD_PER_BLOCK); -+ KMEM_FREE (blk, sizeof (EP4_TXD_RAIL_BLOCK)); -+ return 0; -+ } -+ -+ for (txdRail = &blk->blk_txds[0], i = 0; i < EP4_NUM_TXD_PER_BLOCK; i++, txdRail++) -+ { -+ txdRail->txd_generic.XmtrRail = &xmtrRail->xmtr_generic; -+ txdRail->txd_elan = txdElan; -+ txdRail->txd_elan_addr = txdElanAddr; -+ txdRail->txd_main = txdMain; -+ txdRail->txd_main_addr = txdMainAddr; -+ -+ /* We only need to reserve space for one command stream, since the sten packet -+ * can only be retrying *before* the dma source event is set. -+ * reserve bytes of "event" cq space for the completion write + interrupt */ -+ if ((txdRail->txd_ecq = ep4_get_ecq (rail, EP4_ECQ_EVENT, EP4_INTR_CMD_NDWORDS)) == NULL) -+ goto failed; -+ -+ /* register the main interrupt cookies */ -+ ep4_register_intcookie (rail, &txdRail->txd_intcookie, txdElanAddr + offsetof (EP4_TXD_RAIL_ELAN, txd_done), txd_interrupt, txdRail); -+ -+ /* initialise the events */ -+ elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType), -+ E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS)); -+ elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CopySource), -+ txdElanAddr + offsetof (EP4_TXD_RAIL_ELAN, txd_env_cmd)); -+ elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CopyDest), -+ txdRail->txd_ecq->ecq_addr); -+ -+ elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType), -+ E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0)); -+ elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_WritePtr), -+ txdMainAddr + offsetof (EP4_TXD_RAIL_MAIN, txd_data)); -+ elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_WriteValue), -+ EP4_STATE_FINISHED); -+ -+ elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType), -+ E4_EVENT_INIT_VALUE (0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS)); -+ elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CopySource), -+ txdElanAddr + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd)); -+ elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CopyDest), -+ txdRail->txd_ecq->ecq_addr); -+ -+ /* Initialise the command streams */ -+ elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env_cmd.c_write_cmd), -+ WRITE_DWORD_CMD | (txdMainAddr + offsetof (EP4_TXD_RAIL_MAIN, txd_env))); -+ elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env_cmd.c_write_value), -+ EP4_STATE_FAILED); -+ elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env_cmd.c_intr_cmd), -+ INTERRUPT_CMD | (txdRail->txd_intcookie.int_val << E4_MAIN_INT_SHIFT)); -+ -+ elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_write_cmd), -+ WRITE_DWORD_CMD | (txdMainAddr + offsetof (EP4_TXD_RAIL_MAIN, txd_done))); -+ elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_write_value), -+ EP4_STATE_FINISHED); -+ elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_intr_cmd), -+ INTERRUPT_CMD | (txdRail->txd_intcookie.int_val << E4_MAIN_INT_SHIFT)); -+ -+ txdMain->txd_env = EP4_STATE_FREE; -+ txdMain->txd_data = EP4_STATE_FREE; -+ txdMain->txd_done = EP4_STATE_FREE; -+ -+ /* move onto next descriptor */ -+ txdElan += EP4_TXD_RAIL_ELAN_SIZE; -+ 
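-+	/* The elan and main allocations are laid out as
-+	 * EP4_NUM_TXD_PER_BLOCK consecutive fixed-size records, so
-+	 * descriptor i sits at base + i * record-size in each; a sketch of
-+	 * the addressing (hypothetical helper, for illustration only):
-+	 *
-+	 *	sdramaddr_t nth_txd_elan (sdramaddr_t base, int i)
-+	 *	{
-+	 *	    return base + i * EP4_TXD_RAIL_ELAN_SIZE;
-+	 *	}
-+	 */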
txdElanAddr += EP4_TXD_RAIL_ELAN_SIZE; -+ txdMain = (EP4_TXD_RAIL_MAIN *) ((unsigned long) txdMain + EP4_TXD_RAIL_MAIN_SIZE); -+ txdMainAddr += EP4_TXD_RAIL_MAIN_SIZE; -+ } -+ -+ spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags); -+ -+ list_add (&blk->blk_link, &xmtrRail->xmtr_blocklist); -+ -+ xmtrRail->xmtr_totalcount += EP4_NUM_TXD_PER_BLOCK; -+ xmtrRail->xmtr_freecount += EP4_NUM_TXD_PER_BLOCK; -+ -+ for (i = 0; i < EP4_NUM_TXD_PER_BLOCK; i++) -+ list_add (&blk->blk_txds[i].txd_generic.Link, &xmtrRail->xmtr_freelist); -+ -+ spin_unlock_irqrestore (&xmtrRail->xmtr_freelock, flags); -+ -+ return 1; -+ -+ failed: -+ while (--i >= 0) -+ { -+ ep4_put_ecq (rail, txdRail->txd_ecq, EP4_INTR_CMD_NDWORDS); -+ ep4_deregister_intcookie (rail, &txdRail->txd_intcookie); -+ } -+ ep4_release_dma_retries (rail, EP4_NUM_TXD_PER_BLOCK); -+ -+ ep_free_main (&rail->r_generic, blk->blk_txds[0].txd_main_addr, EP4_TXD_RAIL_MAIN_SIZE * EP4_NUM_TXD_PER_BLOCK); -+ ep_free_elan (&rail->r_generic, blk->blk_txds[0].txd_elan_addr, EP4_TXD_RAIL_ELAN_SIZE * EP4_NUM_TXD_PER_BLOCK); -+ -+ KMEM_FREE (blk, sizeof (EP4_TXD_RAIL_BLOCK)); -+ -+ return 0; -+} -+ -+static void -+free_txd_block (EP4_XMTR_RAIL *xmtrRail, EP4_TXD_RAIL_BLOCK *blk) -+{ -+ EP4_RAIL *rail = XMTR_TO_RAIL (xmtrRail); -+ EP4_TXD_RAIL *txdRail; -+ unsigned long flags; -+ int i; -+ -+ spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags); -+ -+ list_del (&blk->blk_link); -+ -+ xmtrRail->xmtr_totalcount -= EP4_NUM_TXD_PER_BLOCK; -+ -+ for (txdRail = &blk->blk_txds[0], i = 0; i < EP4_NUM_TXD_PER_BLOCK; i++, txdRail++) -+ { -+ xmtrRail->xmtr_freecount--; -+ -+ ep4_put_ecq (rail, txdRail->txd_ecq, EP4_INTR_CMD_NDWORDS); -+ -+ ep4_deregister_intcookie (rail, &txdRail->txd_intcookie); -+ -+ list_del (&txdRail->txd_generic.Link); -+ } -+ spin_unlock_irqrestore (&xmtrRail->xmtr_freelock, flags); -+ -+ ep4_release_dma_retries (rail, EP4_NUM_TXD_PER_BLOCK); -+ -+ ep_free_main (&rail->r_generic, blk->blk_txds[0].txd_main_addr, EP4_TXD_RAIL_MAIN_SIZE * EP4_NUM_TXD_PER_BLOCK); -+ ep_free_elan (&rail->r_generic, blk->blk_txds[0].txd_elan_addr, EP4_TXD_RAIL_ELAN_SIZE * EP4_NUM_TXD_PER_BLOCK); -+ -+ KMEM_FREE (blk, sizeof (EP4_TXD_RAIL_BLOCK)); -+} -+ -+static EP4_TXD_RAIL * -+get_txd_rail (EP4_XMTR_RAIL *xmtrRail) -+{ -+ EP_COMMS_SUBSYS *subsys = XMTR_TO_SUBSYS(xmtrRail); -+ EP4_TXD_RAIL *txdRail; -+ unsigned long flags; -+ int low_on_txds; -+ -+ spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags); -+ -+ if (list_empty (&xmtrRail->xmtr_freelist)) -+ txdRail = NULL; -+ else -+ { -+ txdRail = list_entry (xmtrRail->xmtr_freelist.next, EP4_TXD_RAIL, txd_generic.Link); -+ -+ EP4_TXD_ASSERT_FREE(txdRail); -+ -+ list_del (&txdRail->txd_generic.Link); -+ -+ xmtrRail->xmtr_freecount--; -+ } -+ /* Wakeup the descriptor primer thread if there's not many left */ -+ low_on_txds = (xmtrRail->xmtr_freecount < ep_txd_lowat); -+ -+ spin_unlock_irqrestore (&xmtrRail->xmtr_freelock, flags); -+ -+ if (low_on_txds) -+ ep_kthread_schedule (&subsys->Thread, lbolt); -+ -+ -+ return (txdRail); -+} -+ -+static void -+free_txd_rail (EP4_XMTR_RAIL *xmtrRail, EP4_TXD_RAIL *txdRail) -+{ -+ unsigned long flags; -+ -+ EP4_TXD_ASSERT_FREE(txdRail); -+ -+ spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags); -+ -+ list_add (&txdRail->txd_generic.Link, &xmtrRail->xmtr_freelist); -+ -+ xmtrRail->xmtr_freecount++; -+ -+ if (xmtrRail->xmtr_freewaiting) -+ { -+ xmtrRail->xmtr_freewaiting--; -+ kcondvar_wakeupall (&xmtrRail->xmtr_freesleep, &xmtrRail->xmtr_freelock); -+ } -+ -+ spin_unlock_irqrestore 
(&xmtrRail->xmtr_freelock, flags); -+} -+ -+static void -+bind_txd_rail (EP_TXD *txd, EP4_TXD_RAIL *txdRail) -+{ -+ EPRINTF6 (DBG_XMTR, "%s: bind_txd_rail: txd=%p txdRail=%p XID=%08x.%08x.%016llx\n", -+ XMTR_TO_RAIL(txdRail->txd_generic.XmtrRail)->r_generic.Name, txd, txdRail, -+ txd->Envelope.Xid.Generation, txd->Envelope.Xid.Handle, (long long)txd->Envelope.Xid.Unique); -+ -+ txd->TxdRail = &txdRail->txd_generic; -+ txdRail->txd_generic.Txd = txd; -+} -+ -+static void -+unbind_txd_rail (EP_TXD *txd, EP4_TXD_RAIL *txdRail) -+{ -+ EP4_TXD_ASSERT (txdRail, txd->TxdRail == &txdRail->txd_generic && txdRail->txd_generic.Txd == txd); -+ -+ EPRINTF6 (DBG_XMTR, "%s: unbind_txd_rail: txd=%p txdRail=%p XID=%08x.%08x.%016llx\n", -+ XMTR_TO_RAIL(txdRail->txd_generic.XmtrRail)->r_generic.Name, txd, txdRail, -+ txd->Envelope.Xid.Generation, txd->Envelope.Xid.Handle, (long long)txd->Envelope.Xid.Unique); -+ -+ -+ txdRail->txd_generic.Txd = NULL; -+ txd->TxdRail = NULL; -+} -+ -+static void -+initialise_txd (EP_TXD *txd, EP4_TXD_RAIL *txdRail, unsigned int phase) -+{ -+ EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) txdRail->txd_generic.XmtrRail; -+ EP4_RAIL *rail = XMTR_TO_RAIL (xmtrRail); -+ ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev; -+ -+ /* Flush the Elan TLB if mappings have changed */ -+ ep_perrail_dvma_sync (&rail->r_generic); -+ -+ /* Initialise the per-rail fields in the envelope */ -+ txd->Envelope.TxdRail = txdRail->txd_elan_addr; -+ txd->Envelope.NodeId = rail->r_generic.Position.pos_nodeid; -+ -+ /* Allocate a network error fixup cookie */ -+ txdRail->txd_cookie = ep4_neterr_cookie (rail, txd->NodeId) | EP4_COOKIE_STEN; -+ -+#if ! defined(CONFIG_EP_NO_CHECK_SUM) -+ if ( epdebug_check_sum ) -+ txd->Envelope.CheckSum = ep_calc_check_sum( txd->Xmtr->Subsys->Subsys.Sys, &txd->Envelope, txd->Envelope.Frags, txd->Envelope.nFrags); -+ else -+#endif -+ txd->Envelope.CheckSum = 0; -+ -+ /* Initialise the per-rail events */ -+ switch (phase) -+ { -+ case EP_TXD_PHASE_ACTIVE: -+ { -+ unsigned int nsets = (txd->Envelope.nFrags ? txd->Envelope.nFrags : 1) + ( EP_IS_MULTICAST(txd->Envelope.Attr) ? 1 : 0); -+ -+ if (! 
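-+	/* nsets is the number of event "sets" this transmit will see: one
-+	 * per fragment (or one if there are none) plus one for the extra
-+	 * multicast transaction; the event counts below are scaled by 32
-+	 * per set.  For a plain transmit the done event waits on all the
-+	 * sets directly; for an RPC the data event counts them and the
-+	 * done event then waits only on the remote completion: */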
EP_IS_RPC(txd->Envelope.Attr)) -+ { -+ elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType), -+ E4_EVENT_INIT_VALUE (-32 * nsets, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS)); -+ -+ txdRail->txd_main->txd_data = EP4_STATE_FINISHED; -+ } -+ else -+ { -+ elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType), -+ E4_EVENT_INIT_VALUE(-32 * nsets , E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0)); -+ elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType), -+ E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS)); -+ -+ txdRail->txd_main->txd_data = EP4_STATE_ACTIVE; -+ } -+ -+ txdRail->txd_main->txd_env = EP4_STATE_ACTIVE; -+ txdRail->txd_main->txd_done = EP4_STATE_ACTIVE; -+ break; -+ } -+ -+ case EP_TXD_PHASE_PASSIVE: -+ EP4_TXD_ASSERT (txdRail, EP_IS_RPC(txd->Envelope.Attr)); -+ -+ txdRail->txd_main->txd_env = EP4_STATE_FINISHED; -+ txdRail->txd_main->txd_data = EP4_STATE_FINISHED; -+ txdRail->txd_main->txd_done = EP4_STATE_ACTIVE; -+ -+ elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType), -+ E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS)); -+ break; -+ } -+ -+ if (EP_IS_NO_INTERRUPT(txd->Envelope.Attr)) -+ elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_intr_cmd), NOP_CMD); -+} -+ -+static void -+terminate_txd_rail (EP4_XMTR_RAIL *xmtrRail, EP4_TXD_RAIL *txdRail) -+{ -+ EP4_SDRAM_ASSERT (TXD_TO_RAIL(txdRail),\ -+ (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),\ -+ E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));\ -+ -+ /* clear the done flags - so that it will be ignored if an event interrupt is generated */ -+ txdRail->txd_main->txd_env = EP4_STATE_FREE; -+ txdRail->txd_main->txd_data = EP4_STATE_FREE; -+ txdRail->txd_main->txd_done = EP4_STATE_FREE; -+ -+#if defined(DEBUG_ASSERT) -+ if (sdram_assert) -+ { -+ ELAN4_DEV *dev = XMTR_TO_RAIL (xmtrRail)->r_ctxt.ctxt_dev; -+ -+ elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType), -+ E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0)); -+ elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType), -+ E4_EVENT_INIT_VALUE (0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS)); -+ } -+#endif -+} -+ -+static void -+defer_txd_rail (EP4_TXD_RAIL *txdRail) -+{ -+ EP4_XMTR_RAIL *xmtrRail = TXD_TO_XMTR(txdRail); -+ EP4_RAIL *rail = XMTR_TO_RAIL(xmtrRail); -+ ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev; -+ EP_COMMS_SUBSYS *subsys = XMTR_TO_SUBSYS(xmtrRail); -+ -+ EPRINTF5 (DBG_XMTR, "%s: defer_txd_rail: xmtrRail=%p txdRail=%p env/data (%d,%d) not finished\n", -+ rail->r_generic.Name, xmtrRail, txdRail, (int)txdRail->txd_main->txd_env, (int)txdRail->txd_main->txd_data); -+ -+ /* transmit has completed, but the data dma has not completed -+ * (because of network error fixup), we queue the txdRail onto a list -+ * to be polled for completion later. 
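-+     * This arises when the done interrupt has been taken but a network
-+     * error fixup has left the envelope or data event outstanding;
-+     * ep4xmtr_check() then re-examines txd_env and txd_data until both
-+     * reach EP4_STATE_FINISHED.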
-+ */ -+ if (txdRail->txd_retry_time) -+ { -+ EP4_TXD_ASSERT (txdRail, (on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]) == 1 || -+ on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]) == 1)); -+ -+ list_del (&txdRail->txd_retry_link); -+ -+ txdRail->txd_main->txd_env = EP4_STATE_FINISHED; -+ -+ /* re-initialise the envelope event */ -+ elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType), -+ E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS)); -+ } -+ -+ txdRail->txd_retry_time = lbolt; -+ -+ list_add_tail (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_POLL]); -+ -+ ep_kthread_schedule (&subsys->Thread, lbolt); -+} -+ -+static void -+finalise_txd (EP_TXD *txd, EP4_TXD_RAIL *txdRail) -+{ -+ EP4_XMTR_RAIL *xmtrRail = TXD_TO_XMTR(txdRail); -+ -+ EP4_TXD_ASSERT_FINISHED (txdRail); -+ -+ unbind_txd_rail (txd, txdRail); -+ -+ terminate_txd_rail (xmtrRail, txdRail); -+ free_txd_rail (xmtrRail, txdRail); -+} -+ -+static void -+txd_interrupt (EP4_RAIL *rail, void *arg) -+{ -+ EP4_TXD_RAIL *txdRail = (EP4_TXD_RAIL *) arg; -+ EP4_XMTR_RAIL *xmtrRail = TXD_TO_XMTR(txdRail); -+ EP_XMTR *xmtr = xmtrRail->xmtr_generic.Xmtr; -+ int delay = 1; -+ EP_TXD *txd; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&xmtr->Lock, flags); -+ for (;;) -+ { -+ if (txdRail->txd_main->txd_done == EP4_STATE_FINISHED || txdRail->txd_main->txd_env == EP4_STATE_FAILED) -+ break; -+ -+ /* The write to txd_done could be held up in the PCI bridge even though -+ * we've seen the interrupt cookie. Unlike elan3, there is no possibility -+ * of spurious interrupts since we flush the command queues on node -+ * disconnection and the txcallback mechanism */ -+ mb(); -+ -+ if (delay > EP4_EVENT_FIRING_TLIMIT) -+ { -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+ -+ EP_ASSFAIL (XMTR_TO_RAIL(xmtrRail), "txd_interrupt - not finished\n"); -+ return; -+ } -+ DELAY (delay); -+ delay <<= 1; -+ } -+ -+ txd = txdRail->txd_generic.Txd; -+ -+ if (txdRail->txd_main->txd_env == EP4_STATE_FAILED) -+ { -+ spin_lock (&xmtrRail->xmtr_retrylock); -+ -+ EP4_TXD_ASSERT (txdRail, txdRail->txd_retry_time == 0); /* cannot be on retry/poll list */ -+ EP4_TXD_ASSERT (txdRail, txdRail->txd_main->txd_done != EP4_STATE_FINISHED); /* data xfer cannot have finished */ -+ -+ if (TxdShouldStabalise (&txdRail->txd_generic, &rail->r_generic)) -+ { -+ EPRINTF6 (DBG_STABILISE, "%s: txd_interrupt: stablise xmtrRail=%p txdRail=%p txd=%p XID=%llx dest=%u\n", rail->r_generic.Name, -+ xmtrRail, txdRail, txd, (long long)txd->Envelope.Xid.Unique, txd->NodeId); -+ -+ txdRail->txd_retry_time = lbolt; /* indicate on retry list */ -+ -+ list_add_tail (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]); -+ } -+ else -+ { -+ EPRINTF6 (DBG_RETRY, "%s: txd_interrupt: retry xmtrRail=%p txdRail=%p txd=%p XID=%llx dest=%u\n", rail->r_generic.Name, -+ xmtrRail, txdRail, txd, (long long)txd->Envelope.Xid.Unique, txd->NodeId); -+ -+ txdRail->txd_retry_time = lbolt + EP_RETRY_LOW_PRI_TIME; /* XXXX: backoff ? 
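-+							 *  a possible scheme (not implemented here) would back
-+							 *  off exponentially, e.g.
-+							 *      delay = min (delay << 1, 8 * EP_RETRY_LOW_PRI_TIME);
-+							 *      txdRail->txd_retry_time = lbolt + delay;
-+							 *  where the bound shown is illustrative only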
*/ -+ -+ list_add_tail (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]); -+ -+ ep_kthread_schedule (&rail->r_retry_thread, txdRail->txd_retry_time); -+ } -+ spin_unlock (&xmtrRail->xmtr_retrylock); -+ -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+ return; -+ } -+ -+ EP4_TXD_ASSERT (txdRail, txd != NULL && !(EP_IS_NO_INTERRUPT(txd->Envelope.Attr))); -+ -+ EPRINTF6 (DBG_XMTR, "%s: txd_interrupt: xmtrRail=%p txdRail=%p txd=%p XID=%llx dest=%u\n", rail->r_generic.Name, -+ xmtrRail, txdRail, txd, (long long)txd->Envelope.Xid.Unique, txd->NodeId); -+ -+ if (txdRail->txd_main->txd_env != EP4_STATE_FINISHED || txdRail->txd_main->txd_data != EP4_STATE_FINISHED) -+ { -+ defer_txd_rail (txdRail); -+ -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+ } -+ else -+ { -+ /* remove from active transmit list */ -+ list_del (&txd->Link); -+ -+ ep_xmtr_txd_stat(xmtr,txd); -+ -+ finalise_txd (txd, txdRail); -+ -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+ -+ txd->Handler (txd, txd->Arg, EP_SUCCESS); -+ -+ FreeTxd (xmtr, txd); -+ } -+} -+ -+static void -+poll_interrupt (EP4_RAIL *rail, void *arg) -+{ -+ EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) arg; -+ -+ ep_poll_transmits (xmtrRail->xmtr_generic.Xmtr); -+} -+ -+void -+issue_envelope_packet (EP4_XMTR_RAIL *xmtrRail, EP4_TXD_RAIL *txdRail) -+{ -+ EP_TXD *txd = txdRail->txd_generic.Txd; -+ ELAN4_CQ *cq = xmtrRail->xmtr_cq; -+ E4_uint64 *blk0 = (E4_uint64 *) &txd->Envelope; -+ E4_uint64 *blk1 = EP_HAS_PAYLOAD(txd->Envelope.Attr) ? (E4_uint64 *) &txd->Payload : NULL; -+ E4_Addr qaddr = EP_MSGQ_ADDR(txd->Service); -+ -+ EP4_SDRAM_ASSERT (TXD_TO_RAIL(txdRail),\ -+ (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),\ -+ E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));\ -+ -+ elan4_open_packet (cq, OPEN_PACKET (0, PACK_OK | RESTART_COUNT_ZERO, EP_VP_DATA(txd->NodeId))); -+ elan4_sendtrans0 (cq, TR_INPUT_Q_GETINDEX, EP_MSGQ_ADDR(txd->Service)); -+ -+ /* send the payload if present */ -+ if (blk0) elan4_sendtransp (cq, TR_WRITE(128 >> 3, 0, TR_DATATYPE_BYTE), 0, blk0); -+ if (blk1) elan4_sendtransp (cq, TR_WRITE(128 >> 3, 0, TR_DATATYPE_BYTE), 128, blk1); -+ -+ elan4_sendtrans1 (cq, TR_INPUT_Q_COMMIT, qaddr, txdRail->txd_cookie); -+ -+ elan4_guard (cq, GUARD_CHANNEL (1) | GUARD_TEST(0, PACK_OK) | GUARD_RESET (EP4_TXD_STEN_RETRYCOUNT)); -+ elan4_write_dword_cmd (cq, txdRail->txd_main_addr + offsetof (EP4_TXD_RAIL_MAIN, txd_env), EP4_STATE_FINISHED); -+ -+ elan4_guard (cq, GUARD_CHANNEL (1) | GUARD_TEST(0, RESTART_COUNT_ZERO) | GUARD_RESET (EP4_TXD_STEN_RETRYCOUNT)); -+ elan4_set_event_cmd (cq, txdRail->txd_elan_addr + offsetof (EP4_TXD_RAIL_ELAN, txd_env)); -+ -+ elan4_write_dword_cmd (cq, xmtrRail->xmtr_main_addr + offsetof (EP4_XMTR_RAIL_MAIN, xmtr_flowcnt), ++xmtrRail->xmtr_flowcnt); -+} -+ -+void -+ep4xmtr_flush_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail) -+{ -+ EP4_RAIL *rail = XMTR_TO_RAIL (xmtrRail); -+ EP4_COMMS_RAIL *commsRail = XMTR_TO_COMMS (xmtrRail); -+ struct list_head *el, *nel; -+ unsigned long flags; -+ -+ switch (rail->r_generic.CallbackStep) -+ { -+ case EP_CB_FLUSH_FILTERING: -+ /* need to acquire/release the Lock to ensure that the node state -+ * transition has been noticed and no new envelopes are queued to -+ * nodes which are passivating. 
*/ -+ spin_lock_irqsave (&xmtr->Lock, flags); -+ -+ /* Then we insert a "setevent" into the command queue to flush -+ * through the envelopes which have already been submitted */ -+ ep4comms_flush_setevent (commsRail, xmtrRail->xmtr_cq); -+ -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+ -+ break; -+ -+ case EP_CB_FLUSH_FLUSHING: -+ /* remove any envelopes which are retrying to nodes which are going down */ -+ spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags); -+ list_for_each_safe (el, nel, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]) { -+ EP4_TXD_RAIL *txdRail = list_entry (el, EP4_TXD_RAIL, txd_retry_link); -+ EP_TXD *txd = txdRail->txd_generic.Txd; -+ EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[txd->NodeId]; -+ -+ EP4_TXD_ASSERT (txdRail, txdRail->txd_main->txd_env == EP4_STATE_FAILED); -+ -+ if (nodeRail->State == EP_NODE_LOCAL_PASSIVATE) -+ { -+ EPRINTF2 (DBG_XMTR, "%s; ep4xmtr_flush_callback: removing txdRail %p from retry list\n", rail->r_generic.Name, txdRail); -+ -+ EP4_TXD_ASSERT (txdRail, txdRail->txd_retry_time != 0); -+ -+ list_del (&txdRail->txd_retry_link); -+ list_add_tail (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]); -+ } -+ } -+ spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags); -+ -+ /* Determine whether we have active or passive messages to -+ * any node which is passivating */ -+ spin_lock_irqsave (&xmtr->Lock, flags); -+ list_for_each (el, &xmtr->ActiveDescList) { -+ EP_TXD *txd = list_entry (el, EP_TXD, Link); -+ EP4_TXD_RAIL *txdRail = (EP4_TXD_RAIL *) txd->TxdRail; -+ EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[txd->NodeId]; -+ -+ if (txdRail == NULL || txdRail->txd_generic.XmtrRail != &xmtrRail->xmtr_generic || nodeRail->State != EP_NODE_LOCAL_PASSIVATE) -+ continue; -+ -+ EPRINTF5 (DBG_XMTR, "%s: flush txd=%p txdRail=%p data=%llx done=%llx\n", rail->r_generic.Name, -+ txd, txdRail, (long long)txdRail->txd_main->txd_data, (long long)txdRail->txd_main->txd_done); -+ -+ if (EP_IS_RPC(txd->Envelope.Attr)) -+ { -+ if (txdRail->txd_main->txd_data == EP4_STATE_ACTIVE) -+ nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES; -+ else if (txdRail->txd_main->txd_data == EP4_STATE_ACTIVE) -+ nodeRail->MessageState |= EP_NODE_PASSIVE_MESSAGES; -+ } -+ else -+ { -+ if (txdRail->txd_main->txd_data == EP4_STATE_ACTIVE) -+ nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES; -+ } -+ } -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+ break; -+ -+ default: -+ panic ("ep4xmtr_flush_callback: invalid callback step\n"); -+ break; -+ } -+} -+ -+void -+ep4xmtr_failover_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail) -+{ -+ EP4_RAIL *rail = XMTR_TO_RAIL (xmtrRail); -+ struct list_head txdList; -+ struct list_head *el, *nel; -+ unsigned long flags; -+ -+ INIT_LIST_HEAD (&txdList); -+ -+ spin_lock_irqsave (&xmtr->Lock, flags); -+ list_for_each_safe (el, nel, &xmtr->ActiveDescList) { -+ EP_TXD *txd = list_entry (el, EP_TXD, Link); -+ EP4_TXD_RAIL *txdRail = (EP4_TXD_RAIL *) txd->TxdRail; -+ EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[txd->NodeId]; -+ -+ /* Only progress relocation of txd's bound to this rail */ -+ if (! TXD_BOUND2RAIL (txdRail, xmtrRail) || nodeRail->State != EP_NODE_PASSIVATED) -+ continue; -+ -+ /* XXXX - no rail failover for now ....*/ -+ -+ EPRINTF4 (DBG_XMTR, "%s: ep4xmtr_failover_callback - xmtr %p txd %p node %d completed\n", rail->r_generic.Name, xmtr, txd, txd->NodeId); -+ } -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+ -+ while (! 
list_empty (&txdList))
-+    {
-+	EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link);
-+
-+	list_del (&txd->Link);
-+
-+	txd->Handler (txd, txd->Arg, EP_CONN_RESET);
-+
-+	FreeTxd (xmtr, txd);
-+    }
-+}
-+
-+
-+void
-+ep4xmtr_disconnect_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail)
-+{
-+    EP4_RAIL         *rail = XMTR_TO_RAIL (xmtrRail);
-+    ELAN4_DEV        *dev  = rail->r_ctxt.ctxt_dev;
-+    struct list_head *el, *nel;
-+    struct list_head  txdList;
-+    unsigned long     flags;
-+
-+    INIT_LIST_HEAD (&txdList);
-+
-+    spin_lock_irqsave (&xmtr->Lock, flags);
-+
-+    list_for_each_safe (el, nel, &xmtr->ActiveDescList) {
-+	EP_TXD       *txd      = list_entry (el, EP_TXD, Link);
-+	EP4_TXD_RAIL *txdRail  = (EP4_TXD_RAIL *) txd->TxdRail;
-+	EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[txd->NodeId];
-+
-+	if ( ! TXD_BOUND2RAIL (txdRail, xmtrRail) || nodeRail->State != EP_NODE_DISCONNECTING)
-+	    continue;
-+
-+	if (txdRail->txd_main->txd_done == EP4_STATE_ACTIVE)
-+	{
-+	    EPRINTF8 (DBG_DISCON, "ep4xmtr_disconnect_callback: txdRail=%p : events %llx,%llx,%llx done %llx,%llx,%llx retry %lx\n", txdRail,
-+		      elan4_sdram_readq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType)),
-+		      elan4_sdram_readq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType)),
-+		      elan4_sdram_readq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType)),
-+		      (long long)txdRail->txd_main->txd_env, (long long)txdRail->txd_main->txd_data, (long long)txdRail->txd_main->txd_done,
-+		      txdRail->txd_retry_time);
-+
-+	    if (txdRail->txd_retry_time)
-+	    {
-+		/* re-initialise the envelope event */
-+		elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),
-+				    E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
-+
-+		EP4_TXD_ASSERT (txdRail, on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]) == 1);
-+
-+		txdRail->txd_retry_time = 0;
-+
-+		list_del (&txdRail->txd_retry_link);
-+	    }
-+
-+	    /* Remove from active list */
-+	    list_del (&txd->Link);
-+
-+	    unbind_txd_rail (txd, txdRail);
-+
-+	    terminate_txd_rail (xmtrRail, txdRail);
-+	    free_txd_rail (xmtrRail, txdRail);
-+
-+	    EPRINTF4 (DBG_XMTR, "%s: ep4xmtr_disconnect_callback - xmtr %p txd %p node %d not connected\n", rail->r_generic.Name, xmtr, txd, txd->NodeId);
-+
-+	    /* add to the list of txd's which are to be completed */
-+	    list_add_tail (&txd->Link, &txdList);
-+	}
-+    }
-+    spin_unlock_irqrestore (&xmtr->Lock, flags);
-+
-+    while (!
list_empty (&txdList)) -+ { -+ EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link); -+ -+ list_del (&txd->Link); -+ -+ txd->Handler (txd, txd->Arg, EP_CONN_RESET); -+ -+ FreeTxd (xmtr, txd); -+ } -+} -+ -+void -+ep4xmtr_neterr_flush (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies) -+{ -+ EP4_COMMS_RAIL *commsRail = XMTR_TO_COMMS (xmtrRail); -+ unsigned long flags; -+ -+ spin_lock_irqsave (&xmtr->Lock, flags); -+ -+ /* insert a "setevent" into the command queue to flush -+ * through the envelopes which have already been submitted */ -+ ep4comms_flush_setevent (commsRail, xmtrRail->xmtr_cq); -+ -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+} -+ -+void -+ep4xmtr_neterr_check (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies) -+{ -+ EP4_RAIL *rail = XMTR_TO_RAIL (xmtrRail); -+ ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev; -+ struct list_head *el; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&xmtr->Lock, flags); -+ list_for_each (el, &xmtr->ActiveDescList) { -+ EP_TXD *txd = list_entry (el, EP_TXD, Link); -+ EP4_TXD_RAIL *txdRail = (EP4_TXD_RAIL *) txd->TxdRail; -+ -+ if ( ! TXD_BOUND2RAIL (txdRail, xmtrRail) || txd->NodeId != nodeId) -+ continue; -+ -+ /* The only non-dma associated with a txd is the initial sten packet, if it has been acked -+ * and the neterr cookie matches, then change it to look like it's been acked since the -+ * INPUT_Q_COMMIT transaction has already been executed */ -+ if (txdRail->txd_main->txd_env == EP4_STATE_FAILED && (txdRail->txd_cookie == cookies[0] || txdRail->txd_cookie == cookies[1])) -+ { -+ EPRINTF4 (DBG_NETWORK_ERROR, "%s: ep4xmtr_neterr_callback: cookie <%lld%s%s%s%s> matches txd %p txdRail %p\n", -+ rail->r_generic.Name, (long long)EP4_COOKIE_STRING(txdRail->txd_cookie), txd, txdRail); -+ -+ EP4_TXD_ASSERT (txdRail, txdRail->txd_retry_time != 0); -+ -+ txdRail->txd_main->txd_env = EP4_STATE_FINISHED; -+ -+ /* re-initialise the envelope event */ -+ elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType), -+ E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS)); -+ -+ spin_lock (&xmtrRail->xmtr_retrylock); -+ -+ EP4_TXD_ASSERT (txdRail, (on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]) == 1 || -+ on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]) == 1)); -+ -+ txdRail->txd_retry_time = 0; -+ -+ list_del (&txdRail->txd_retry_link); -+ -+ spin_unlock (&xmtrRail->xmtr_retrylock); -+ } -+ } -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+} -+ -+int -+ep4xmtr_poll_txd (EP_XMTR_RAIL *x, EP_TXD_RAIL *t, int how) -+{ -+ EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) x; -+ ELAN4_DEV *dev = XMTR_TO_DEV (xmtrRail); -+ EP4_TXD_RAIL *txdRail = (EP4_TXD_RAIL *) t; -+ EP_TXD *txd = txdRail->txd_generic.Txd; -+ -+ if (! 
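-+    /* Polling applies only to transmits submitted with the no-interrupt
-+     * attribute; ENABLE/DISABLE_TX_CALLBACK below toggle the completion
-+     * interrupt by rewriting the c_intr_cmd slot of the done-event
-+     * command stream between INTERRUPT_CMD and NOP_CMD: */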
EP_IS_NO_INTERRUPT(txd->Envelope.Attr)) -+ return 0; -+ -+ switch (how) -+ { -+ case ENABLE_TX_CALLBACK: -+ if (!EP_IS_INTERRUPT_ENABLED(txd->Envelope.Attr)) -+ { -+ elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_intr_cmd), -+ INTERRUPT_CMD | (xmtrRail->xmtr_intcookie.int_val << E4_MAIN_INT_SHIFT)); -+ -+ txd->Envelope.Attr |= EP_INTERRUPT_ENABLED; -+ } -+ break; -+ -+ case DISABLE_TX_CALLBACK: -+ if (EP_IS_INTERRUPT_ENABLED(txd->Envelope.Attr & EP_INTERRUPT_ENABLED)) -+ { -+ elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_intr_cmd), NOP_CMD); -+ -+ txd->Envelope.Attr &= ~EP_INTERRUPT_ENABLED; -+ } -+ } -+ -+ if (txdRail->txd_main->txd_env == EP4_STATE_FINISHED && txdRail->txd_main->txd_data == EP4_STATE_FINISHED && txdRail->txd_main->txd_done == EP4_STATE_FINISHED) -+ { -+ EPRINTF3 (DBG_XMTR, "%s: ep4xmtr_poll_txd: txd=%p XID=%llx completed\n", -+ XMTR_TO_RAIL (xmtrRail)->r_generic.Name, txd, (long long)txd->Envelope.Xid.Unique); -+ -+ elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_intr_cmd), -+ INTERRUPT_CMD | (txdRail->txd_intcookie.int_val << E4_MAIN_INT_SHIFT)); -+ -+ -+ ep_xmtr_txd_stat(xmtrRail->xmtr_generic.Xmtr,txd); -+ -+ finalise_txd (txd, txdRail); -+ -+ return 1; -+ } -+ -+ return 0; -+} -+ -+int -+ep4xmtr_bind_txd (EP_TXD *txd, EP_XMTR_RAIL *x, unsigned int phase) -+{ -+ EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) x; -+ EP4_RAIL *rail = XMTR_TO_RAIL (xmtrRail); -+ EP4_TXD_RAIL *txdRail; -+ unsigned long flags; -+ -+ if ((txdRail = get_txd_rail (xmtrRail)) == NULL) -+ return 0; -+ -+ switch (phase) -+ { -+ case EP_TXD_PHASE_ACTIVE: -+ if (rail->r_generic.Nodes[txd->NodeId].State != EP_NODE_CONNECTED) -+ { -+ EPRINTF2 (DBG_XMTR, "%s: ep4xmtr_bind_txd: node %u not connected on this rail\n", rail->r_generic.Name, txd->NodeId); -+ -+ free_txd_rail (xmtrRail, txdRail); -+ return 0; -+ } -+ -+ initialise_txd (txd, txdRail, EP_TXD_PHASE_ACTIVE); -+ -+ bind_txd_rail (txd, txdRail); -+ -+ /* generate the STEN packet to transfer the envelope */ -+ spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags); -+ if (((int) (xmtrRail->xmtr_flowcnt - xmtrRail->xmtr_main->xmtr_flowcnt)) < EP4_XMTR_FLOWCNT) -+ issue_envelope_packet (xmtrRail, txdRail); -+ else -+ { -+ txdRail->txd_retry_time = lbolt; -+ -+ list_add_tail (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]); -+ -+ ep_kthread_schedule (&rail->r_retry_thread, txdRail->txd_retry_time); -+ } -+ spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags); -+ break; -+ -+ case EP_TXD_PHASE_PASSIVE: -+ initialise_txd (txd, txdRail, EP_TXD_PHASE_PASSIVE); -+ -+ EP_XMTR_OP (txd->TxdRail->XmtrRail, UnbindTxd) (txd, EP_TXD_PHASE_PASSIVE); /* unbind from existing rail */ -+ -+ bind_txd_rail (txd, txdRail); /* and bind it to our new rail */ -+ break; -+ } -+ -+ return 1; -+} -+ -+void -+ep4xmtr_unbind_txd (EP_TXD *txd, unsigned int phase) -+{ -+ /* XXXX - TBD */ -+} -+ -+long -+ep4xmtr_check (EP_XMTR_RAIL *x, long nextRunTime) -+{ -+ EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) x; -+ EP_XMTR *xmtr = xmtrRail->xmtr_generic.Xmtr; -+ struct list_head txdList; -+ struct list_head *el, *nel; -+ unsigned long flags; -+ -+ INIT_LIST_HEAD (&txdList); -+ -+ if (xmtrRail->xmtr_freecount < ep_txd_lowat && !alloc_txd_block (xmtrRail)) -+ { -+ EPRINTF1 (DBG_RCVR,"%s: failed to grow txd rail pool\n", XMTR_TO_RAIL(xmtrRail)->r_generic.Name); -+ -+ if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME)) 
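-+	/* nextRunTime == 0 means no wakeup is scheduled; otherwise it is
-+	 * the earliest lbolt tick at which the worker must run, so it is
-+	 * only ever pulled earlier, never pushed back: */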
-+ nextRunTime = lbolt + RESOURCE_RETRY_TIME; -+ } -+ -+ spin_lock_irqsave (&xmtr->Lock, flags); -+ list_for_each_safe (el, nel, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_POLL]) { -+ EP4_TXD_RAIL *txdRail = list_entry (el, EP4_TXD_RAIL, txd_retry_link); -+ -+ if (txdRail->txd_main->txd_env != EP4_STATE_FINISHED || txdRail->txd_main->txd_data != EP4_STATE_FINISHED) -+ { -+ ep_debugf (DBG_XMTR, "%s: ep4xmtr_check: xmtrRail=%p txdRail=%p env/data (%d,%d) not finished\n", -+ XMTR_TO_RAIL(xmtrRail)->r_generic.Name, xmtrRail, txdRail, (int)txdRail->txd_main->txd_env, (int)txdRail->txd_main->txd_data); -+ -+ nextRunTime = lbolt + HZ; -+ } -+ else -+ { -+ EP_TXD *txd = txdRail->txd_generic.Txd; -+ -+ ep_debugf (DBG_XMTR, "%s: ep4xmtr_check: xmtrRail=%p txdRail=%p env/data (%d,%d) finished\n", -+ XMTR_TO_RAIL(xmtrRail)->r_generic.Name, xmtrRail, txdRail, (int)txdRail->txd_main->txd_env, (int)txdRail->txd_main->txd_data); -+ -+ EPRINTF5 (DBG_XMTR, "%s: ep4xmtr_check: xmtrRail=%p txdRail=%p env/data (%d,%d) finished\n", -+ XMTR_TO_RAIL(xmtrRail)->r_generic.Name, xmtrRail, txdRail, (int)txdRail->txd_main->txd_env, (int)txdRail->txd_main->txd_data); -+ EPRINTF3 (DBG_XMTR, "%s: done %x data %x\n", XMTR_TO_RAIL(xmtrRail)->r_generic.Name, -+ (uint)(txdRail->txd_elan_addr + offsetof (EP4_TXD_RAIL_ELAN, txd_done)), -+ (uint)(txdRail->txd_elan_addr + offsetof (EP4_TXD_RAIL_ELAN, txd_data))); -+ -+ EP4_TXD_ASSERT (txdRail, txdRail->txd_retry_time != 0); -+ -+ /* remove txd from active list and add to list to call handlers */ -+ list_del (&txd->Link); -+ list_add_tail (&txd->Link, &txdList); -+ -+ /* remove and free of txdRail */ -+ txdRail->txd_retry_time = 0; -+ list_del (&txdRail->txd_retry_link); -+ -+ finalise_txd (txd, txdRail); -+ -+ } -+ } -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+ -+ while (! list_empty (&txdList)) -+ { -+ EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link); -+ -+ list_del (&txd->Link); -+ -+ ep_xmtr_txd_stat (xmtr,txd); -+ -+ txd->Handler (txd, txd->Arg, EP_SUCCESS); -+ -+ FreeTxd (xmtr, txd); -+ } -+ -+ return nextRunTime; -+} -+ -+unsigned long -+ep4xmtr_retry (EP4_RAIL *rail, void *arg, unsigned long nextRunTime) -+{ -+ EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) arg; -+ ELAN4_DEV *dev = XMTR_TO_DEV(xmtrRail); -+ unsigned long flags; -+ -+ spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags); -+ while (! 
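-+    /* Entries are appended with monotonically increasing txd_retry_time,
-+     * so the scan stops at the first entry still in the future.  An
-+     * envelope is re-issued only while the flow-control window is open:
-+     *
-+     *    (int)(xmtrRail->xmtr_flowcnt - xmtrRail->xmtr_main->xmtr_flowcnt)
-+     *            < EP4_XMTR_FLOWCNT
-+     *
-+     * xmtr_main->xmtr_flowcnt being advanced by the dword write appended
-+     * to each envelope packet in issue_envelope_packet(): */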
list_empty (&xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY])) -+ { -+ EP4_TXD_RAIL *txdRail = list_entry (xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY].next, EP4_TXD_RAIL, txd_retry_link); -+ -+ if (BEFORE (lbolt, txdRail->txd_retry_time)) -+ { -+ if (nextRunTime == 0 || AFTER (nextRunTime, txdRail->txd_retry_time)) -+ nextRunTime = txdRail->txd_retry_time; -+ -+ break; -+ } -+ -+ if (((int) (xmtrRail->xmtr_flowcnt - xmtrRail->xmtr_main->xmtr_flowcnt)) < EP4_XMTR_FLOWCNT) -+ { -+ txdRail->txd_retry_time = 0; -+ -+ list_del (&txdRail->txd_retry_link); -+ -+ /* re-initialise the envelope event */ -+ elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType), -+ E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS)); -+ -+ EPRINTF3 (DBG_RETRY, "%s: ep4xmtr_retry: re-issue envelope packet to %d for txdRail=%p\n", -+ rail->r_generic.Name, txdRail->txd_generic.Txd->Envelope.NodeId, txdRail); -+ -+ txdRail->txd_main->txd_env = EP4_STATE_ACTIVE; -+ -+ issue_envelope_packet (xmtrRail, txdRail); -+ } -+ else -+ { -+ EPRINTF2 (DBG_RETRY, "%s: ep4xmtr_retry: cannot re-issue envelope packet to %d\n", rail->r_generic.Name, txdRail->txd_generic.Txd->Envelope.NodeId); -+ -+ if (nextRunTime == 0 || AFTER (nextRunTime, txdRail->txd_retry_time)) -+ nextRunTime = txdRail->txd_retry_time; -+ -+ break; -+ } -+ } -+ spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags); -+ -+ return nextRunTime; -+} -+ -+void -+ep4xmtr_add_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail) -+{ -+ EP4_RAIL *rail = (EP4_RAIL *) commsRail->Rail; -+ EP_COMMS_SUBSYS *subsys = xmtr->Subsys; -+ EP4_XMTR_RAIL *xmtrRail; -+ unsigned long flags; -+ int i; -+ -+ KMEM_ZALLOC (xmtrRail, EP4_XMTR_RAIL *, sizeof (EP4_XMTR_RAIL), 1); -+ -+ spin_lock_init (&xmtrRail->xmtr_freelock); -+ kcondvar_init (&xmtrRail->xmtr_freesleep); -+ INIT_LIST_HEAD (&xmtrRail->xmtr_freelist); -+ INIT_LIST_HEAD (&xmtrRail->xmtr_blocklist); -+ -+ for (i = 0; i < EP4_TXD_NUM_LISTS; i++) -+ INIT_LIST_HEAD (&xmtrRail->xmtr_retrylist[i]); -+ spin_lock_init (&xmtrRail->xmtr_retrylock); -+ -+ xmtrRail->xmtr_generic.CommsRail = commsRail; -+ xmtrRail->xmtr_generic.Xmtr = xmtr; -+ -+ xmtrRail->xmtr_main = ep_alloc_main (&rail->r_generic, sizeof (EP4_XMTR_RAIL_MAIN), 0, &xmtrRail->xmtr_main_addr); -+ xmtrRail->xmtr_cq = elan4_alloccq (&rail->r_ctxt, EP4_XMTR_CQSIZE, CQ_EnableAllBits, CQ_Priority); -+ -+ xmtrRail->xmtr_retryops.op_func = ep4xmtr_retry; -+ xmtrRail->xmtr_retryops.op_arg = xmtrRail; -+ -+ ep4_add_retry_ops (rail, &xmtrRail->xmtr_retryops); -+ -+ ep4_register_intcookie (rail, &xmtrRail->xmtr_intcookie, xmtrRail->xmtr_main_addr, -+ poll_interrupt, xmtrRail); -+ -+ spin_lock_irqsave (&xmtr->Lock, flags); -+ -+ xmtr->Rails[commsRail->Rail->Number] = &xmtrRail->xmtr_generic; -+ xmtr->RailMask |= EP_RAIL2RAILMASK(commsRail->Rail->Number); -+ -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+ -+ ep_kthread_schedule (&subsys->Thread, lbolt); -+ -+ ep_procfs_xmtr_add_rail(&(xmtrRail->xmtr_generic)); -+} -+ -+void -+ep4xmtr_del_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail) -+{ -+ EP4_RAIL *rail = (EP4_RAIL *) commsRail->Rail; -+ EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) xmtr->Rails[commsRail->Rail->Number]; -+ unsigned long flags; -+ -+ /* rail mask set as not usable */ -+ spin_lock_irqsave (&xmtr->Lock, flags); -+ xmtr->RailMask &= ~EP_RAIL2RAILMASK (rail->r_generic.Number); -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+ -+ ep_procfs_xmtr_del_rail(&(xmtrRail->xmtr_generic)); -+ -+ /* wait for all 
txd's for this rail to become free */ -+ spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags); -+ while (xmtrRail->xmtr_freecount != xmtrRail->xmtr_totalcount) -+ { -+ xmtrRail->xmtr_freewaiting++; -+ kcondvar_wait (&xmtrRail->xmtr_freesleep, &xmtrRail->xmtr_freelock, &flags); -+ } -+ spin_unlock_irqrestore (&xmtrRail->xmtr_freelock, flags); -+ -+ spin_lock_irqsave (&xmtr->Lock, flags); -+ xmtr->Rails[commsRail->Rail->Number] = NULL; -+ spin_unlock_irqrestore (&xmtr->Lock, flags); -+ -+ /* all the txd's accociated with DescBlocks must be in the freelist */ -+ ASSERT (xmtrRail->xmtr_totalcount == xmtrRail->xmtr_freecount); -+ -+ /* run through the DescBlockList deleting them */ -+ while (!list_empty (&xmtrRail->xmtr_blocklist)) -+ free_txd_block (xmtrRail, list_entry(xmtrRail->xmtr_blocklist.next, EP4_TXD_RAIL_BLOCK , blk_link)); -+ -+ /* it had better be empty after that */ -+ ASSERT ((xmtrRail->xmtr_freecount == 0) && (xmtrRail->xmtr_totalcount == 0)); -+ -+ ep4_deregister_intcookie (rail, &xmtrRail->xmtr_intcookie); -+ -+ ep4_remove_retry_ops (rail, &xmtrRail->xmtr_retryops); -+ -+ elan4_freecq (&rail->r_ctxt, xmtrRail->xmtr_cq); -+ ep_free_main (&rail->r_generic, xmtrRail->xmtr_main_addr, sizeof (EP4_XMTR_RAIL_MAIN)); -+ -+ spin_lock_destroy (&xmtrRail->xmtr_retrylock); -+ -+ spin_lock_destroy (&xmtrRail->xmtr_freelock); -+ kcondvar_destroy (&xmtrRail->xmtr_freesleep); -+ -+ KMEM_FREE (xmtrRail, sizeof (EP4_XMTR_RAIL)); -+} -+ -+void -+ep4xmtr_display_xmtr (DisplayInfo *di, EP_XMTR_RAIL *x) -+{ -+ EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) x; -+ EP4_RAIL *rail = XMTR_TO_RAIL (xmtrRail); -+ unsigned int freeCount = 0; -+ unsigned int pollCount = 0; -+ unsigned int stalledCount = 0; -+ unsigned int retryCount = 0; -+ struct list_head *el; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags); -+ list_for_each (el, &xmtrRail->xmtr_freelist) -+ freeCount++; -+ spin_unlock_irqrestore (&xmtrRail->xmtr_freelock, flags); -+ -+ spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags); -+ list_for_each (el, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_POLL]) -+ pollCount++; -+ list_for_each (el, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]) -+ stalledCount++; -+ list_for_each (el, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]) -+ retryCount++; -+ spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags); -+ -+ (di->func)(di->arg, " rail=%d free=%d total=%d (%d) (retry %d,%d,%d)\n", -+ rail->r_generic.Number, xmtrRail->xmtr_freecount, xmtrRail->xmtr_totalcount, -+ freeCount, pollCount, stalledCount, retryCount); -+ (di->func)(di->arg, " cq %d flowcnt %lld,%lld\n", elan4_cq2num (xmtrRail->xmtr_cq), xmtrRail->xmtr_flowcnt, xmtrRail->xmtr_main->xmtr_flowcnt); -+} -+ -+void -+ep4xmtr_display_txd (DisplayInfo *di, EP_TXD_RAIL *t) -+{ -+ EP4_TXD_RAIL *txdRail = (EP4_TXD_RAIL *) t; -+ EP4_XMTR_RAIL *xmtrRail = TXD_TO_XMTR(txdRail); -+ EP4_TXD_RAIL_MAIN *txdMain = txdRail->txd_main; -+ sdramaddr_t txdElan = txdRail->txd_elan; -+ EP4_RAIL *rail = XMTR_TO_RAIL (xmtrRail); -+ ELAN4_DEV *dev = XMTR_TO_DEV (xmtrRail); -+ char *list = ""; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags); -+ if (txdRail->txd_retry_time) -+ { -+ if (on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_POLL])) -+ list = " poll"; -+ else if (on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED])) -+ list = " stalled"; -+ else if (on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY])) -+ list = " 
retry"; -+ else -+ list = " ERROR"; -+ } -+ spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags); -+ -+ (di->func)(di->arg, " Rail %d txd %p elan %lx (%x) main %p (%x) cookie <%lld%s%s%s%s> ecq %d %s\n", rail->r_generic.Number, -+ txdRail, txdRail->txd_elan, txdRail->txd_elan_addr, txdRail->txd_main, txdRail->txd_main_addr, -+ EP4_COOKIE_STRING(txdRail->txd_cookie), elan4_cq2num (txdRail->txd_ecq->ecq_cq), list); -+ -+ (di->func)(di->arg, " env %016llx %016llx %016llx -> %016llx\n", -+ elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType)), -+ elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_Params[0])), -+ elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_Params[1])), -+ txdMain->txd_env); -+ (di->func)(di->arg, " data %016llx %016llx %016llx -> %016llx\n", -+ elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType)), -+ elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_Params[0])), -+ elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_Params[1])), -+ txdMain->txd_data); -+ (di->func)(di->arg, " done %016llx %016llx %016llx -> %016llx\n", -+ elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType)), -+ elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_Params[0])), -+ elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_Params[1])), -+ txdMain->txd_done); -+} -+ -+int -+ep4xmtr_check_txd_state (EP_TXD *txd) -+{ -+ EP4_TXD_RAIL *txdRail = (EP4_TXD_RAIL *) txd->TxdRail; -+ EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) txdRail->txd_generic.XmtrRail; -+ ELAN4_DEV *dev = XMTR_TO_DEV (xmtrRail); -+ unsigned long flags; -+ -+ if (txdRail->txd_main->txd_env == EP4_STATE_FINISHED) -+ return 0; -+ -+ EP4_TXD_ASSERT (txdRail, txdRail->txd_retry_time != 0); -+ -+ spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags); -+ EP4_TXD_ASSERT (txdRail, on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]) == 1); -+ -+ list_del (&txdRail->txd_retry_link); -+ txdRail->txd_retry_time = 0; -+ spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags); -+ -+ /* re-initialise the envelope event */ -+ elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType), -+ E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS)); -+ -+ unbind_txd_rail (txd, txdRail); -+ -+ terminate_txd_rail (xmtrRail, txdRail); -+ free_txd_rail (xmtrRail, txdRail); -+ -+ return 1; -+} -+ -+void -+ep4xmtr_fillout_rail_stats(EP_XMTR_RAIL *xmtr_rail, char *str) { -+ /* no stats here yet */ -+ /* EP4_XMTR_RAIL * ep4xmtr_rail = (EP4_XMTR_RAIL *) xmtr_rail; */ -+} -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/ep_procfs.c linux-2.6.9/drivers/net/qsnet/ep/ep_procfs.c ---- clean/drivers/net/qsnet/ep/ep_procfs.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/ep_procfs.c 2005-03-30 09:06:34.000000000 -0500 -@@ -0,0 +1,331 @@ -+ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ *
-+ *    For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ident "@(#)$Id: ep_procfs.c,v 1.10 2005/03/30 14:06:34 mike Exp $"
-+/*      $Source: /cvs/master/quadrics/epmod/ep_procfs.c,v $*/
-+
-+#include
-+
-+#include
-+#include
-+#include
-+
-+#include "cm.h"
-+#include "debug.h"
-+#include "conf_linux.h"
-+
-+#include "kcomm_vp.h"
-+#include "kcomm_elan4.h"
-+#include "epcomms_elan4.h"
-+
-+#include
-+
-+struct proc_dir_entry *ep_procfs_xmtr_root;
-+struct proc_dir_entry *ep_procfs_rcvr_root;
-+
-+static int
-+ep_proc_open (struct inode *inode, struct file *file)
-+{
-+    QSNET_PROC_PRIVATE *pr;
-+    int                 pages = 4;
-+
-+    if ((pr = kmalloc (sizeof (QSNET_PROC_PRIVATE), GFP_KERNEL)) == NULL)
-+	return (-ENOMEM);
-+
-+    do {
-+	pr->pr_data_len = PAGESIZE * pages;
-+
-+	KMEM_ZALLOC (pr->pr_data, char *, pr->pr_data_len, 1);
-+	if (pr->pr_data == NULL)
-+	{
-+	    /* cannot format an "Out of Memory" message into a NULL
-+	     * buffer - fail the open instead */
-+	    kfree (pr);
-+	    return (-ENOMEM);
-+	}
-+
-+	pr->pr_off     = 0;
-+	pr->pr_len     = 0;
-+	pr->pr_data[0] = 0;
-+
-+	pr->pr_di.func = qsnet_proc_character_fill;
-+	pr->pr_di.arg  = (long)pr;
-+
-+	if (!strcmp("debug_xmtr", file->f_dentry->d_iname))
-+	{
-+	    EP_XMTR *xmtr = (EP_XMTR *)(PDE(inode)->data);
-+	    ep_display_xmtr (&pr->pr_di, xmtr);
-+	}
-+
-+	if (!strcmp("debug_rcvr", file->f_dentry->d_iname))
-+	{
-+	    EP_RCVR *rcvr = (EP_RCVR *)(PDE(inode)->data);
-+	    ep_display_rcvr (&pr->pr_di, rcvr, 0);
-+	}
-+
-+	if (!strcmp("debug_full", file->f_dentry->d_iname))
-+	{
-+	    EP_RCVR *rcvr = (EP_RCVR *)(PDE(inode)->data);
-+	    ep_display_rcvr (&pr->pr_di, rcvr, 1);
-+	}
-+
-+	if (pr->pr_len < pr->pr_data_len)
-+	    break;		/* we managed to get all the output into the buffer */
-+
-+	pages++;
-+	KMEM_FREE (pr->pr_data, pr->pr_data_len);
-+    } while (1);
-+
-+    file->private_data = (void *) pr;
-+
-+    MOD_INC_USE_COUNT;
-+    return (0);
-+}
-+
-+struct file_operations ep_proc_operations =
-+{
-+    read:    qsnet_proc_read,
-+    open:    ep_proc_open,
-+    release: qsnet_proc_release,
-+};
-+
-+static int
-+proc_read_rcvr_stats(char *page, char **start, off_t off,
-+		     int count, int *eof, void *data)
-+{
-+    EP_RCVR *rcvr = (EP_RCVR *)data;
-+
-+    if (rcvr == NULL)
-+	sprintf(page,"proc_read_rcvr_stats rcvr=NULL\n");
-+    else {
-+	page[0] = 0;
-+	ep_rcvr_fillout_stats(rcvr,page);
-+    }
-+    return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page)));
-+}
-+
-+static int
-+proc_read_rcvr_rail_stats(char *page, char **start, off_t off,
-+			  int count, int *eof, void *data)
-+{
-+    EP_RCVR_RAIL *rcvr_rail = (EP_RCVR_RAIL *)data;
-+
-+    if (rcvr_rail == NULL) {
-+	strcpy(page,"proc_read_rcvr_rail_stats rcvr_rail=NULL");
-+    } else {
-+	page[0] = 0;
-+	ep_rcvr_rail_fillout_stats(rcvr_rail, page);
-+	EP_RCVR_OP(rcvr_rail,FillOutRailStats)(rcvr_rail,page);
-+    }
-+    return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page)));
-+}
-+
-+void
-+ep_procfs_rcvr_add(EP_RCVR *rcvr)
-+{
-+    /* ep/rcvr/service_number/stats */
-+    /* ep/rcvr/service_number/debug_rcvr */
-+    /* ep/rcvr/service_number/debug_full */
-+    struct proc_dir_entry *p;
-+    char str[32];
-+
-+    sprintf(str,"%d", rcvr->Service);
-+
-+    rcvr->procfs_root = proc_mkdir (str, ep_procfs_rcvr_root);
-+
-+    if ((p = create_proc_entry ("stats", 0, rcvr->procfs_root)) != NULL)
-+    {
-+	p->write_proc = NULL;
-+	p->read_proc  = proc_read_rcvr_stats;
-+	p->data       = rcvr;
-+	p->owner      = THIS_MODULE;
-+    }
-+
-+    if ((p = create_proc_entry ("debug_rcvr", 0, rcvr->procfs_root)) != NULL)
-+    {
-+	p->proc_fops = &ep_proc_operations;
-+	p->owner     = THIS_MODULE;
-+	p->data      = rcvr;
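-+	/* the debug entries are read-only: ep_proc_operations provides
-+	 * only read, open and release handlers */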
-+ } -+ -+ if ((p = create_proc_entry ("debug_full", 0, rcvr->procfs_root)) != NULL) -+ { -+ p->proc_fops = &ep_proc_operations; -+ p->owner = THIS_MODULE; -+ p->data = rcvr; -+ } -+} -+ -+void -+ep_procfs_rcvr_del(EP_RCVR *rcvr) -+{ -+ char str[32]; -+ sprintf(str,"%d", rcvr->Service); -+ -+ remove_proc_entry ("debug_full", rcvr->procfs_root); -+ remove_proc_entry ("debug_rcvr", rcvr->procfs_root); -+ remove_proc_entry ("stats", rcvr->procfs_root); -+ -+ remove_proc_entry (str, ep_procfs_rcvr_root); -+} -+ -+void -+ep_procfs_rcvr_add_rail(EP_RCVR_RAIL *rcvrRail) -+{ -+ /* ep/rcvr/service_number/railN/stats */ -+ -+ struct proc_dir_entry *p; -+ char str[32]; -+ sprintf(str,"rail%d",rcvrRail->CommsRail->Rail->Number); -+ -+ rcvrRail->procfs_root = proc_mkdir (str, rcvrRail->Rcvr->procfs_root); -+ -+ if ((p = create_proc_entry ("stats", 0, rcvrRail->procfs_root)) != NULL) -+ { -+ p->write_proc = NULL; -+ p->read_proc = proc_read_rcvr_rail_stats; -+ p->data = rcvrRail; -+ p->owner = THIS_MODULE; -+ } -+} -+ -+void -+ep_procfs_rcvr_del_rail(EP_RCVR_RAIL *rcvrRail) -+{ -+ char str[32]; -+ sprintf(str,"rail%d",rcvrRail->CommsRail->Rail->Number); -+ -+ remove_proc_entry ("stats", rcvrRail->procfs_root); -+ -+ remove_proc_entry (str, rcvrRail->Rcvr->procfs_root); -+} -+ -+ -+ -+ -+static int -+proc_read_xmtr_stats(char *page, char **start, off_t off, -+ int count, int *eof, void *data) -+{ -+ EP_XMTR *xmtr = (EP_XMTR *)data; -+ -+ if (xmtr == NULL) -+ strcpy(page,"proc_read_xmtr_stats xmtr=NULL\n"); -+ else { -+ page[0] = 0; -+ ep_xmtr_fillout_stats(xmtr, page); -+ } -+ return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page))); -+} -+ -+static int -+proc_read_xmtr_rail_stats(char *page, char **start, off_t off, -+ int count, int *eof, void *data) -+{ -+ EP_XMTR_RAIL *xmtr_rail = (EP_XMTR_RAIL *)data; -+ -+ if (xmtr_rail == NULL) -+ strcpy(page,"proc_read_xmtr_rail_stats xmtr_rail=NULL\n"); -+ else { -+ page[0] = 0; -+ ep_xmtr_rail_fillout_stats(xmtr_rail, page); -+ EP_XMTR_OP(xmtr_rail,FillOutRailStats)(xmtr_rail,page); -+ } -+ return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page))); -+} -+ -+void -+ep_procfs_xmtr_add(EP_XMTR *xmtr) -+{ -+ /* ep/xmtr/service_number/stats */ -+ /* ep/xmtr/service_number/debug_xmtr */ -+ struct proc_dir_entry *p; -+ char str[32]; -+ -+ sprintf(str,"%llx", (unsigned long long) (unsigned long)xmtr); -+ -+ xmtr->procfs_root = proc_mkdir (str, ep_procfs_xmtr_root); -+ -+ if ((p = create_proc_entry ("stats", 0, xmtr->procfs_root)) != NULL) -+ { -+ p->write_proc = NULL; -+ p->read_proc = proc_read_xmtr_stats; -+ p->data = xmtr; -+ p->owner = THIS_MODULE; -+ } -+ -+ if ((p = create_proc_entry ("debug_xmtr", 0, xmtr->procfs_root)) != NULL) -+ { -+ p->proc_fops = &ep_proc_operations; -+ p->owner = THIS_MODULE; -+ p->data = xmtr; -+ } -+} -+ -+void -+ep_procfs_xmtr_del(EP_XMTR *xmtr) -+{ -+ char str[32]; -+ sprintf(str,"%llx", (unsigned long long) (unsigned long)xmtr); -+ -+ remove_proc_entry ("stats", xmtr->procfs_root); -+ remove_proc_entry ("debug_xmtr", xmtr->procfs_root); -+ -+ remove_proc_entry (str, ep_procfs_xmtr_root); -+} -+ -+void -+ep_procfs_xmtr_add_rail(EP_XMTR_RAIL *xmtrRail) -+{ -+ /* ep/xmtr/service_number/railN/stats */ -+ -+ struct proc_dir_entry *p; -+ char str[32]; -+ sprintf(str,"rail%d",xmtrRail->CommsRail->Rail->Number); -+ -+ xmtrRail->procfs_root = proc_mkdir (str, xmtrRail->Xmtr->procfs_root); -+ -+ if ((p = create_proc_entry ("stats", 0, xmtrRail->procfs_root)) != NULL) -+ { -+ p->write_proc = NULL; -+ 
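-+	/* stats is read-only - reads are served by proc_read_xmtr_rail_stats() */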
p->read_proc = proc_read_xmtr_rail_stats; -+ p->data = xmtrRail; -+ p->owner = THIS_MODULE; -+ } -+} -+ -+void -+ep_procfs_xmtr_del_rail(EP_XMTR_RAIL *xmtrRail) -+{ -+ char str[32]; -+ sprintf(str,"rail%d",xmtrRail->CommsRail->Rail->Number); -+ -+ remove_proc_entry ("stats", xmtrRail->procfs_root); -+ -+ remove_proc_entry (str, xmtrRail->Xmtr->procfs_root); -+} -+ -+void -+ep_procfs_rcvr_xmtr_init(void) -+{ -+ ep_procfs_rcvr_root = proc_mkdir ("rcvr", ep_procfs_root); -+ ep_procfs_xmtr_root = proc_mkdir ("xmtr", ep_procfs_root); -+} -+ -+void -+ep_procfs_rcvr_xmtr_fini(void) -+{ -+ remove_proc_entry ("rcvr", ep_procfs_root); -+ remove_proc_entry ("xmtr", ep_procfs_root); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/kalloc.c linux-2.6.9/drivers/net/qsnet/ep/kalloc.c ---- clean/drivers/net/qsnet/ep/kalloc.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/kalloc.c 2004-12-14 05:19:23.000000000 -0500 -@@ -0,0 +1,677 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: kalloc.c,v 1.19 2004/12/14 10:19:23 mike Exp $" -+/* $Source: /cvs/master/quadrics/epmod/kalloc.c,v $ */ -+ -+#include -+ -+#include -+ -+#include "debug.h" -+ -+static void -+HashInPool (EP_ALLOC *alloc, EP_POOL *pool) -+{ -+ int idx0 = HASH (pool->Handle.nmh_nmd.nmd_addr); -+ int idx1 = HASH (pool->Handle.nmh_nmd.nmd_addr + pool->Handle.nmh_nmd.nmd_len); -+ -+ list_add (&pool->HashBase, &alloc->HashBase[idx0]); -+ list_add (&pool->HashTop, &alloc->HashTop[idx1]); -+} -+ -+static void -+HashOutPool (EP_ALLOC *alloc, EP_POOL *pool) -+{ -+ list_del (&pool->HashBase); -+ list_del (&pool->HashTop); -+} -+ -+static EP_POOL * -+LookupPool (EP_ALLOC *alloc, EP_ADDR addr) -+{ -+ struct list_head *el; -+ -+ list_for_each (el, &alloc->HashBase[HASH(addr)]) { -+ EP_POOL *pool = list_entry (el, EP_POOL, HashBase); -+ -+ if (pool->Handle.nmh_nmd.nmd_addr <= addr && addr < (pool->Handle.nmh_nmd.nmd_addr + pool->Handle.nmh_nmd.nmd_len)) -+ return (pool); -+ } -+ -+ list_for_each (el, &alloc->HashTop[HASH(addr)]) { -+ EP_POOL *pool = list_entry (el, EP_POOL, HashTop); -+ -+ if (pool->Handle.nmh_nmd.nmd_addr <= addr && addr < (pool->Handle.nmh_nmd.nmd_addr + pool->Handle.nmh_nmd.nmd_len)) -+ return (pool); -+ } -+ -+ return (NULL); -+} -+ -+static EP_POOL * -+AllocatePool (EP_ALLOC *alloc, EP_ADDR addr, unsigned size, unsigned int perm, EP_ATTRIBUTE attr) -+{ -+ EP_ADDR base = 0; -+ EP_POOL *pool; -+ EP_RAIL *rail; -+ int i, railmask = 0; -+ struct list_head *el; -+ -+ KMEM_ZALLOC (pool, EP_POOL *, sizeof (EP_POOL), !(attr & EP_NO_SLEEP)); -+ -+ if (pool == NULL) -+ return (NULL); -+ -+ if (addr != 0) -+ base = addr; -+ else -+ { -+ for (i = LN2_MIN_SIZE; i <= LN2_MAX_SIZE; i ++) -+ { -+ KMEM_ZALLOC (pool->Bitmaps[i - LN2_MIN_SIZE], bitmap_t *, BT_BITOUL(1 << (LN2_MAX_SIZE-i)) * sizeof (bitmap_t), !(attr & EP_NO_SLEEP)); -+ if (pool->Bitmaps[i - LN2_MIN_SIZE] == NULL) -+ goto failed; -+ } -+ -+ if ((base = ep_rmalloc (alloc->ResourceMap, size, !(attr & EP_NO_SLEEP))) == 0) -+ goto failed; -+ } -+ -+ switch (alloc->Type) -+ { -+ case EP_ALLOC_TYPE_PRIVATE_SDRAM: -+ rail = alloc->Data.Private.Rail; -+ -+ if ((pool->Buffer.Sdram = rail->Operations.SdramAlloc (rail, base, size)) == 0) -+ goto failed; -+ -+ ep_perrail_sdram_map (rail, base, pool->Buffer.Sdram, size, perm, attr); 
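-+
-+	/* record the elan address range this pool covers - LookupPool()
-+	 * resolves an address back to its pool via these nmd fields */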
-+ -+ pool->Handle.nmh_nmd.nmd_addr = base; -+ pool->Handle.nmh_nmd.nmd_len = size; -+ break; -+ -+ case EP_ALLOC_TYPE_PRIVATE_MAIN: -+ KMEM_GETPAGES(pool->Buffer.Ptr, unsigned long, btop (size), !(attr & EP_NO_SLEEP)); -+ if (pool->Buffer.Ptr == 0) -+ goto failed; -+ -+ ep_perrail_kaddr_map (alloc->Data.Private.Rail, base, pool->Buffer.Ptr, size, perm, attr); -+ -+ pool->Handle.nmh_nmd.nmd_addr = base; -+ pool->Handle.nmh_nmd.nmd_len = size; -+ break; -+ -+ case EP_ALLOC_TYPE_SHARED_MAIN: -+ KMEM_GETPAGES(pool->Buffer.Ptr, unsigned long, btop (size), !(attr & EP_NO_SLEEP)); -+ if (pool->Buffer.Ptr == 0) -+ goto failed; -+ -+ list_for_each (el, &alloc->Data.Shared.Rails) { -+ EP_RAIL *rail = list_entry (el, EP_RAIL_ENTRY, Link)->Rail; -+ -+ ep_perrail_kaddr_map (rail, base, pool->Buffer.Ptr, size, perm, attr); -+ -+ railmask |= (1 << rail->Number); -+ } -+ pool->Handle.nmh_nmd.nmd_addr = base; -+ pool->Handle.nmh_nmd.nmd_len = size; -+ pool->Handle.nmh_nmd.nmd_attr = EP_NMD_ATTR (alloc->Data.Shared.System->Position.pos_nodeid, railmask); -+ -+ ep_nmh_insert (&alloc->Data.Shared.System->MappingTable, &pool->Handle); -+ break; -+ -+ default: -+ goto failed; -+ } -+ -+ return (pool); -+ -+ failed: -+ if (addr == 0 && base) -+ ep_rmfree (alloc->ResourceMap, size, base); -+ -+ for (i = LN2_MIN_SIZE; i <= LN2_MAX_SIZE; i ++) -+ if (pool->Bitmaps[i - LN2_MIN_SIZE] != NULL) -+ KMEM_FREE (pool->Bitmaps[i - LN2_MIN_SIZE], BT_BITOUL(1 << (LN2_MAX_SIZE - i)) * sizeof (bitmap_t)); -+ -+ KMEM_FREE (pool, sizeof (EP_POOL)); -+ return (NULL); -+} -+ -+static void -+FreePool (EP_ALLOC *alloc, EP_POOL *pool) -+{ -+ struct list_head *el; -+ int i; -+ -+ switch (alloc->Type) -+ { -+ case EP_ALLOC_TYPE_PRIVATE_SDRAM: -+ ep_perrail_unmap (alloc->Data.Private.Rail, pool->Handle.nmh_nmd.nmd_addr, pool->Handle.nmh_nmd.nmd_len); -+ -+ alloc->Data.Private.Rail->Operations.SdramFree (alloc->Data.Private.Rail, pool->Buffer.Sdram, pool->Handle.nmh_nmd.nmd_len); -+ break; -+ -+ case EP_ALLOC_TYPE_PRIVATE_MAIN: -+ ep_perrail_unmap (alloc->Data.Private.Rail, pool->Handle.nmh_nmd.nmd_addr, pool->Handle.nmh_nmd.nmd_len); -+ -+ KMEM_FREEPAGES (pool->Buffer.Ptr, btop (pool->Handle.nmh_nmd.nmd_len)); -+ break; -+ -+ case EP_ALLOC_TYPE_SHARED_MAIN: -+ ep_nmh_remove (&alloc->Data.Shared.System->MappingTable, &pool->Handle); -+ -+ list_for_each (el, &alloc->Data.Shared.Rails) { -+ EP_RAIL *rail = list_entry (el, EP_RAIL_ENTRY, Link)->Rail; -+ -+ ep_perrail_unmap (rail, pool->Handle.nmh_nmd.nmd_addr, pool->Handle.nmh_nmd.nmd_len); -+ } -+ -+ KMEM_FREEPAGES (pool->Buffer.Ptr, btop (pool->Handle.nmh_nmd.nmd_len)); -+ break; -+ } -+ -+ if (pool->Bitmaps[0]) -+ { -+ ep_rmfree (alloc->ResourceMap, pool->Handle.nmh_nmd.nmd_len, pool->Handle.nmh_nmd.nmd_addr); -+ -+ for (i = LN2_MIN_SIZE; i <= LN2_MAX_SIZE; i ++) -+ KMEM_FREE (pool->Bitmaps[i - LN2_MIN_SIZE], BT_BITOUL(1 << (LN2_MAX_SIZE - i)) * sizeof (bitmap_t)); -+ } -+ -+ KMEM_FREE (pool, sizeof (EP_POOL)); -+} -+ -+static int -+AddRail (EP_ALLOC *alloc, EP_RAIL *rail) -+{ -+ struct list_head *el; -+ EP_RAIL_ENTRY *l; -+ unsigned long flags; -+ int i; -+ -+ ASSERT (alloc->Type == EP_ALLOC_TYPE_SHARED_MAIN); -+ -+ KMEM_ZALLOC (l, EP_RAIL_ENTRY *, sizeof (EP_RAIL_ENTRY), 1); -+ -+ if (l == NULL) -+ return (ENOMEM); -+ -+ l->Rail = rail; -+ -+ spin_lock_irqsave (&alloc->Lock, flags); -+ for (i = 0; i < NHASH; i++) -+ { -+ list_for_each (el, &alloc->HashBase[i]) { -+ EP_POOL *pool = list_entry (el, EP_POOL, HashBase); -+ -+ ep_perrail_kaddr_map (rail, pool->Handle.nmh_nmd.nmd_addr, 
pool->Buffer.Ptr, -+ pool->Handle.nmh_nmd.nmd_len, EP_PERM_WRITE, EP_NO_SLEEP); -+ -+ pool->Handle.nmh_nmd.nmd_attr |= EP_NMD_ATTR (0, 1 << rail->Number); -+ } -+ } -+ -+ list_add (&l->Link, &alloc->Data.Shared.Rails); -+ -+ spin_unlock_irqrestore (&alloc->Lock, flags); -+ return (0); -+} -+ -+static void -+RemoveRail (EP_ALLOC *alloc, EP_RAIL *rail) -+{ -+ struct list_head *el; -+ unsigned long flags; -+ int i; -+ -+ spin_lock_irqsave (&alloc->Lock, flags); -+ for (i = 0; i < NHASH; i++) -+ { -+ list_for_each (el, &alloc->HashBase[i]) { -+ EP_POOL *pool = list_entry (el, EP_POOL, HashBase); -+ -+ ep_perrail_unmap (rail, pool->Handle.nmh_nmd.nmd_addr, pool->Handle.nmh_nmd.nmd_len); -+ -+ pool->Handle.nmh_nmd.nmd_attr &= ~EP_NMD_ATTR (0, 1 << rail->Number); -+ } -+ } -+ -+ list_for_each (el, &alloc->Data.Shared.Rails) { -+ EP_RAIL_ENTRY *tmp = list_entry (el, EP_RAIL_ENTRY, Link); -+ if (tmp->Rail == rail) -+ { -+ list_del (el); -+ KMEM_FREE(tmp, sizeof (EP_RAIL_ENTRY)); -+ break; -+ } -+ } -+ -+ spin_unlock_irqrestore (&alloc->Lock, flags); -+} -+ -+static EP_POOL * -+AllocateBlock (EP_ALLOC *alloc, unsigned size, EP_ATTRIBUTE attr, int *offset) -+{ -+ int block, j, k; -+ unsigned long flags; -+ EP_POOL *pool; -+ -+ -+ if (size > MAX_SIZE) -+ { -+ if ((attr & EP_NO_ALLOC) || (pool = AllocatePool (alloc, 0, size, alloc->Perm, attr)) == NULL) -+ return (NULL); -+ -+ spin_lock_irqsave (&alloc->Lock, flags); -+ HashInPool (alloc, pool); -+ spin_unlock_irqrestore (&alloc->Lock, flags); -+ -+ *offset = 0; -+ -+ return pool; -+ } -+ -+ spin_lock_irqsave (&alloc->Lock, flags); -+ -+ /* Round up size to next power of 2 */ -+ for (k = LN2_MIN_SIZE; (1 << k) < size; k++) -+ ; -+ -+ /* k now has ln2 of the size to allocate. */ -+ /* find the free list with the smallest block we can use*/ -+ for (j = k; j <= LN2_MAX_SIZE && list_empty (&alloc->Freelists[j - LN2_MIN_SIZE]); j++) -+ ; -+ -+ /* j has ln2 of the smallest size block we can use */ -+ if (j < LN2_MAX_SIZE) -+ { -+ int nbits = 1 << (LN2_MAX_SIZE-j); -+ -+ pool = list_entry (alloc->Freelists[j - LN2_MIN_SIZE].next, EP_POOL, Link[j - LN2_MIN_SIZE]); -+ block = (bt_lowbit (pool->Bitmaps[j - LN2_MIN_SIZE], nbits) << j); -+ -+ BT_CLEAR (pool->Bitmaps[j - LN2_MIN_SIZE], block >> j); -+ -+ if (bt_lowbit (pool->Bitmaps[j - LN2_MIN_SIZE], nbits) == -1) -+ list_del (&pool->Link[j - LN2_MIN_SIZE]); -+ } -+ else -+ { -+ spin_unlock_irqrestore (&alloc->Lock, flags); -+ -+ if ((attr & EP_NO_ALLOC) || (pool = AllocatePool (alloc, 0, MAX_SIZE, alloc->Perm, attr)) == NULL) -+ return (NULL); -+ -+ block = 0; -+ j = LN2_MAX_SIZE; -+ -+ spin_lock_irqsave (&alloc->Lock, flags); -+ -+ HashInPool (alloc, pool); -+ } -+ -+ /* Split it until the buddies are the correct size, putting one -+ * buddy back on the free list and continuing to split the other */ -+ while (--j >= k) -+ { -+ list_add (&pool->Link[j - LN2_MIN_SIZE], &alloc->Freelists[j - LN2_MIN_SIZE]); -+ -+ BT_SET (pool->Bitmaps[j - LN2_MIN_SIZE], block >> j); -+ -+ block += (1 << j); -+ } -+ spin_unlock_irqrestore (&alloc->Lock, flags); -+ -+ *offset = block; -+ -+ return (pool); -+} -+ -+static void -+FreeBlock (EP_ALLOC *alloc, EP_ADDR addr, unsigned size) -+{ -+ EP_POOL *pool; -+ int k, block = 0; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&alloc->Lock, flags); -+ /* Round up size to next power of 2 */ -+ for (k = LN2_MIN_SIZE; (1 << k) < size; k++) -+ ; -+ -+ /* Find the pool containing this block */ -+ pool = LookupPool (alloc, addr); -+ -+ /* It must exist */ -+ ASSERT (pool != NULL); -+ -+ /* If 
we're freeing a subset of it, then update the bitmaps */ -+ if (size <= MAX_SIZE) -+ { -+ ASSERT (BT_TEST (pool->Bitmaps[k - LN2_MIN_SIZE], (addr - pool->Handle.nmh_nmd.nmd_addr) >> k) == 0); -+ -+ block = addr - pool->Handle.nmh_nmd.nmd_addr; -+ -+ while (k < LN2_MAX_SIZE && BT_TEST (pool->Bitmaps[k - LN2_MIN_SIZE], (block >> k) ^ 1)) -+ { -+ BT_CLEAR (pool->Bitmaps[k - LN2_MIN_SIZE], (block >> k) ^ 1); -+ -+ if (bt_lowbit (pool->Bitmaps[k - LN2_MIN_SIZE], (1 << (LN2_MAX_SIZE - k))) == -1) -+ list_del (&pool->Link[k - LN2_MIN_SIZE]); -+ -+ k++; -+ } -+ } -+ -+ if (k >= LN2_MAX_SIZE) -+ { -+ HashOutPool (alloc, pool); -+ spin_unlock_irqrestore (&alloc->Lock, flags); -+ -+ FreePool (alloc, pool); -+ } -+ else -+ { -+ if (bt_lowbit (pool->Bitmaps[k - LN2_MIN_SIZE], (1 << (LN2_MAX_SIZE - k))) == -1) -+ list_add (&pool->Link[k - LN2_MIN_SIZE], &alloc->Freelists[k - LN2_MIN_SIZE]); -+ -+ BT_SET (pool->Bitmaps[k - LN2_MIN_SIZE], block >> k); -+ -+ spin_unlock_irqrestore (&alloc->Lock, flags); -+ } -+} -+ -+static void -+InitialiseAllocator (EP_ALLOC *alloc, EP_ALLOC_TYPE type, unsigned int perm, EP_RMAP *rmap) -+{ -+ int i; -+ -+ spin_lock_init (&alloc->Lock); -+ -+ alloc->Type = type; -+ alloc->ResourceMap = rmap; -+ alloc->Perm = perm; -+ -+ for (i = 0; i < NHASH; i++) -+ { -+ (&alloc->HashBase[i])->next = &alloc->HashBase[i]; -+ -+ INIT_LIST_HEAD (&alloc->HashBase[i]); -+ INIT_LIST_HEAD (&alloc->HashTop[i]); -+ } -+ -+ for (i = 0; i < NUM_FREELISTS; i++) -+ INIT_LIST_HEAD (&alloc->Freelists[i]); -+} -+ -+static void -+DestroyAllocator (EP_ALLOC *alloc) -+{ -+ struct list_head *el, *next; -+ int i; -+ -+ for (i = 0; i < NHASH; i++) -+ { -+ list_for_each_safe (el, next, &alloc->HashBase[i]) { -+ EP_POOL *pool = list_entry (el, EP_POOL, HashBase); -+ -+ printk ("!!DestroyAllocator: pool=%p type=%d addr=%x len=%x\n", pool, alloc->Type, -+ pool->Handle.nmh_nmd.nmd_addr, pool->Handle.nmh_nmd.nmd_len); -+ -+ list_del (&pool->HashBase); -+ list_del (&pool->HashTop); -+ -+ // XXXX: FreePool (alloc, pool); -+ } -+ } -+ -+ spin_lock_destroy (&alloc->Lock); -+} -+ -+void -+ep_display_alloc (EP_ALLOC *alloc) -+{ -+ struct list_head *el; -+ int i; -+ int npools = 0; -+ int nbytes = 0; -+ int nfree = 0; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&alloc->Lock, flags); -+ -+ ep_debugf (DBG_DEBUG, "Kernel comms memory allocator %p type %d\n", alloc, alloc->Type); -+ for (i = 0; i < NHASH; i++) -+ { -+ list_for_each (el, &alloc->HashBase[i]) { -+ EP_POOL *pool = list_entry (el, EP_POOL, HashBase); -+ -+ ep_debugf (DBG_DEBUG, " POOL %4x: %p -> %x.%x\n", i, pool, pool->Handle.nmh_nmd.nmd_addr, -+ pool->Handle.nmh_nmd.nmd_addr + pool->Handle.nmh_nmd.nmd_len); -+ -+ npools++; -+ nbytes += pool->Handle.nmh_nmd.nmd_len; -+ } -+ } -+ -+ for (i = LN2_MIN_SIZE; i <= LN2_MAX_SIZE; i++) -+ { -+ int n = 0; -+ -+ list_for_each (el, &alloc->Freelists[i - LN2_MIN_SIZE]) { -+ EP_POOL *pool = list_entry (el, EP_POOL, Link[i - LN2_MIN_SIZE]); -+ int nbits = bt_nbits (pool->Bitmaps[i - LN2_MIN_SIZE], 1 << (LN2_MAX_SIZE - i)); -+ -+ n += nbits; -+ nfree += (nbits << i); -+ } -+ -+ if (n != 0) -+ ep_debugf (DBG_DEBUG, " SIZE %5d : num %d\n", (1 << i), n); -+ } -+ ep_debugf (DBG_DEBUG, "%d pools with %d bytes and %d bytes free\n", npools, nbytes, nfree); -+ -+ spin_unlock_irqrestore (&alloc->Lock, flags); -+} -+ -+/* per-rail allocators */ -+void -+ep_alloc_init (EP_RAIL *rail) -+{ -+ EP_RMAP *rmap = ep_rmallocmap (EP_PRIVATE_RMAP_SIZE, "PrivateMap", 1); -+ -+ ep_rmfree (rmap, EP_PRIVATE_TOP-EP_PRIVATE_BASE, EP_PRIVATE_BASE); 
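-+
-+    /* both per-rail allocators carve elan addresses out of the same private
-+     * resource map - SDRAM is mapped with EP_PERM_ALL, main memory with
-+     * EP_PERM_WRITE */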
-+ -+ InitialiseAllocator (&rail->ElanAllocator, EP_ALLOC_TYPE_PRIVATE_SDRAM, EP_PERM_ALL, rmap); -+ InitialiseAllocator (&rail->MainAllocator, EP_ALLOC_TYPE_PRIVATE_MAIN, EP_PERM_WRITE, rmap); -+ -+ rail->ElanAllocator.Data.Private.Rail = rail; -+ rail->MainAllocator.Data.Private.Rail = rail; -+} -+ -+void -+ep_alloc_fini (EP_RAIL *rail) -+{ -+ EP_RMAP *rmap = rail->ElanAllocator.ResourceMap; -+ -+ DestroyAllocator (&rail->ElanAllocator); -+ DestroyAllocator (&rail->MainAllocator); -+ -+ ep_rmfreemap (rmap); -+} -+ -+sdramaddr_t -+ep_alloc_memory_elan (EP_RAIL *rail, EP_ADDR addr, unsigned size, unsigned int perm, EP_ATTRIBUTE attr) -+{ -+ EP_POOL *pool = AllocatePool (&rail->ElanAllocator, addr, size, perm, attr); -+ unsigned long flags; -+ -+ if (pool == NULL) -+ return (0); -+ -+ spin_lock_irqsave (&rail->ElanAllocator.Lock, flags); -+ HashInPool (&rail->ElanAllocator, pool); -+ spin_unlock_irqrestore (&rail->ElanAllocator.Lock, flags); -+ -+ return (pool->Buffer.Sdram); -+} -+ -+void -+ep_free_memory_elan (EP_RAIL *rail, EP_ADDR addr) -+{ -+ EP_POOL *pool; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&rail->ElanAllocator.Lock, flags); -+ pool = LookupPool (&rail->ElanAllocator, addr); -+ -+ HashOutPool (&rail->ElanAllocator, pool); -+ spin_unlock_irqrestore (&rail->ElanAllocator.Lock, flags); -+ -+ FreePool (&rail->ElanAllocator, pool); -+} -+ -+sdramaddr_t -+ep_alloc_elan (EP_RAIL *rail, unsigned size, EP_ATTRIBUTE attr, EP_ADDR *addrp) -+{ -+ int offset; -+ EP_POOL *pool; -+ -+ if ((pool = AllocateBlock (&rail->ElanAllocator, size, attr, &offset)) == NULL) -+ return (0); -+ -+ *addrp = pool->Handle.nmh_nmd.nmd_addr + offset; -+ -+ return (pool->Buffer.Sdram + offset); -+} -+ -+void -+ep_free_elan (EP_RAIL *rail, EP_ADDR addr, unsigned size) -+{ -+ FreeBlock (&rail->ElanAllocator, addr, size); -+} -+ -+void * -+ep_alloc_main (EP_RAIL *rail, unsigned size, EP_ATTRIBUTE attr, EP_ADDR *addrp) -+{ -+ int offset; -+ EP_POOL *pool; -+ -+ if ((pool = AllocateBlock (&rail->MainAllocator, size, attr, &offset)) == NULL) -+ return (NULL); -+ -+ *addrp = pool->Handle.nmh_nmd.nmd_addr + offset; -+ -+ return ((void *) ((unsigned long) pool->Buffer.Ptr + offset)); -+} -+ -+void -+ep_free_main (EP_RAIL *rail, EP_ADDR addr, unsigned size) -+{ -+ FreeBlock (&rail->MainAllocator, addr, size); -+} -+ -+sdramaddr_t -+ep_elan2sdram (EP_RAIL *rail, EP_ADDR addr) -+{ -+ EP_POOL *pool; -+ sdramaddr_t res; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&rail->ElanAllocator.Lock, flags); -+ if ((pool = LookupPool (&rail->ElanAllocator, addr)) == NULL) -+ res = 0; -+ else -+ res = pool->Buffer.Sdram + (addr - pool->Handle.nmh_nmd.nmd_addr); -+ spin_unlock_irqrestore (&rail->ElanAllocator.Lock, flags); -+ -+ return (res); -+} -+ -+void * -+ep_elan2main (EP_RAIL *rail, EP_ADDR addr) -+{ -+ EP_POOL *pool; -+ void *res; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&rail->MainAllocator.Lock, flags); -+ if ((pool = LookupPool (&rail->MainAllocator, addr)) == NULL) -+ res = NULL; -+ else -+ res = (void *) ((unsigned long) pool->Buffer.Ptr + (addr - pool->Handle.nmh_nmd.nmd_addr)); -+ spin_unlock_irqrestore (&rail->MainAllocator.Lock, flags); -+ -+ return (res); -+} -+ -+/* shared allocators */ -+int -+ep_shared_alloc_add_rail (EP_SYS *sys, EP_RAIL *rail) -+{ -+ return (AddRail (&sys->Allocator, rail)); -+} -+ -+void -+ep_shared_alloc_remove_rail (EP_SYS *sys, EP_RAIL *rail) -+{ -+ RemoveRail (&sys->Allocator, rail); -+} -+ -+void -+ep_shared_alloc_init (EP_SYS *sys) -+{ -+ EP_RMAP *rmap = ep_rmallocmap 
(EP_SHARED_RMAP_SIZE, "shared_alloc_map", 1);
-+
-+    ep_rmfree (rmap, EP_SHARED_TOP - EP_SHARED_BASE, EP_SHARED_BASE);
-+
-+    InitialiseAllocator (&sys->Allocator, EP_ALLOC_TYPE_SHARED_MAIN, EP_PERM_WRITE, rmap);
-+
-+    INIT_LIST_HEAD (&sys->Allocator.Data.Shared.Rails);
-+
-+    sys->Allocator.Data.Shared.System = sys;
-+}
-+
-+void
-+ep_shared_alloc_fini (EP_SYS *sys)
-+{
-+    EP_RMAP *rmap = sys->Allocator.ResourceMap;
-+
-+    DestroyAllocator (&sys->Allocator);
-+
-+    ep_rmfreemap (rmap);
-+}
-+
-+void *
-+ep_shared_alloc_main (EP_SYS *sys, unsigned size, EP_ATTRIBUTE attr, EP_NMD *nmd)
-+{
-+    int      offset;
-+    EP_POOL *pool;
-+
-+    if ((pool = AllocateBlock (&sys->Allocator, size, attr, &offset)) == NULL)
-+	return (NULL);
-+
-+    ep_nmd_subset (nmd, &pool->Handle.nmh_nmd, offset, size);
-+
-+    return ((void *) ((unsigned long) pool->Buffer.Ptr + offset));
-+}
-+
-+void
-+ep_shared_free_main (EP_SYS *sys, EP_NMD *nmd)
-+{
-+    FreeBlock (&sys->Allocator, nmd->nmd_addr, nmd->nmd_len);
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "stroustrup"
-+ * End:
-+ */
-diff -urN clean/drivers/net/qsnet/ep/kcomm.c linux-2.6.9/drivers/net/qsnet/ep/kcomm.c
---- clean/drivers/net/qsnet/ep/kcomm.c	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/ep/kcomm.c	2005-07-20 08:01:34.000000000 -0400
-@@ -0,0 +1,1447 @@
-+/*
-+ *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ *    Copyright (c) 2002-2003 by Quadrics Ltd.
-+ *
-+ *    For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ident "@(#)$Id: kcomm.c,v 1.61.2.2 2005/07/20 12:01:34 mike Exp $"
-+/*      $Source: /cvs/master/quadrics/epmod/kcomm.c,v $ */
-+
-+#include
-+#include
-+
-+#include
-+#include
-+#include
-+
-+#include "cm.h"
-+#include "debug.h"
-+
-+int MaxSwitchLevels = 5;	/* Max 1024 sized machine */
-+
-+static char *NodeStateNames[EP_NODE_NUM_STATES] =
-+{
-+    "Disconnected",
-+    "Connecting",
-+    "Connected",
-+    "LeavingConnected",
-+    "LocalPassivate",
-+    "RemotePassivate",
-+    "Passivated",
-+    "Disconnecting",
-+};
-+
-+static void
-+ep_xid_cache_fill (EP_SYS *sys, EP_XID_CACHE *cache)
-+{
-+    unsigned long flags;
-+
-+    spin_lock_irqsave (&sys->XidLock, flags);
-+
-+    cache->Current = sys->XidNext;
-+    cache->Last    = cache->Current + EP_XID_CACHE_CHUNKS-1;
-+
-+    sys->XidNext += EP_XID_CACHE_CHUNKS;
-+
-+    spin_unlock_irqrestore (&sys->XidLock, flags);
-+}
-+
-+EP_XID
-+ep_xid_cache_alloc (EP_SYS *sys, EP_XID_CACHE *cache)
-+{
-+    EP_XID xid;
-+
-+    if (cache->Current == cache->Last)
-+	ep_xid_cache_fill (sys, cache);
-+
-+    xid.Generation = sys->XidGeneration;
-+    xid.Handle     = cache->Handle;
-+    xid.Unique     = cache->Current++;
-+
-+    return (xid);
-+}
-+
-+void
-+ep_xid_cache_init (EP_SYS *sys, EP_XID_CACHE *cache)
-+{
-+    /* Stall manager thread - it doesn't lock the XidCacheList */
-+    ep_kthread_stall (&sys->ManagerThread);
-+
-+    cache->Handle = ++sys->XidHandle;
-+
-+    list_add_tail (&cache->Link, &sys->XidCacheList);
-+
-+    ep_kthread_resume (&sys->ManagerThread);
-+}
-+
-+void
-+ep_xid_cache_destroy (EP_SYS *sys, EP_XID_CACHE *cache)
-+{
-+    /* Stall manager thread - it doesn't lock the XidCacheList */
-+    ep_kthread_stall (&sys->ManagerThread);
-+
-+    list_del (&cache->Link);
-+
-+    ep_kthread_resume (&sys->ManagerThread);
-+}
-+
-+EP_XID_CACHE *
-+ep_xid_cache_find (EP_SYS *sys, EP_XID xid)
-+{
-+    struct list_head *el;
-+
-+    list_for_each (el, &sys->XidCacheList) {
-+	EP_XID_CACHE *cache = list_entry (el, EP_XID_CACHE, Link);
-+
-+	if (sys->XidGeneration == xid.Generation && cache->Handle == xid.Handle)
-+	    return (cache);
-+    }
-+
-+    return (NULL);
-+}
-+
-+static int
-+MsgBusy (EP_RAIL *rail, EP_OUTPUTQ *outputq, int slotNum)
-+{
-+    switch (rail->Operations.OutputQState (rail, outputq, slotNum))
-+    {
-+    case EP_OUTPUTQ_BUSY:			/* still busy */
-+	return 1;
-+
-+    case EP_OUTPUTQ_FAILED:			/* NACKed */
-+    {
-+#if defined(DEBUG_PRINTF)
-+	EP_MANAGER_MSG *msg = rail->Operations.OutputQMsg (rail, outputq, slotNum);
-+
-+	EPRINTF4 (DBG_MANAGER, "%s: kcomm msg %d type %d to %d failed\n", rail->Name, slotNum, msg->Hdr.Type, msg->Hdr.DestId);
-+#endif
-+	break;
-+    }
-+
-+    case EP_OUTPUTQ_FINISHED:			/* anything else is finished */
-+	break;
-+    }
-+
-+    return 0;
-+}
-+
-+int
-+ep_send_message (EP_RAIL *rail, int nodeId, int type, EP_XID xid, EP_MANAGER_MSG_BODY *body)
-+{
-+    EP_SYS         *sys     = rail->System;
-+    EP_NODE        *node    = &sys->Nodes[nodeId];
-+    int             n       = EP_MANAGER_OUTPUTQ_SLOTS;
-+    int             slotNum;
-+    int             rnum;
-+    EP_RAIL        *msgRail;
-+    EP_MANAGER_MSG *msg;
-+    unsigned long   flags;
-+
-+    ASSERT (! EP_XID_INVALID (xid));
-+
-+    if ((rnum = ep_pickRail (node->ConnectedRails)) >= 0)
-+	msgRail = sys->Rails[rnum];
-+    else
-+    {
-+	if (EP_MANAGER_MSG_TYPE_CONNECTED(type))
-+	{
-+	    ep_debugf (DBG_MANAGER, "%s: no rails available, trying to send type %d to %d\n", rail->Name, type, nodeId);
-+	    return -EHOSTDOWN;
-+	}
-+
-+	ep_debugf (DBG_MANAGER, "%s: no rails connected to %d - using receiving rail\n", rail->Name, nodeId);
-+
-+	msgRail = rail;
-+    }
-+
-+
-+    spin_lock_irqsave (&msgRail->ManagerOutputQLock, flags);
-+
-+    slotNum = msgRail->ManagerOutputQNextSlot;
-+
-+    while (n-- > 0 && MsgBusy (msgRail, msgRail->ManagerOutputQ, slotNum))	/* search for idle message buffer */
-+    {
-+	if (++(msgRail->ManagerOutputQNextSlot) == EP_MANAGER_OUTPUTQ_SLOTS)
-+	    msgRail->ManagerOutputQNextSlot = 0;
-+
-+	slotNum = msgRail->ManagerOutputQNextSlot;
-+    }
-+
-+    if (n < 0)					/* all message buffers busy */
-+    {
-+	spin_unlock_irqrestore (&msgRail->ManagerOutputQLock, flags);
-+
-+	ep_debugf (DBG_MANAGER, "%s: all message buffers busy: trying to send type %d to %d\n", msgRail->Name, type, nodeId);
-+	return -EBUSY;
-+    }
-+
-+    msg = msgRail->Operations.OutputQMsg (msgRail, msgRail->ManagerOutputQ, slotNum);
-+
-+    EPRINTF7 (DBG_MANAGER, "%s: ep_send_message: type=%d nodeId=%d rail=%d xid=%08x.%08x.%016llx\n",
-+	      msgRail->Name, type, nodeId, rail->Number, xid.Generation, xid.Handle, (long long) xid.Unique);
-+
-+    msg->Hdr.Version  = EP_MANAGER_MSG_VERSION;
-+    msg->Hdr.Type     = type;
-+    msg->Hdr.Rail     = rail->Number;
-+    msg->Hdr.NodeId   = msgRail->Position.pos_nodeid;
-+    msg->Hdr.DestId   = nodeId;
-+    msg->Hdr.Xid      = xid;
-+    msg->Hdr.Checksum = 0;
-+
-+    if (body) bcopy (body, &msg->Body, sizeof (EP_MANAGER_MSG_BODY));
-+
-+    msg->Hdr.Checksum = CheckSum ((char *) msg, EP_MANAGER_MSG_SIZE);
-+
-+    if (msgRail->Operations.OutputQSend (msgRail, msgRail->ManagerOutputQ, slotNum, EP_MANAGER_MSG_SIZE,
-+					 nodeId, EP_SYSTEMQ_MANAGER, EP_MANAGER_OUTPUTQ_RETRIES) < 0)
-+	IncrStat (msgRail, SendMessageFailed);
-+
-+    if (++(msgRail->ManagerOutputQNextSlot) == EP_MANAGER_OUTPUTQ_SLOTS)	/* check this one last next time */
-+	msgRail->ManagerOutputQNextSlot = 0;
-+
-+    spin_unlock_irqrestore (&msgRail->ManagerOutputQLock, flags);
-+
-+    return 0;
-+}
-+
-+void
-+ep_panic_node (EP_SYS *sys, int nodeId, unsigned char *reason)
-+{
-+    EP_NODE            *node = &sys->Nodes[nodeId];
-+    EP_MANAGER_MSG_BODY body;
-+    EP_XID              xid;
-+    kcondvar_t          sleep;
-+    int                 rnum;
-+    unsigned long       flags;
-+
-+    if (nodeId >= sys->Position.pos_nodes)
-+	return;
-+
-+    strncpy (body.PanicReason, reason, sizeof 
(body.PanicReason)); -+ -+ kcondvar_init (&sleep); -+ spin_lock_irqsave (&sys->NodeLock, flags); -+ for (;;) -+ { -+ if (node->ConnectedRails == 0) -+ break; -+ -+ for (rnum = 0; rnum < EP_MAX_RAILS; rnum++) -+ if (node->ConnectedRails & (1 << rnum)) -+ break; -+ -+ xid = ep_xid_cache_alloc(sys, &sys->Rails[rnum]->XidCache); -+ -+ if (ep_send_message (sys->Rails[rnum], nodeId, EP_MANAGER_MSG_TYPE_REMOTE_PANIC, xid, &body) == 0) -+ break; -+ -+ if (kcondvar_timedwaitsig (&sleep, &sys->NodeLock, &flags, lbolt + hz) == CV_RET_SIGPENDING) -+ break; -+ } -+ spin_unlock_irqrestore (&sys->NodeLock, flags); -+ kcondvar_destroy (&sleep); -+} -+ -+static void -+ProcessNeterrRequest (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg) -+{ -+ EPRINTF4 (DBG_NETWORK_ERROR, "%s: process neterr request - node %d cookies %llx %llx\n", rail->Name, msg->Hdr.NodeId, (long long)msg->Body.Cookies[0], (long long)msg->Body.Cookies[1]); -+ -+ rail->Operations.NeterrFixup (rail, msg->Hdr.NodeId, msg->Body.Cookies); -+ -+ ep_send_message (rail, msg->Hdr.NodeId, EP_MANAGER_MSG_TYPE_NETERR_RESPONSE, msg->Hdr.Xid, &msg->Body); -+} -+ -+ -+static void -+ProcessNeterrResponse (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg) -+{ -+ EP_SYS *sys = rail->System; -+ EP_NODE_RAIL *nodeRail = &rail->Nodes[msg->Hdr.NodeId]; -+ unsigned long flags; -+ -+ EPRINTF4 (DBG_NETWORK_ERROR, "%s: process neterr response - node %d cookies %llx %llx\n", rail->Name, msg->Hdr.NodeId, (long long)msg->Body.Cookies[0], (long long)msg->Body.Cookies[1]); -+ -+ spin_lock_irqsave (&sys->NodeLock, flags); -+ if (EP_XIDS_MATCH (nodeRail->MsgXid, msg->Hdr.Xid)) -+ { -+ EP_INVALIDATE_XID (nodeRail->MsgXid); -+ -+ if (nodeRail->NetworkErrorCookies[0] != 0 && nodeRail->NetworkErrorCookies[0] == msg->Body.Cookies[0]) -+ nodeRail->NetworkErrorCookies[0] = 0; -+ -+ if (nodeRail->NetworkErrorCookies[1] != 0 && nodeRail->NetworkErrorCookies[1] == msg->Body.Cookies[1]) -+ nodeRail->NetworkErrorCookies[1] = 0; -+ -+ if (nodeRail->NetworkErrorCookies[0] == 0 && nodeRail->NetworkErrorCookies[1] == 0) -+ nodeRail->NetworkErrorState &= ~EP_NODE_NETERR_ATOMIC_PACKET; -+ } -+ spin_unlock_irqrestore (&sys->NodeLock, flags); -+} -+ -+ -+static void -+ProcessGetNodeState (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg) -+{ -+ EP_NODE_RAIL *nodeRail = &rail->Nodes[msg->Hdr.NodeId]; -+ unsigned int service = msg->Body.Service; -+ -+ EPRINTF5 (DBG_MANAGER, "%s: ProcessGetNodeState: %s - %d %s%s\n", msgRail->Name, rail->Name, msg->Hdr.NodeId, -+ NodeStateNames[nodeRail->State], nodeRail->NetworkErrorState ? " (NetworkError)" : ""); -+ -+ msg->Body.NodeState.State = nodeRail->State; -+ msg->Body.NodeState.NetworkErrorState = nodeRail->NetworkErrorState; -+ msg->Body.NodeState.Railmask = ep_rcvr_railmask (rail->System, service); -+ -+ if (ep_send_message (rail, msg->Hdr.NodeId, EP_MANAGER_MSG_TYPE_GET_NODE_STATE_RESPONSE, msg->Hdr.Xid, &msg->Body) < 0) -+ printk ("%s: get node state for %s[%d] - failed to send response\n", msgRail->Name, rail->Name, msg->Hdr.NodeId); -+} -+ -+static void -+ProcessFlushRequest (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg) -+{ -+ EP_NODE_RAIL *nodeRail = &rail->Nodes[msg->Hdr.NodeId]; -+ -+ EPRINTF5 (DBG_MANAGER, "%s: ProcessFlushRequest: %s - %d %s%s\n", msgRail->Name, rail->Name, msg->Hdr.NodeId, -+ NodeStateNames[nodeRail->State], nodeRail->NetworkErrorState ? 
" (NetworkError)" : ""); -+ -+ switch (nodeRail->State) -+ { -+ case EP_NODE_REMOTE_PASSIVATE: -+ nodeRail->NextRunTime = lbolt + MSGBUSY_RETRY_TIME; /* retransmit our flush request quickly */ -+ EPRINTF3 (DBG_MANAGER, "%s: ProcessFlushRequest: NextRunTime -> %lx (%lx)\n", rail->Name, nodeRail->NextRunTime, lbolt); -+ /* DROPTHROUGH */ -+ -+ case EP_NODE_PASSIVATED: -+ case EP_NODE_DISCONNECTED: -+ if (nodeRail->NetworkErrorState != 0) -+ break; -+ -+ if (ep_send_message (rail, msg->Hdr.NodeId, EP_MANAGER_MSG_TYPE_FLUSH_RESPONSE, msg->Hdr.Xid, NULL) < 0) -+ printk ("%s: flush request for %s[%d] - failed to send response\n", msgRail->Name, rail->Name, msg->Hdr.NodeId); -+ break; -+ -+ default: -+ EPRINTF4 (DBG_MANAGER, "%s: flush request for %s[%d] - node not in approriate state - %s\n", msgRail->Name, rail->Name, msg->Hdr.NodeId, NodeStateNames[nodeRail->State]); -+ break; -+ } -+} -+ -+static void -+ProcessFlushResponse (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg) -+{ -+ EP_NODE_RAIL *nodeRail= &rail->Nodes[msg->Hdr.NodeId]; -+ -+ EPRINTF5 (DBG_MANAGER, "%s: ProcessFlushResponse: %s - %d %s%s\n", msgRail->Name, rail->Name, msg->Hdr.NodeId, -+ NodeStateNames[nodeRail->State], EP_XIDS_MATCH (nodeRail->MsgXid, msg->Hdr.Xid) ? " (XIDS match)" : ""); -+ -+ if (nodeRail->State == EP_NODE_REMOTE_PASSIVATE && EP_XIDS_MATCH(nodeRail->MsgXid, msg->Hdr.Xid)) -+ { -+ EP_INVALIDATE_XID (nodeRail->MsgXid); -+ -+ printk ("%s: flush response from %d - move to passivated list\n", rail->Name, msg->Hdr.NodeId); -+ list_del (&nodeRail->Link); -+ -+ /* Node is now passivated - attempt to failover messages */ -+ list_add_tail (&nodeRail->Link, &rail->PassivatedList); -+ nodeRail->State = EP_NODE_PASSIVATED; -+ } -+ else -+ { -+ printk ("%s: flush response from %d - not passivating (%s) or XIDs mismatch (%llx %llx)\n", rail->Name, -+ msg->Hdr.NodeId, NodeStateNames[nodeRail->State], (long long) nodeRail->MsgXid.Unique, (long long) msg->Hdr.Xid.Unique); -+ } -+} -+ -+static void -+ProcessMapNmdRequest (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg) -+{ -+ EP_SYS *sys = rail->System; -+ EP_MAP_NMD_BODY *msgBody = &msg->Body.MapNmd; -+ int i; -+ -+ EPRINTF4 (DBG_MANAGER, "%s: Map NMD request from %d for %d NMDs to railmask %x\n", rail->Name, msg->Hdr.NodeId, msgBody->nFrags, msgBody->Railmask); -+ -+ for (i = 0; i < msgBody->nFrags; i++) -+ ep_nmd_map_rails (sys, &msgBody->Nmd[i], msgBody->Railmask); -+ -+ /* Must flush TLBs before responding */ -+ for (i = 0; i < EP_MAX_RAILS; i++) -+ if (sys->Rails[i] && sys->Rails[i]->TlbFlushRequired) -+ ep_perrail_dvma_sync (sys->Rails[i]); -+ -+ if (ep_send_message (rail, msg->Hdr.NodeId, EP_MANAGER_MSG_TYPE_MAP_NMD_RESPONSE, msg->Hdr.Xid, &msg->Body) < 0) -+ printk ("%s: map nmd request for %s[%d] - failed to send response\n", msgRail->Name, rail->Name, msg->Hdr.NodeId); -+} -+ -+static void -+ProcessXidMessage (EP_RAIL *msgRail, EP_MANAGER_MSG *msg, EP_XID xid) -+{ -+ EP_XID_CACHE *xidCache = ep_xid_cache_find (msgRail->System, xid); -+ -+ EPRINTF6 (DBG_MANAGER, "%s: ProcessXidMessage: XID=%08x.%0x8.%016llx -> %p(%p)\n", -+ msgRail->Name, xid.Generation, xid.Handle, (long long) xid.Unique, -+ xidCache ? xidCache->MessageHandler : 0, xidCache ? 
xidCache->Arg : 0); -+ -+ if (xidCache != NULL) -+ xidCache->MessageHandler (xidCache->Arg, msg); -+} -+ -+static void -+ProcessMessage (EP_RAIL *msgRail, void *arg, void *msgbuf) -+{ -+ EP_SYS *sys = msgRail->System; -+ EP_MANAGER_MSG *msg = (EP_MANAGER_MSG *) msgbuf; -+ uint16_t csum = msg->Hdr.Checksum; -+ EP_RAIL *rail; -+ -+ if (msg->Hdr.Version != EP_MANAGER_MSG_VERSION) -+ return; -+ -+ msg->Hdr.Checksum= 0; -+ if (CheckSum ((char *) msg, EP_MANAGER_MSG_SIZE) != csum) -+ { -+ printk ("%s: checksum failed on msg from %d (%d) (%x != %x) ?\n", msgRail->Name, msg->Hdr.NodeId, msg->Hdr.Type, csum, CheckSum ((char *) msg, EP_MANAGER_MSG_SIZE)); -+ return; -+ } -+ -+ if ((rail = sys->Rails[msg->Hdr.Rail]) == NULL) -+ { -+ printk ("%s: rail no longer exists for msg from %d?\n", msgRail->Name, msg->Hdr.NodeId); -+ return; -+ } -+ -+ EPRINTF7 (DBG_MANAGER, "%s: ProcessMessage (%s) type=%d node=%d XID=%08x.%0x8.%016llx\n", -+ msgRail->Name, rail->Name, msg->Hdr.Type, msg->Hdr.NodeId, -+ msg->Hdr.Xid.Generation, msg->Hdr.Xid.Handle, (long long)msg->Hdr.Xid.Unique); -+ -+ switch (msg->Hdr.Type) -+ { -+ case EP_MANAGER_MSG_TYPE_REMOTE_PANIC: -+ msg->Body.PanicReason[EP_PANIC_STRLEN] = '\0'; /* ensure string terminated */ -+ -+ printk ("%s: remote panic call from elan node %d - %s\n", msgRail->Name, msg->Hdr.NodeId, msg->Body.PanicReason); -+ panic ("ep: remote panic request\n"); -+ break; -+ -+ case EP_MANAGER_MSG_TYPE_NETERR_REQUEST: -+ ProcessNeterrRequest (msgRail, rail, msg); -+ break; -+ -+ case EP_MANAGER_MSG_TYPE_NETERR_RESPONSE: -+ ProcessNeterrResponse (msgRail, rail, msg); -+ break; -+ -+ case EP_MANAGER_MSG_TYPE_FLUSH_REQUEST: -+ ProcessFlushRequest (msgRail, rail, msg); -+ break; -+ -+ case EP_MANAGER_MSG_TYPE_FLUSH_RESPONSE: -+ ProcessFlushResponse (msgRail, rail, msg); -+ break; -+ -+ case EP_MANAGER_MSG_TYPE_MAP_NMD_REQUEST: -+ ProcessMapNmdRequest (msgRail, rail, msg); -+ break; -+ -+ case EP_MANAGER_MSG_TYPE_MAP_NMD_RESPONSE: -+ ProcessXidMessage (msgRail, msg, msg->Hdr.Xid); -+ break; -+ -+ case EP_MANAGER_MSG_TYPE_FAILOVER_REQUEST: -+ ProcessXidMessage (msgRail, msg, msg->Body.Failover.Xid); -+ break; -+ -+ case EP_MANAGER_MSG_TYPE_FAILOVER_RESPONSE: -+ ProcessXidMessage (msgRail, msg, msg->Hdr.Xid); -+ break; -+ -+ case EP_MANAGER_MSG_TYPE_GET_NODE_STATE: -+ ProcessGetNodeState (msgRail, rail, msg); -+ break; -+ -+ case EP_MANAGER_MSG_TYPE_GET_NODE_STATE_RESPONSE: -+ ProcessXidMessage (msgRail, msg, msg->Hdr.Xid); -+ break; -+ -+ default: -+ printk ("%s: Unknown message type %d from %d\n", msgRail->Name, msg->Hdr.Type, msg->Hdr.NodeId); -+ break; -+ } -+} -+ -+ -+static void -+ManagerQueueEvent (EP_RAIL *rail, void *arg) -+{ -+ ep_kthread_schedule ((EP_KTHREAD *) arg, lbolt); -+} -+ -+void -+UpdateConnectionState (EP_RAIL *rail, statemap_t *map) -+{ -+ EP_SYS *sys = rail->System; -+ bitmap_t seg; -+ int offset, nodeId; -+ unsigned long flags; -+ -+ while ((offset = statemap_findchange (map, &seg, 1)) >= 0) -+ { -+ for (nodeId = offset; nodeId < (offset + BT_NBIPUL) && nodeId < rail->Position.pos_nodes; nodeId++) -+ { -+ EP_NODE *node = &sys->Nodes[nodeId]; -+ EP_NODE_RAIL *nodeRail = &rail->Nodes[nodeId]; -+ -+ if (statemap_getbits (map, nodeId, 1)) -+ { -+ spin_lock_irqsave (&sys->NodeLock, flags); -+ -+ switch (nodeRail->State) -+ { -+ case EP_NODE_DISCONNECTED: -+ EPRINTF2 (DBG_MANAGER, "%s: Node %d -> Disconnected \n", rail->Name, nodeId); -+ break; -+ -+ case EP_NODE_CONNECTING: -+ EPRINTF2 (DBG_MANAGER, "%s: Node %d -> Connect\n", rail->Name, nodeId); -+ -+ /* load the 
route table entry *before* setting the state -+ * to connected, since DMA's can be initiated as soon as -+ * the node is marked as connected */ -+ rail->Operations.LoadNodeRoute (rail, nodeId); -+ -+ nodeRail->State = EP_NODE_CONNECTED; -+ -+ statemap_setbits (rail->NodeSet, nodeId, 1, 1); -+ if (statemap_getbits (sys->NodeSet, nodeId, 1) == 0) -+ statemap_setbits (sys->NodeSet, nodeId, 1, 1); -+ -+ /* Add to rails connected to this node */ -+ node->ConnectedRails |= (1 << rail->Number); -+ -+ /* Finally lower the per-node context filter */ -+ rail->Operations.LowerFilter (rail, nodeId); -+ break; -+ -+ case EP_NODE_LEAVING_CONNECTED: -+ EPRINTF2 (DBG_MANAGER, "%s: Node %d -> Local Passivate\n", rail->Name, nodeId); -+ -+ /* Raise the per-node context filter */ -+ rail->Operations.RaiseFilter (rail, nodeId); -+ -+ /* If it's resolving network errors it will be on the NodeNeterrList, -+ * remove if from this list before placing it on the LocalPassivateList -+ * as we'll resolve the network error later in RemotePassivate */ -+ if (nodeRail->NetworkErrorState) -+ list_del (&nodeRail->Link); -+ -+ list_add_tail (&nodeRail->Link, &rail->LocalPassivateList); -+ nodeRail->State = EP_NODE_LOCAL_PASSIVATE; -+ -+ /* Remove from rails connected to this node */ -+ node->ConnectedRails &= ~(1 << rail->Number); -+ break; -+ -+ default: -+ printk ("%s: Node %d - in NodeChangeMap with state %d\n", rail->Name, nodeId, nodeRail->State); -+ panic ("Node in NodeChangeMap with invalid state\n"); -+ break; -+ } -+ spin_unlock_irqrestore (&sys->NodeLock, flags); -+ } -+ } -+ } -+} -+ -+void -+ProgressNetworkError (EP_RAIL *rail, EP_NODE_RAIL *nodeRail) -+{ -+ EP_SYS *sys = rail->System; -+ int nodeId = nodeRail - rail->Nodes; -+ EP_MANAGER_MSG_BODY msg; -+ -+ ASSERT (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_REMOTE_PASSIVATE); -+ -+ if (BEFORE (lbolt, nodeRail->NextRunTime)) -+ return; -+ -+ if (nodeRail->NetworkErrorState & EP_NODE_NETERR_DMA_PACKET) -+ nodeRail->NetworkErrorState &= ~EP_NODE_NETERR_DMA_PACKET; -+ -+ if (nodeRail->NetworkErrorState & EP_NODE_NETERR_ATOMIC_PACKET) -+ { -+ if (EP_XID_INVALID (nodeRail->MsgXid)) -+ nodeRail->MsgXid = ep_xid_cache_alloc (sys, &rail->XidCache); -+ -+ msg.Cookies[0] = nodeRail->NetworkErrorCookies[0]; -+ msg.Cookies[1] = nodeRail->NetworkErrorCookies[1]; -+ -+ EPRINTF4 (DBG_NETWORK_ERROR, "%s: progress neterr - node %d cookies %llx %llx\n", rail->Name, nodeId, (long long)msg.Cookies[0], (long long)msg.Cookies[1]); -+ -+ if (ep_send_message (rail, nodeId, EP_MANAGER_MSG_TYPE_NETERR_REQUEST, nodeRail->MsgXid, &msg) == 0) -+ nodeRail->NextRunTime = lbolt + MESSAGE_RETRY_TIME; -+ else -+ nodeRail->NextRunTime = lbolt + MSGBUSY_RETRY_TIME; -+ } -+} -+ -+long -+ProgressNodeLists (EP_RAIL *rail, long nextRunTime) -+{ -+ EP_SYS *sys = rail->System; -+ struct list_head *el, *nel; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&sys->NodeLock, flags); -+ list_for_each_safe (el, nel, &rail->NetworkErrorList) { -+ EP_NODE_RAIL *nodeRail = list_entry (el, EP_NODE_RAIL, Link); -+ int nodeId = nodeRail - rail->Nodes; -+ -+ ProgressNetworkError (rail, nodeRail); -+ -+ if (nodeRail->NetworkErrorState == 0) -+ { -+ EPRINTF2 (DBG_NETWORK_ERROR, "%s: lower context filter for node %d due to network error\n", rail->Name, nodeId); -+ -+ rail->Operations.LowerFilter (rail, nodeId); -+ -+ list_del (&nodeRail->Link); -+ continue; -+ } -+ -+ if (nextRunTime == 0 || AFTER (nextRunTime, nodeRail->NextRunTime)) -+ nextRunTime = nodeRail->NextRunTime; -+ } -+ 
spin_unlock_irqrestore (&sys->NodeLock, flags); -+ -+ if (! list_empty (&rail->LocalPassivateList)) -+ { -+ EPRINTF1 (DBG_MANAGER, "%s: Locally Passivating Nodes\n", rail->Name); -+ -+ /* We have disconnected from some nodes or have left ourselves -+ * flush through all communications and determine whether we -+ * need to perform rail failover */ -+ rail->Operations.FlushFilters (rail); -+ -+ ep_call_callbacks (rail, EP_CB_FLUSH_FILTERING, rail->NodeSet); -+ -+ rail->Operations.FlushQueues (rail); -+ -+ ep_call_callbacks (rail, EP_CB_FLUSH_FLUSHING, rail->NodeSet); -+ -+ while (! list_empty (&rail->LocalPassivateList)) -+ { -+ EP_NODE_RAIL *nodeRail = list_entry (rail->LocalPassivateList.next, EP_NODE_RAIL, Link); -+ int nodeId = nodeRail - rail->Nodes; -+ -+ list_del (&nodeRail->Link); -+ -+ rail->Operations.UnloadNodeRoute (rail, nodeId); -+ -+ if (nodeRail->NetworkErrorState == 0 && nodeRail->MessageState == 0) -+ { -+ EPRINTF2 (DBG_MANAGER, "%s: Node %d -> Disconnecting\n", rail->Name, nodeId); -+ -+ list_add_tail (&nodeRail->Link, &rail->DisconnectingList); -+ nodeRail->State = EP_NODE_DISCONNECTING; -+ } -+ else -+ { -+ EPRINTF2 (DBG_MANAGER, "%s: Node %d -> Remote Passivate\n", rail->Name, nodeId); -+ -+ list_add_tail (&nodeRail->Link, &rail->RemotePassivateList); -+ nodeRail->State = EP_NODE_REMOTE_PASSIVATE; -+ -+ if (nodeRail->NetworkErrorState == 0) -+ nodeRail->NextRunTime = lbolt; -+ } -+ } -+ -+ ep_call_callbacks (rail, EP_CB_PASSIVATED, rail->NodeSet); -+ } -+ -+ list_for_each_safe (el, nel, &rail->RemotePassivateList) { -+ EP_NODE_RAIL *nodeRail = list_entry (el, EP_NODE_RAIL, Link); -+ int nodeId = nodeRail - rail->Nodes; -+ EP_NODE *node = &sys->Nodes[nodeId]; -+ -+ if (node->ConnectedRails == 0) /* no rails connected to this node (anymore) */ -+ { -+ /* Remove from this list */ -+ list_del (&nodeRail->Link); -+ -+ EPRINTF2 (DBG_MANAGER, "%s: Node %d, no rails, Remote Passivate -> Disconnecting\n", rail->Name, nodeId); -+ -+ /* transition towards disconnected */ -+ list_add_tail (&nodeRail->Link, &rail->DisconnectingList); -+ nodeRail->State = EP_NODE_DISCONNECTING; -+ continue; -+ } -+ -+ EPRINTF6 (DBG_MANAGER, "%s: Node %d - %s NetworkErrorState=%x NextRunTime=%lx (%lx)\n", -+ rail->Name, nodeId, NodeStateNames[nodeRail->State], nodeRail->NetworkErrorState, -+ nodeRail->NextRunTime, nextRunTime); -+ -+ if (nodeRail->NetworkErrorState) -+ { -+ ProgressNetworkError (rail, nodeRail); -+ } -+ else if (! BEFORE (lbolt, nodeRail->NextRunTime)) -+ { -+ if (EP_XID_INVALID (nodeRail->MsgXid)) -+ nodeRail->MsgXid = ep_xid_cache_alloc (sys, &rail->XidCache); -+ -+ if (ep_send_message (rail, nodeId, EP_MANAGER_MSG_TYPE_FLUSH_REQUEST, nodeRail->MsgXid, NULL) == 0) -+ nodeRail->NextRunTime = lbolt + MESSAGE_RETRY_TIME; -+ else -+ nodeRail->NextRunTime = lbolt + MSGBUSY_RETRY_TIME; -+ } -+ -+ if (nextRunTime == 0 || AFTER (nextRunTime, nodeRail->NextRunTime)) -+ nextRunTime = nodeRail->NextRunTime; -+ } -+ -+ if (! 
list_empty (&rail->PassivatedList)) -+ { -+ ep_call_callbacks (rail, EP_CB_FAILOVER, rail->NodeSet); -+ -+ list_for_each_safe (el, nel, &rail->PassivatedList) { -+ EP_NODE_RAIL *nodeRail = list_entry (rail->PassivatedList.next, EP_NODE_RAIL, Link); -+ int nodeId = nodeRail - rail->Nodes; -+ EP_NODE *node = &sys->Nodes[nodeId]; -+ -+ ASSERT (nodeRail->NetworkErrorState == 0); -+ -+ if (node->ConnectedRails == 0) -+ { -+ /* Remove from this list */ -+ list_del (&nodeRail->Link); -+ -+ EPRINTF2 (DBG_MANAGER, "%s: Node %d, no rails, Passivated -> Disconnecting\n", rail->Name, nodeId); -+ -+ /* transition towards disconnected */ -+ list_add_tail (&nodeRail->Link, &rail->DisconnectingList); -+ nodeRail->State = EP_NODE_DISCONNECTING; -+ continue; -+ } -+ -+ EPRINTF6 (DBG_MANAGER, "%s: Node %d - %s NetworkErrorState=%x NextRunTime=%lx (%lx)\n", -+ rail->Name, nodeId, NodeStateNames[nodeRail->State], nodeRail->NetworkErrorState, -+ nodeRail->NextRunTime, nextRunTime); -+ -+ if (nodeRail->MessageState == 0) -+ { -+ EPRINTF2 (DBG_MANAGER, "%s: Node %d, no messages, Passivated -> Disconnecting\n", rail->Name,nodeId); -+ -+ list_del (&nodeRail->Link); -+ list_add_tail (&nodeRail->Link, &rail->DisconnectingList); -+ nodeRail->State = EP_NODE_DISCONNECTING; -+ continue; -+ } -+ -+ nodeRail->MessageState = 0; -+ nodeRail->NextRunTime = lbolt + FAILOVER_RETRY_TIME; -+ -+ if (nextRunTime == 0 || AFTER (nextRunTime, nodeRail->NextRunTime)) -+ nextRunTime = nodeRail->NextRunTime; -+ } -+ } -+ -+ if (! list_empty (&rail->DisconnectingList)) -+ { -+ ep_call_callbacks (rail, EP_CB_DISCONNECTING, rail->NodeSet); -+ -+ while (! list_empty (&rail->DisconnectingList)) -+ { -+ EP_NODE_RAIL *nodeRail = list_entry (rail->DisconnectingList.next, EP_NODE_RAIL, Link); -+ int nodeId = nodeRail - rail->Nodes; -+ EP_NODE *node = &sys->Nodes[nodeId]; -+ -+ EPRINTF2 (DBG_MANAGER, "%s: Node %d, Disconnecting -> Disconnected\n", rail->Name, nodeId); -+ -+ list_del (&nodeRail->Link); -+ -+ rail->Operations.NodeDisconnected (rail, nodeId); -+ -+ /* Clear the network error state */ -+ nodeRail->NextRunTime = 0; -+ nodeRail->NetworkErrorState = 0; -+ nodeRail->NetworkErrorCookies[0] = 0; -+ nodeRail->NetworkErrorCookies[1] = 0; -+ -+ /* Clear the message state */ -+ nodeRail->MessageState = 0; -+ -+ cm_node_disconnected (rail, nodeId); -+ -+ nodeRail->State = EP_NODE_DISCONNECTED; -+ -+ statemap_setbits (rail->NodeSet, nodeId, 0, 1); -+ -+ if (node->ConnectedRails == 0) -+ statemap_setbits (sys->NodeSet, nodeId, 0, 1); -+ } -+ -+ ep_call_callbacks (rail, EP_CB_DISCONNECTED, rail->NodeSet); -+ } -+ -+ return (nextRunTime); -+} -+ -+void -+DisplayNodes (EP_RAIL *rail) -+{ -+ EP_SYS *sys = rail->System; -+ int i, state, count; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&sys->NodeLock, flags); -+ -+ for (state = 0; state < EP_NODE_NUM_STATES; state++) -+ { -+ for (count = i = 0; i < rail->Position.pos_nodes; i++) -+ { -+ ASSERT (rail->Nodes[i].State < EP_NODE_NUM_STATES); -+ -+ if (rail->Nodes[i].State == state) -+ if (state != EP_NODE_DISCONNECTED) -+ printk ("%s %d", !count++ ? NodeStateNames[state] : "", i); -+ } -+ if (count) -+ printk ("%s (%d total)\n", state == EP_NODE_DISCONNECTED ? 
NodeStateNames[state] : "", count); -+ } -+ spin_unlock_irqrestore (&sys->NodeLock, flags); -+} -+ -+static void -+PositionFound (EP_RAIL *rail, ELAN_POSITION *pos) -+{ -+ EP_SYS *sys = rail->System; -+ struct list_head *el; -+ int i; -+ -+ /* only called from the ep_manager thread whilst rail->State == EP_RAIL_STATE_STARTED */ -+ ASSERT ( rail->State == EP_RAIL_STATE_STARTED ); -+ -+#if defined(PER_CPU_TIMEOUT) -+ /* -+ * On Tru64 - if we're running in a "funnelled" thread, then we will be -+ * unable to start the per-cpu timeouts, so if we return then eventually -+ * the ep_manager() thread will find the network position and we're -+ * in control of our own destiny. -+ */ -+ if (THREAD_IS_FUNNELED(current_thread())) -+ { -+ ep_kthread_schedule (&sys->ManagerThread, lbolt); -+ return; -+ } -+#endif -+ -+ sprintf (rail->Name, "ep%d[%d]", rail->Number, pos->pos_nodeid); -+ -+ if (pos->pos_levels > MaxSwitchLevels) -+ { -+ for (i = 0; i < (pos->pos_levels - MaxSwitchLevels); i++) -+ pos->pos_nodes /= pos->pos_arity[i]; -+ -+ for (i = 0; i < MaxSwitchLevels; i++) -+ pos->pos_arity[i] = pos->pos_arity[i + (pos->pos_levels - MaxSwitchLevels)]; -+ -+ pos->pos_levels = MaxSwitchLevels; -+ pos->pos_nodeid = pos->pos_nodeid % pos->pos_nodes; -+ -+ printk ("%s: limiting switch levels to %d\n", rail->Name, MaxSwitchLevels); -+ printk ("%s: nodeid=%d level=%d numnodes=%d\n", rail->Name, pos->pos_nodeid, pos->pos_levels, pos->pos_nodes); -+ -+ sprintf (rail->Name, "ep%d[%d]", rail->Number, pos->pos_nodeid); -+ } -+ -+ if (rail->Position.pos_mode != ELAN_POS_UNKNOWN && rail->Position.pos_nodeid != pos->pos_nodeid) -+ { -+ printk ("%s: NodeId has changed from %d to %d\n", rail->Name, rail->Position.pos_nodeid, pos->pos_nodeid); -+ panic ("ep: PositionFound: NodeId has changed\n"); -+ } -+ -+ if (sys->Position.pos_mode != ELAN_POS_UNKNOWN && (sys->Position.pos_nodeid != pos->pos_nodeid || sys->Position.pos_nodes != pos->pos_nodes)) -+ { -+ printk ("%s: position incompatible - disabling rail\n", rail->Name); -+ rail->State = EP_RAIL_STATE_INCOMPATIBLE; -+ return; -+ } -+ -+ if (sys->Position.pos_mode == ELAN_POS_UNKNOWN) -+ { -+ sys->Position = *pos; -+ sys->NodeSet = statemap_create (pos->pos_nodes); -+ KMEM_ZALLOC (sys->Nodes, EP_NODE *, pos->pos_nodes * sizeof (EP_NODE), 1); -+ } -+ -+ rail->Position = *pos; -+ rail->SwitchBroadcastLevel = pos->pos_levels - 1; -+ rail->State = EP_RAIL_STATE_RUNNING; -+ -+ for (i = 0; i < pos->pos_levels; i++) -+ { -+ rail->SwitchProbeTick[i] = lbolt; -+ rail->SwitchLast[i].uplink = 4; -+ } -+ -+ rail->Operations.PositionFound (rail, pos); -+ -+ INIT_LIST_HEAD (&rail->NetworkErrorList); -+ INIT_LIST_HEAD (&rail->LocalPassivateList); -+ INIT_LIST_HEAD (&rail->RemotePassivateList); -+ INIT_LIST_HEAD (&rail->PassivatedList); -+ INIT_LIST_HEAD (&rail->DisconnectingList); -+ -+ rail->NodeSet = statemap_create (rail->Position.pos_nodes); -+ rail->NodeChangeMap = statemap_create (rail->Position.pos_nodes); -+ rail->NodeChangeTmp = statemap_create (rail->Position.pos_nodes); -+ -+ KMEM_ZALLOC (rail->Nodes, EP_NODE_RAIL *, rail->Position.pos_nodes * sizeof (EP_NODE_RAIL), 1); -+ -+ for (i = 0; i < rail->Position.pos_nodes; i++) -+ { -+ spin_lock_init (&rail->Nodes[i].CookieLock); -+ -+ INIT_LIST_HEAD (&rail->Nodes[i].StalledDmas); -+ -+ rail->Nodes[i].State = EP_NODE_DISCONNECTED; -+ } -+ -+ /* Notify all subsystems that a new rail has been enabled */ -+ kmutex_lock (&sys->SubsysLock); -+ list_for_each (el, &sys->Subsystems) { -+ EP_SUBSYS *subsys = list_entry (el, EP_SUBSYS, Link); -+ 
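/* descriptive note (not in the original patch): AddRail is optional - a subsystem with no per-rail state simply leaves it NULL */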
-+ if (subsys->AddRail) -+ subsys->AddRail (subsys, sys, rail); -+ -+ /* XXXX: what to do if the subsystem refused to add the rail ? */ -+ } -+ kmutex_unlock (&sys->SubsysLock); -+ -+ /* Now enable the manager input queue */ -+ ep_enable_inputq (rail, rail->ManagerInputQ); -+} -+ -+static void -+ep_manager (void *arg) -+{ -+ EP_SYS *sys = (EP_SYS *) arg; -+ struct list_head *el; -+ ELAN_POSITION pos; -+ unsigned long flags; -+ -+ kernel_thread_init ("ep_manager"); -+ kernel_thread_become_highpri(); -+ -+ for (;;) -+ { -+ long nextRunTime = lbolt + MSEC2TICKS(CM_THREAD_SCHEDULE_TIMEOUT); -+ -+ list_for_each (el, &sys->ManagedRails) { -+ EP_RAIL *rail = list_entry (el, EP_RAIL, ManagerLink); -+ -+ switch (rail->State) -+ { -+ case EP_RAIL_STATE_STARTED: -+ if (ProbeNetwork (rail, &pos) == 0) -+ { -+ PositionFound (rail, &pos); -+ break; -+ } -+ -+ if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + HZ)) -+ nextRunTime = lbolt + HZ; -+ break; -+ -+ case EP_RAIL_STATE_RUNNING: -+ if (ep_poll_inputq (rail, rail->ManagerInputQ, 100, ProcessMessage, rail) >= 100) -+ nextRunTime = lbolt; -+ -+ /* Handle any nodes which the cluster membership subsystem -+ * has indicated are to begin connecting or disconnecting */ -+ spin_lock_irqsave (&sys->NodeLock, flags); -+ if (! statemap_changed (rail->NodeChangeMap)) -+ spin_unlock_irqrestore (&sys->NodeLock, flags); -+ else -+ { -+ /* -+ * Take a copy of the statemap, and zero all entries so -+ * we only see new requests next time -+ */ -+ statemap_copy (rail->NodeChangeTmp, rail->NodeChangeMap); -+ statemap_zero (rail->NodeChangeMap); -+ spin_unlock_irqrestore (&sys->NodeLock, flags); -+ -+ UpdateConnectionState (rail, rail->NodeChangeTmp); -+ } -+ -+ nextRunTime = ProgressNodeLists (rail, nextRunTime); -+ -+ if (statemap_changed (rail->NodeSet)) -+ { -+ ep_call_callbacks (rail, EP_CB_NODESET, rail->NodeSet); -+ -+ statemap_clearchanges (rail->NodeSet); -+ } -+ break; -+ -+ case EP_RAIL_STATE_INCOMPATIBLE: -+ break; -+ } -+ } -+ -+ -+ EPRINTF5 (DBG_MANAGER, "ep_manager: sleep now=%lx nextRunTime=%lx (%ld) [%lx (%ld)]\n", -+ lbolt, nextRunTime, nextRunTime ? nextRunTime - lbolt : 0, sys->ManagerThread.next_run, -+ sys->ManagerThread.next_run ? 
sys->ManagerThread.next_run - lbolt : 0); -+ -+ if (ep_kthread_sleep (&sys->ManagerThread, nextRunTime) < 0) -+ break; -+ } -+ -+ ep_kthread_stopped (&sys->ManagerThread); -+ kernel_thread_exit(); -+} -+ -+void -+ep_connect_node (EP_RAIL *rail, int nodeId) -+{ -+ EP_SYS *sys = rail->System; -+ EP_NODE_RAIL *node = &rail->Nodes[nodeId]; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&sys->NodeLock, flags); -+ -+ EPRINTF2 (DBG_MANAGER, "%s: ep_connect_node: nodeId %d\n", rail->Name, nodeId); -+ -+ ASSERT (node->State == EP_NODE_DISCONNECTED && statemap_getbits (rail->NodeChangeMap, nodeId, 1) == 0); -+ -+ node->State = EP_NODE_CONNECTING; -+ -+ statemap_setbits (rail->NodeChangeMap, nodeId, 1, 1); -+ -+ spin_unlock_irqrestore (&sys->NodeLock, flags); -+ -+ ep_kthread_schedule (&sys->ManagerThread, lbolt); -+} -+ -+int -+ep_disconnect_node (EP_RAIL *rail, int nodeId) -+{ -+ EP_SYS *sys = rail->System; -+ EP_NODE_RAIL *node = &rail->Nodes[nodeId]; -+ int state; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&sys->NodeLock, flags); -+ -+ EPRINTF3 (DBG_MANAGER, "%s: ep_disconnect_node: nodeId %d - %s\n", rail->Name, nodeId, NodeStateNames[node->State]); -+ -+ switch (state = node->State) -+ { -+ case EP_NODE_CONNECTING: -+ statemap_setbits (rail->NodeChangeMap, nodeId, 0, 1); -+ -+ node->State = EP_NODE_DISCONNECTED; -+ break; -+ -+ case EP_NODE_CONNECTED: -+ statemap_setbits (rail->NodeChangeMap, nodeId, 1, 1); -+ -+ node->State = EP_NODE_LEAVING_CONNECTED; -+ break; -+ -+ case EP_NODE_LEAVING_CONNECTED: -+ /* no assert on NodeChangeMap as the map could have been taken but not acted on */ -+ break; -+ -+ default: -+ ASSERT (statemap_getbits (rail->NodeChangeMap, nodeId, 1) == 0); -+ break; -+ } -+ spin_unlock_irqrestore (&sys->NodeLock, flags); -+ -+ if (state == EP_NODE_CONNECTED) -+ ep_kthread_schedule (&sys->ManagerThread, lbolt); -+ -+ return state; -+} -+ -+int -+ep_manager_add_rail (EP_SYS *sys, EP_RAIL *rail) -+{ -+ if ((rail->ManagerOutputQ = ep_alloc_outputq (rail, EP_MANAGER_MSG_SIZE, EP_MANAGER_OUTPUTQ_SLOTS)) == NULL) -+ return -ENOMEM; -+ -+ if ((rail->ManagerInputQ = ep_alloc_inputq (rail, EP_SYSTEMQ_MANAGER, EP_MANAGER_MSG_SIZE, EP_MANAGER_INPUTQ_SLOTS, -+ ManagerQueueEvent, &sys->ManagerThread)) == NULL) -+ { -+ ep_free_outputq (rail, rail->ManagerOutputQ); -+ return -ENOMEM; -+ } -+ -+ spin_lock_init (&rail->ManagerOutputQLock); -+ -+ ep_xid_cache_init (sys, &rail->XidCache); -+ -+ ep_kthread_stall (&sys->ManagerThread); -+ list_add_tail (&rail->ManagerLink, &sys->ManagedRails); -+ ep_kthread_resume (&sys->ManagerThread); -+ -+ return (0); -+} -+ -+void -+ep_manager_remove_rail (EP_SYS *sys, EP_RAIL *rail) -+{ -+ if (rail->ManagerInputQ != NULL) -+ { -+ ep_kthread_stall (&sys->ManagerThread); -+ list_del (&rail->ManagerLink); -+ ep_kthread_resume (&sys->ManagerThread); -+ -+ ep_xid_cache_destroy (sys, &rail->XidCache); -+ -+ spin_lock_destroy (&rail->ManagerOutputQLock); -+ -+ ep_disable_inputq (rail, rail->ManagerInputQ); -+ ep_free_inputq (rail, rail->ManagerInputQ); -+ ep_free_outputq (rail, rail->ManagerOutputQ); -+ } -+} -+ -+int -+ep_manager_init (EP_SYS *sys) -+{ -+ INIT_LIST_HEAD (&sys->ManagedRails); -+ -+ ep_kthread_init (&sys->ManagerThread); -+ -+ if (kernel_thread_create (ep_manager, (void *) sys) == 0) -+ return (ENOMEM); -+ -+ ep_kthread_started (&sys->ManagerThread); -+ -+ return (0); -+} -+ -+void -+ep_manager_fini (EP_SYS *sys) -+{ -+ ep_kthread_stop (&sys->ManagerThread); -+ ep_kthread_destroy (&sys->ManagerThread); -+} -+ -+int -+ep_sys_init (EP_SYS 
*sys) -+{ -+ kmutex_init (&sys->SubsysLock); -+ kmutex_init (&sys->StartStopLock); -+ spin_lock_init (&sys->NodeLock); -+ -+ INIT_LIST_HEAD (&sys->Subsystems); -+ -+ /* initialise the xid allocators */ -+ spin_lock_init (&sys->XidLock); -+ INIT_LIST_HEAD (&sys->XidCacheList); -+ -+ /* initially don't know where we are in the network */ -+ sys->Position.pos_mode = ELAN_POS_UNKNOWN; -+ -+ /* initialise the network mapping descriptor hash tables */ -+ ep_nmh_init (&sys->MappingTable); -+ -+ /* initialise the shared allocators */ -+ ep_shared_alloc_init (sys); -+ -+ /* initialise the dvma space */ -+ ep_dvma_init (sys); -+ -+ /* initialise the rail manager */ -+ ep_manager_init (sys); -+ -+ /* initialise all subsystems */ -+ cm_init (sys); -+ ep_comms_init (sys); -+ //ep_msgsys_init (sys); -+ -+ return (0); -+} -+ -+void -+ep_sys_fini (EP_SYS *sys) -+{ -+ /* Destroy the subsystems in the reverse order to their creation */ -+ while (! list_empty (&sys->Subsystems)) -+ { -+ EP_SUBSYS *subsys = list_entry (sys->Subsystems.prev, EP_SUBSYS, Link); -+ -+ list_del (&subsys->Link); -+ -+ subsys->Destroy (subsys, sys); -+ } -+ -+ ep_manager_fini(sys); -+ ep_dvma_fini (sys); -+ ep_shared_alloc_fini (sys); -+ -+ ep_nmh_fini (&sys->MappingTable); -+ -+ if (sys->Position.pos_mode != ELAN_POS_UNKNOWN) { -+ statemap_destroy (sys->NodeSet); -+ KMEM_FREE(sys->Nodes, sys->Position.pos_nodes * sizeof (EP_NODE)); -+ } -+ -+ spin_lock_destroy (&sys->XidLock); -+ -+ spin_lock_destroy (&sys->NodeLock); -+ kmutex_destroy (&sys->SubsysLock); -+ kmutex_destroy (&sys->StartStopLock); -+} -+ -+void -+ep_shutdown (EP_SYS *sys) -+{ -+ sys->Shutdown = 1; -+} -+ -+int -+ep_init_rail (EP_SYS *sys, EP_RAIL *rail) -+{ -+ static int rnum; -+ -+ rail->System = sys; -+ rail->State = EP_RAIL_STATE_UNINITIALISED; -+ rail->Number = rnum++; -+ rail->Position.pos_mode = ELAN_POS_UNKNOWN; -+ rail->Position.pos_nodeid = ELAN_INVALID_NODE; -+ -+ rail->CallbackRegistered = 0; -+ -+ sprintf (rail->Name, "ep%d", rail->Number); -+ -+ /* Initialise externally visible locks */ -+ kmutex_init (&rail->CallbackLock); -+ -+ ep_alloc_init (rail); -+ -+ sys->Rails[rail->Number] = rail; -+ -+ return 0; -+} -+ -+void -+ep_destroy_rail (EP_RAIL *rail) -+{ -+ ASSERT (rail->State == EP_RAIL_STATE_UNINITIALISED); -+ -+ ep_alloc_fini (rail); -+ -+ kmutex_destroy (&rail->CallbackLock); -+ -+ rail->System->Rails[rail->Number] = NULL; -+ -+ rail->Operations.DestroyRail (rail); -+} -+ -+/* We need to traverse the Subsystems list backwards -+ * but it's not defined in <linux/list.h> */ -+#define list_for_each_backwards(pos,list) \ -+ for (pos = (list)->prev; pos != (list); \ -+ pos = (pos)->prev) -+ -+void -+__ep_stop_rail (EP_RAIL *rail) -+{ -+ /* called holding the sys->Lock */ -+ EP_SYS *sys = rail->System; -+ struct list_head *el; -+ -+ rail->Operations.StallRail (rail); -+ -+ /* Notify all subsystems that this rail is being stopped */ -+ if (rail->State == EP_RAIL_STATE_RUNNING) -+ { -+ kmutex_lock (&sys->SubsysLock); -+ list_for_each_backwards (el, &sys->Subsystems) { -+ EP_SUBSYS *subsys = list_entry (el, EP_SUBSYS, Link); -+ -+ if (subsys->RemoveRail) -+ subsys->RemoveRail (subsys, sys, rail); -+ } -+ kmutex_unlock (&sys->SubsysLock); -+ -+ ep_manager_remove_rail (sys, rail); -+ -+ KMEM_FREE (rail->Nodes, rail->Position.pos_nodes * sizeof (EP_NODE_RAIL)); -+ -+ statemap_destroy (rail->NodeChangeTmp); -+ statemap_destroy (rail->NodeChangeMap); -+ statemap_destroy (rail->NodeSet); -+ } -+ -+ ep_dvma_remove_rail (sys, rail); -+ ep_shared_alloc_remove_rail (sys, rail); -+ 
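/* descriptive note (not in the original patch): all generic per-rail state has now been released - hand over to the device-specific StopRail */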
-+ rail->Operations.StopRail (rail); -+ -+ rail->State = EP_RAIL_STATE_UNINITIALISED; -+} -+ -+void -+ep_stop_rail (EP_RAIL *rail) -+{ -+ EP_SYS *sys = rail->System; -+ -+ /* stall ep_manager */ -+ /* and remove the rail from the manager */ -+ -+ ep_kthread_stall (&sys->ManagerThread); -+ if ( rail->State == EP_RAIL_STATE_STARTED ) -+ ep_manager_remove_rail (sys, rail); -+ ep_kthread_resume (&sys->ManagerThread); -+ -+ __ep_stop_rail (rail); -+} -+ -+int -+ep_start_rail (EP_RAIL *rail) -+{ -+ EP_SYS *sys = rail->System; -+ -+ ASSERT (rail->State == EP_RAIL_STATE_UNINITIALISED); -+ -+ if (rail->Operations.StartRail (rail) < 0) -+ return -ENXIO; -+ -+ kmutex_lock (&sys->StartStopLock); -+ /* Add this rail to the shared allocator */ -+ if (ep_shared_alloc_add_rail (rail->System, rail)) -+ goto failed; -+ -+ /* Add this rail to dvma kmap */ -+ if (ep_dvma_add_rail (rail->System, rail)) -+ goto failed; -+ -+ /* rail is now started */ -+ rail->State = EP_RAIL_STATE_STARTED; -+ -+ /* notify the rail manager of the new rail */ -+ if (ep_manager_add_rail (rail->System, rail)) -+ goto failed; -+ -+ kmutex_unlock (&sys->StartStopLock); -+ return (ESUCCESS); -+ -+ failed: -+ printk ("%s: start failed\n", rail->Name); -+ kmutex_unlock (&sys->StartStopLock); -+ __ep_stop_rail (rail); -+ -+ return (ENOMEM); -+} -+ -+void -+ep_subsys_add (EP_SYS *sys, EP_SUBSYS *subsys) -+{ -+ kmutex_lock (&sys->SubsysLock); -+ list_add_tail (&subsys->Link, &sys->Subsystems); -+ kmutex_unlock (&sys->SubsysLock); -+} -+ -+void -+ep_subsys_del (EP_SYS *sys, EP_SUBSYS *subsys) -+{ -+ kmutex_lock (&sys->SubsysLock); -+ list_del (&subsys->Link); -+ kmutex_unlock (&sys->SubsysLock); -+} -+ -+EP_SUBSYS * -+ep_subsys_find (EP_SYS *sys, char *name) -+{ -+ struct list_head *el; -+ -+ ASSERT ( !in_interrupt()); -+ -+ kmutex_lock (&sys->SubsysLock); -+ list_for_each (el, &sys->Subsystems) { -+ EP_SUBSYS *subsys = list_entry (el, EP_SUBSYS, Link); -+ -+ if (! strcmp (subsys->Name, name)) -+ { -+ kmutex_unlock (&sys->SubsysLock); -+ return (subsys); -+ } -+ } -+ -+ kmutex_unlock (&sys->SubsysLock); -+ return (NULL); -+} -+ -+int -+ep_waitfor_nodeid (EP_SYS *sys) -+{ -+ int i, printed = 0; -+ kcondvar_t Sleep; -+ spinlock_t Lock; -+ -+ kcondvar_init (&Sleep); -+ spin_lock_init (&Lock); -+ -+#define TICKS_TO_WAIT (10*hz) -+#define TICKS_PER_LOOP (hz/10) -+ for (i = 0; sys->Position.pos_mode == ELAN_POS_UNKNOWN && i < TICKS_TO_WAIT; i += TICKS_PER_LOOP) -+ { -+ if (! printed++) -+ printk ("ep: waiting for network position to be found\n"); -+ -+ spin_lock (&Lock); -+ kcondvar_timedwait (&Sleep, &Lock, NULL, lbolt + TICKS_PER_LOOP); -+ spin_unlock (&Lock); -+ } -+ -+ if (sys->Position.pos_mode == ELAN_POS_UNKNOWN) -+ printk ("ep: network position not found after waiting\n"); -+ else if (printed) -+ printk ("ep: network position found at nodeid %d\n", sys->Position.pos_nodeid); -+ -+ spin_lock_destroy (&Lock); -+ kcondvar_destroy (&Sleep); -+ -+ return (sys->Position.pos_mode == ELAN_POS_UNKNOWN ? ELAN_INVALID_NODE : sys->Position.pos_nodeid); -+} -+ -+int -+ep_nodeid (EP_SYS *sys) -+{ -+ return (sys->Position.pos_mode == ELAN_POS_UNKNOWN ? 
ELAN_INVALID_NODE : sys->Position.pos_nodeid); -+} -+ -+int -+ep_numnodes (EP_SYS *sys) -+{ -+ return (sys->Position.pos_nodes); -+} -+ -+void -+ep_fillout_stats(EP_RAIL *r, char *str) -+{ -+ sprintf(str+strlen(str),"SendMessageFailed %lu NeterrAtomicPacket %lu NeterrDmaPacket %lu \n", r->Stats.SendMessageFailed, r->Stats.NeterrAtomicPacket, r->Stats.NeterrDmaPacket); -+ sprintf(str+strlen(str),"Rx %lu %lu /sec\n", GET_STAT_TOTAL(r->Stats,rx), GET_STAT_PER_SEC(r->Stats,rx) ); -+ sprintf(str+strlen(str),"MBytes %lu %lu MB/sec\n", GET_STAT_TOTAL(r->Stats,rx_len)/ (1024*1024), GET_STAT_PER_SEC(r->Stats,rx_len) / (1024*1024)); -+ sprintf(str+strlen(str),"Tx %lu %lu /sec\n", GET_STAT_TOTAL(r->Stats,tx), GET_STAT_PER_SEC(r->Stats,tx) ); -+ sprintf(str+strlen(str),"MBytes %lu %lu MB/sec\n", GET_STAT_TOTAL(r->Stats,tx_len)/ (1024*1024), GET_STAT_PER_SEC(r->Stats,tx_len) / (1024*1024)); -+} -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/kcomm_elan3.c linux-2.6.9/drivers/net/qsnet/ep/kcomm_elan3.c ---- clean/drivers/net/qsnet/ep/kcomm_elan3.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/kcomm_elan3.c 2004-11-30 07:02:06.000000000 -0500 -@@ -0,0 +1,504 @@ -+ -+/* -+ * Copyright (c) 2003 by Quadrics Ltd. -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: kcomm_elan3.c,v 1.34 2004/11/30 12:02:06 mike Exp $" -+/* $Source: /cvs/master/quadrics/epmod/kcomm_elan3.c,v $ */ -+ -+#include -+ -+#include -+ -+#include "kcomm_vp.h" -+#include "kcomm_elan3.h" -+#include "conf_linux.h" -+ -+extern EP_CODE threadcode_elan3; -+ -+unsigned int -+ep3_create_rails (EP_SYS *sys, unsigned int disabled) -+{ -+ unsigned int rmask = 0; -+ ELAN3_DEV *dev; -+ EP_RAIL *rail; -+ int i; -+ -+ for (i = 0; i < EP_MAX_RAILS; i++) -+ { -+ if ((dev = elan3_device (i)) != NULL) -+ { -+ if ((rail = ep3_create_rail (sys, dev)) != NULL) -+ { -+ if (disabled & (1 << rail->Number)) -+ printk ("%s: auto-start of device disabled by configuration\n", rail->Name); -+ else -+ ep_start_rail (rail); -+ -+ ep_procfs_rail_init(rail); -+ -+ rmask |= (1 << rail->Number); -+ } -+ } -+ } -+ -+ return rmask; -+} -+ -+EP_RAIL * -+ep3_create_rail (EP_SYS *sys, ELAN3_DEV *dev) -+{ -+ EP3_RAIL *rail; -+ int res; -+ -+ KMEM_ZALLOC (rail, EP3_RAIL *, sizeof (EP3_RAIL), TRUE); -+ -+ if (rail == NULL) -+ return (EP_RAIL *) NULL; -+ -+ if ((res = ep_init_rail (sys, &rail->Generic)) != 0) -+ { -+ KMEM_FREE (rail, sizeof (EP3_RAIL)); -+ return (EP_RAIL *) NULL; -+ } -+ -+ rail->Device = dev; -+ -+ /* Install our rail operations */ -+ rail->Generic.Operations.DestroyRail = ep3_destroy_rail; -+ rail->Generic.Operations.StartRail = ep3_start_rail; -+ rail->Generic.Operations.StallRail = ep3_stall_rail; -+ rail->Generic.Operations.StopRail = ep3_stop_rail; -+ -+ rail->Generic.Operations.SdramAlloc = ep3_sdram_alloc; -+ rail->Generic.Operations.SdramFree = ep3_sdram_free; -+ rail->Generic.Operations.SdramWriteb = ep3_sdram_writeb; -+ -+ rail->Generic.Operations.KaddrMap = ep3_kaddr_map; -+ rail->Generic.Operations.SdramMap = ep3_sdram_map; -+ rail->Generic.Operations.Unmap = ep3_unmap; -+ -+ rail->Generic.Operations.DvmaReserve = ep3_dvma_reserve; -+ rail->Generic.Operations.DvmaRelease = ep3_dvma_release; -+ rail->Generic.Operations.DvmaSetPte = ep3_dvma_set_pte; -+ rail->Generic.Operations.DvmaReadPte = ep3_dvma_read_pte; -+ 
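/* descriptive note (not in the original patch): DvmaUnload drops a range of dvma translations and FlushTlb then makes the elan MMU discard any cached copies */ -+ 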
rail->Generic.Operations.DvmaUnload = ep3_dvma_unload; -+ rail->Generic.Operations.FlushTlb = ep3_flush_tlb; -+ -+ rail->Generic.Operations.ProbeRoute = ep3_probe_route; -+ rail->Generic.Operations.PositionFound = ep3_position_found; -+ rail->Generic.Operations.CheckPosition = ep3_check_position; -+ rail->Generic.Operations.NeterrFixup = ep3_neterr_fixup; -+ -+ rail->Generic.Operations.LoadSystemRoute = ep3_load_system_route; -+ -+ rail->Generic.Operations.LoadNodeRoute = ep3_load_node_route; -+ rail->Generic.Operations.UnloadNodeRoute = ep3_unload_node_route; -+ rail->Generic.Operations.LowerFilter = ep3_lower_filter; -+ rail->Generic.Operations.RaiseFilter = ep3_raise_filter; -+ rail->Generic.Operations.NodeDisconnected = ep3_node_disconnected; -+ -+ rail->Generic.Operations.FlushFilters = ep3_flush_filters; -+ rail->Generic.Operations.FlushQueues = ep3_flush_queues; -+ -+ rail->Generic.Operations.AllocInputQ = ep3_alloc_inputq; -+ rail->Generic.Operations.FreeInputQ = ep3_free_inputq; -+ rail->Generic.Operations.EnableInputQ = ep3_enable_inputq; -+ rail->Generic.Operations.DisableInputQ = ep3_disable_inputq; -+ rail->Generic.Operations.PollInputQ = ep3_poll_inputq; -+ -+ rail->Generic.Operations.AllocOutputQ = ep3_alloc_outputq; -+ rail->Generic.Operations.FreeOutputQ = ep3_free_outputq; -+ rail->Generic.Operations.OutputQMsg = ep3_outputq_msg; -+ rail->Generic.Operations.OutputQState = ep3_outputq_state; -+ rail->Generic.Operations.OutputQSend = ep3_outputq_send; -+ -+ rail->Generic.Operations.FillOutStats = ep3_fillout_stats; -+ -+ rail->Generic.Devinfo = dev->Devinfo; -+ -+ printk ("%s: connected via elan3 rev%c device %d\n", rail->Generic.Name, -+ 'a' + dev->Devinfo.dev_revision_id, dev->Instance); -+ -+ return (EP_RAIL *) rail; -+} -+ -+void -+ep3_destroy_rail (EP_RAIL *r) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ -+ KMEM_FREE (rail, sizeof (EP3_RAIL)); -+} -+ -+static int -+ep3_attach_rail (EP3_RAIL *rail) -+{ -+ ELAN3_DEV *dev = rail->Device; -+ ELAN3_CTXT *ctxt; -+ ELAN_CAPABILITY *cap; -+ int ctx; -+ unsigned long flags; -+ -+ if ((ctxt = elan3_alloc (dev, TRUE)) == (ELAN3_CTXT *) NULL) -+ { -+ printk ("%s: cannot allocate elan context\n", rail->Generic.Name); -+ return -ENXIO; -+ } -+ -+ ctxt->Operations = &ep3_elan3_ops; -+ ctxt->Private = (void *) rail; -+ -+ /* Initialise a capability and attach to the elan*/ -+ KMEM_ALLOC (cap, ELAN_CAPABILITY *, sizeof (ELAN_CAPABILITY), TRUE); -+ -+ elan_nullcap (cap); -+ -+ cap->cap_type = ELAN_CAP_TYPE_KERNEL; -+ cap->cap_version = ELAN_CAP_VERSION_NUMBER; -+ cap->cap_mycontext = ELAN3_MRF_CONTEXT_NUM | SYS_CONTEXT_BIT; -+ cap->cap_lowcontext = ELAN3_MRF_CONTEXT_NUM | SYS_CONTEXT_BIT; -+ cap->cap_highcontext = ELAN3_MRF_CONTEXT_NUM | SYS_CONTEXT_BIT; -+ cap->cap_railmask = 1 << dev->Devinfo.dev_rail; -+ -+ /* Ensure the context filter is raised while we initialise */ -+ elan3_block_inputter (ctxt, TRUE); -+ -+ if (elan3_doattach (ctxt, cap) != 0) -+ { -+ printk ("%s: cannot attach to kernel context\n", rail->Generic.Name); -+ -+ KMEM_FREE (cap, sizeof (ELAN_CAPABILITY)); -+ elan3_free (ctxt); -+ return -ENXIO; -+ } -+ KMEM_FREE (cap, sizeof (ELAN_CAPABILITY)); -+ -+ /* now attach to all the kernel comms input/dmaring/data contexts */ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ -+ for (ctx = ELAN3_DMARING_BASE_CONTEXT_NUM; ctx <= ELAN3_DMARING_TOP_CONTEXT_NUM; ctx++) -+ { -+ /* place it in the info table. 
NOTE: don't call elan3mmu_set_info, as this */ -+ /* will queue the info again on the devices info list */ -+ dev->CtxtTable[ctx] = ctxt; -+ -+ elan3mmu_set_context_filter (dev, ctx|SYS_CONTEXT_BIT, TRUE, 0, NULL); -+ elan3mmu_attach (dev, ctx, ctxt->Elan3mmu, ctxt->RouteTable->Table, ctxt->RouteTable->Size-1); -+ } -+ -+ for (ctx = ELAN3_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN3_KCOMM_TOP_CONTEXT_NUM; ctx++) -+ { -+ /* place it in the info table. NOTE: don't call elan3mmu_set_info, as this */ -+ /* will queue the info again on the devices info list */ -+ dev->CtxtTable[ctx] = ctxt; -+ -+ elan3mmu_set_context_filter (dev, ctx|SYS_CONTEXT_BIT, TRUE, 0, NULL); -+ elan3mmu_attach (dev, ctx, ctxt->Elan3mmu, ctxt->RouteTable->Table, ctxt->RouteTable->Size-1); -+ } -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ -+ /* Stash the ctxt,commandport, mmu and route table */ -+ rail->Ctxt = ctxt; -+ rail->CommandPort = ctxt->CommandPort; -+ rail->Elan3mmu = ctxt->Elan3mmu; -+ rail->RouteTable = ctxt->RouteTable; -+ -+ return 0; -+} -+ -+static void -+ep3_detach_rail (EP3_RAIL *rail) -+{ -+ ELAN3_DEV *dev = rail->Device; -+ unsigned long flags; -+ int ctx; -+ -+ /* detach from the elan */ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ -+ for (ctx = ELAN3_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN3_KCOMM_TOP_CONTEXT_NUM; ctx++) -+ { -+ dev->CtxtTable[ctx] = NULL; -+ elan3mmu_detach (dev, ctx); -+ } -+ -+ for (ctx = ELAN3_DMARING_BASE_CONTEXT_NUM; ctx <= ELAN3_DMARING_TOP_CONTEXT_NUM; ctx++) -+ { -+ dev->CtxtTable[ctx] = NULL; -+ elan3mmu_detach (dev, ctx); -+ } -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ -+ elan3_dodetach(rail->Ctxt); -+ elan3_free (rail->Ctxt); -+ -+ rail->Ctxt = NULL; -+ rail->CommandPort = 0; -+ rail->Elan3mmu = NULL; -+ rail->RouteTable = NULL; -+} -+ -+int -+ep3_start_rail (EP_RAIL *r) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ int i, res; -+ unsigned long flags; -+ -+ if ((res = ep3_attach_rail (rail)) != 0) -+ return res; -+ -+ spin_lock_init (&rail->CookieLock); -+ kmutex_init (&rail->HaltOpMutex); -+ kcondvar_init (&rail->HaltOpSleep); -+ -+ /* Initialise event interrupt cookie table */ -+ InitialiseCookieTable (&rail->CookieTable); -+ -+ /* Load and map the thread code */ -+ rail->ThreadCode = threadcode_elan3; -+ if (ep_loadcode (&rail->Generic, &rail->ThreadCode) != ESUCCESS) -+ goto failed; -+ -+ /* Map the command port to be visible to the Elan */ -+ ep3_ioaddr_map (&rail->Generic, EP3_COMMANDPORT_ADDR, rail->Ctxt->CommandPage, PAGESIZE, EP_PERM_WRITE); -+ rail->CommandPortAddr = EP3_COMMANDPORT_ADDR + (rail->Ctxt->CommandPort - rail->Ctxt->CommandPage); -+ -+ /* Allocate the elan visible sdram/main memory */ -+ if ((rail->RailElan = ep_alloc_elan (&rail->Generic, sizeof (EP3_RAIL_ELAN), 0, &rail->RailElanAddr)) == 0 || -+ (rail->RailMain = ep_alloc_main (&rail->Generic, sizeof (EP3_RAIL_MAIN), 0, &rail->RailMainAddr)) == 0) -+ { -+ goto failed; -+ } -+ -+ /* Allocate the system input queues at their fixed elan address */ -+ if (! 
(rail->QueueDescs = ep_alloc_memory_elan (&rail->Generic, EP_SYSTEM_QUEUE_BASE, PAGESIZE, EP_PERM_ALL, 0))) -+ goto failed; -+ -+ /* Initialise all queue entries to be full */ -+ for (i = 0; i < EP_NUM_SYSTEMQ; i++) -+ elan3_sdram_writel (rail->Device, EP_SYSTEMQ_DESC(rail->QueueDescs, i) + offsetof (EP3_InputQueue, q_state), E3_QUEUE_FULL); -+ -+ /* initialise the dma rings */ -+ if (DmaRingsCreate (rail)) -+ goto failed; -+ -+ if (InitialiseDmaRetries (rail)) -+ goto failed; -+ -+ if (ep3_init_probenetwork (rail)) -+ goto failed; -+ -+ /* can now drop the context filter for the system context */ -+ spin_lock_irqsave (&rail->Device->IntrLock, flags); -+ elan3mmu_set_context_filter (rail->Device, ELAN3_MRF_CONTEXT_NUM|SYS_CONTEXT_BIT, FALSE, 0, NULL); -+ spin_unlock_irqrestore (&rail->Device->IntrLock, flags); -+ -+ return 0; -+ -+ failed: -+ printk ("ep3_start_rail: failed for rail %d\n", rail->Generic.Number); -+ ep3_stop_rail (&rail->Generic); -+ -+ return -ENOMEM; -+} -+ -+void -+ep3_stall_rail (EP_RAIL *r) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ int ctx; -+ unsigned long flags; -+ -+ /* raise all the context filters */ -+ spin_lock_irqsave (&rail->Device->IntrLock, flags); -+ -+ for (ctx = ELAN3_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN3_KCOMM_TOP_CONTEXT_NUM; ctx++) -+ elan3mmu_set_context_filter (rail->Device, ctx|SYS_CONTEXT_BIT, TRUE, 0, NULL); -+ -+ for (ctx = ELAN3_DMARING_BASE_CONTEXT_NUM; ctx <= ELAN3_DMARING_TOP_CONTEXT_NUM; ctx++) -+ elan3mmu_set_context_filter (rail->Device, ctx|SYS_CONTEXT_BIT, TRUE, 0, NULL); -+ -+ elan3mmu_set_context_filter (rail->Device, ELAN3_MRF_CONTEXT_NUM|SYS_CONTEXT_BIT, TRUE, 0, NULL); -+ -+ spin_unlock_irqrestore (&rail->Device->IntrLock, flags); -+} -+ -+void -+ep3_stop_rail (EP_RAIL *r) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ -+ ep3_destroy_probenetwork (rail); -+ -+ if (rail->DmaRetryInitialised) -+ DestroyDmaRetries (rail); -+ -+ DmaRingsRelease(rail); -+ -+ if (rail->Generic.State == EP_RAIL_STATE_RUNNING) -+ { -+ KMEM_FREE (rail->MainCookies, rail->Generic.Position.pos_nodes * sizeof (E3_uint32)); -+ -+ ep_free_elan (&rail->Generic, rail->ElanCookies, rail->Generic.Position.pos_nodes * sizeof (E3_uint32)); -+ } -+ -+ if (rail->QueueDescs) -+ ep_free_memory_elan (&rail->Generic, EP_SYSTEM_QUEUE_BASE); -+ rail->QueueDescs = 0; -+ -+ if (rail->RailMain) -+ ep_free_main (&rail->Generic, rail->RailMainAddr, sizeof (EP3_RAIL_MAIN)); -+ rail->RailMain = 0; -+ -+ if (rail->RailElan) -+ ep_free_elan (&rail->Generic, rail->RailElanAddr, sizeof (EP3_RAIL_ELAN)); -+ rail->RailElan = 0; -+ -+ ep_unloadcode (&rail->Generic, &rail->ThreadCode); -+ -+ DestroyCookieTable (&rail->CookieTable); -+ -+ ep_perrail_unmap (&rail->Generic, rail->Ctxt->CommandPage, PAGESIZE); -+ -+ kcondvar_destroy (&rail->HaltOpSleep); -+ kmutex_destroy (&rail->HaltOpMutex); -+ spin_lock_destroy (&rail->CookieLock); -+ -+ ep3_detach_rail (rail); -+} -+ -+void -+ep3_position_found (EP_RAIL *r, ELAN_POSITION *pos) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ sdramaddr_t addr; -+ -+ rail->SwitchBroadcastLevelTick = lbolt; -+ -+ elan3_sdram_writel (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, NodeId), pos->pos_nodeid); -+ -+ /* Allocate Network Identify cookie state */ -+ KMEM_ZALLOC (rail->MainCookies, E3_uint32 *, pos->pos_nodes * sizeof (E3_uint32), 1); -+ -+ if (! 
(addr = ep_alloc_elan (&rail->Generic, pos->pos_nodes * sizeof (E3_uint32), 0, &rail->ElanCookies))) -+ panic ("ep: PositionFound: cannot allocate elan cookies array\n"); -+ -+ elan3_sdram_zeroq_sdram (rail->Device, addr, pos->pos_nodes * sizeof (E3_uint32)); -+ -+ ep3_probe_position_found (rail, pos); -+} -+ -+sdramaddr_t -+ep3_sdram_alloc (EP_RAIL *r, EP_ADDR addr, unsigned size) -+{ -+ return elan3_sdram_alloc (((EP3_RAIL *) r)->Device, size); -+} -+ -+void -+ep3_sdram_free (EP_RAIL *r, sdramaddr_t addr, unsigned size) -+{ -+ elan3_sdram_free (((EP3_RAIL *) r)->Device, addr, size); -+} -+ -+void -+ep3_sdram_writeb (EP_RAIL *r, sdramaddr_t addr, unsigned char val) -+{ -+ elan3_sdram_writeb (((EP3_RAIL *) r)->Device, addr, val); -+} -+ -+void -+ep3_flush_tlb (EP_RAIL *r) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ ELAN3_DEV *dev = rail->Device; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->TlbLock, flags); -+ -+ IncrStat (dev, TlbFlushes); -+ -+ write_reg32 (dev, Cache_Control_Reg.ContReg, dev->Cache_Control_Reg | MMU_FLUSH); -+ mmiob (); -+ spin_unlock_irqrestore (&dev->TlbLock, flags); -+ -+ while (! (read_reg32 (dev, Cache_Control_Reg.ContReg) & MMU_FLUSHED)) -+ mb(); -+} -+ -+void -+ep3_load_system_route (EP_RAIL *r, unsigned vp, unsigned lowNode, unsigned highNode) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ E3_uint16 flits[MAX_FLITS]; -+ int nflits; -+ -+ nflits = GenerateRoute (&rail->Generic.Position, flits, lowNode, highNode, DEFAULT_ROUTE_TIMEOUT, HIGH_ROUTE_PRIORITY); -+ -+ if (LoadRoute (rail->Device, rail->RouteTable, vp, ELAN3_MRF_CONTEXT_NUM|SYS_CONTEXT_BIT, nflits, flits) != 0) -+ { -+ /* XXXX: whilst LoadRoute() can fail - it is not likely. */ -+ panic ("ep3_load_system_route: cannot load p2p route entry\n"); -+ } -+} -+ -+void -+ep3_load_node_route (EP_RAIL *r, unsigned nodeId) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ E3_uint16 flits[MAX_FLITS]; -+ int nflits; -+ -+ nflits = GenerateRoute (&rail->Generic.Position, flits, nodeId, nodeId, DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY); -+ -+ if (LoadRoute (rail->Device, rail->RouteTable, EP_VP_DATA(nodeId), EP3_CONTEXT_NUM(rail->Generic.Position.pos_nodeid), nflits, flits) != 0) -+ panic ("ep3_load_node_route: cannot load p2p data route entry\n"); -+} -+ -+void -+ep3_unload_node_route (EP_RAIL *r, unsigned nodeId) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ -+ ClearRoute (rail->Device, rail->RouteTable, EP_VP_DATA(nodeId)); -+} -+ -+void -+ep3_lower_filter (EP_RAIL *r, unsigned nodeId) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&rail->Device->IntrLock, flags); -+ elan3mmu_set_context_filter (rail->Device, EP3_CONTEXT_NUM(nodeId), 0, 0, NULL); -+ spin_unlock_irqrestore (&rail->Device->IntrLock, flags); -+} -+ -+void -+ep3_raise_filter (EP_RAIL *r, unsigned nodeId) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&rail->Device->IntrLock, flags); -+ elan3mmu_set_context_filter (rail->Device, EP3_CONTEXT_NUM(nodeId), 1, 0, NULL); -+ spin_unlock_irqrestore (&rail->Device->IntrLock, flags); -+} -+ -+void -+ep3_node_disconnected (EP_RAIL *r, unsigned nodeId) -+{ -+ FreeStalledDmas ((EP3_RAIL *) r, nodeId); -+} -+ -+void -+ep3_fillout_stats(EP_RAIL *r, char *str) -+{ -+ /* no stats here yet */ -+ /* EP3_RAIL *ep3rail = (EP3_RAIL *)r; */ -+} -diff -urN clean/drivers/net/qsnet/ep/kcomm_elan3.h linux-2.6.9/drivers/net/qsnet/ep/kcomm_elan3.h ---- clean/drivers/net/qsnet/ep/kcomm_elan3.h 1969-12-31 19:00:00.000000000 -0500 -+++ 
linux-2.6.9/drivers/net/qsnet/ep/kcomm_elan3.h 2004-12-14 05:19:23.000000000 -0500 -@@ -0,0 +1,431 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __EP_KCOMM_ELAN3_H -+#define __EP_KCOMM_ELAN3_H -+ -+#ident "@(#)$Id: kcomm_elan3.h,v 1.53 2004/12/14 10:19:23 mike Exp $ $Name: QSNETMODULES-5-11-3_20050907 $" -+/* $Source: /cvs/master/quadrics/epmod/kcomm_elan3.h,v $*/ -+ -+#if !defined(__ELAN3__) -+#include -+#include -+#include -+#include -+#include -+#include -+#endif /* !defined(__ELAN3__) */ -+ -+#include -+ -+/* private address allocation */ -+#define EP3_TEXT_BASE 0xFF000000 /* base address for thread code (defined in makerules.elan3) */ -+#define EP3_COMMANDPORT_ADDR 0xFFF00000 /* mapping address for elan command port */ -+ -+#define EP3_STACK_SIZE 1024 /* default thread code stack size */ -+ -+#define EP3_PACEMAKER_EVENTADDR 0xfeedbeef /* mis-aligned address used by heartbeat pacemaker */ -+ -+/* context number allocation */ -+#define EP3_CONTEXT_NUM(nodeId) ((ELAN3_KCOMM_BASE_CONTEXT_NUM + (nodeId)) | SYS_CONTEXT_BIT) -+#define EP3_CONTEXT_ISDATA(ctx) (((ctx) & MAX_ROOT_CONTEXT_MASK) >= ELAN3_KCOMM_BASE_CONTEXT_NUM && \ -+ ((ctx) & MAX_ROOT_CONTEXT_MASK) <= ELAN3_KCOMM_TOP_CONTEXT_NUM) -+#define EP3_CONTEXT_TO_NODE(ctx) (((ctx) & MAX_ROOT_CONTEXT_MASK) - ELAN3_KCOMM_BASE_CONTEXT_NUM) -+ -+/* DMA issuing rings */ -+#define EP3_RING_CRITICAL 0 -+#define EP3_RING_CRITICAL_LEN 128 -+#define EP3_RING_HIGH_PRI 1 -+#define EP3_RING_HIGH_PRI_LEN 64 -+#define EP3_RING_LOW_PRI 2 -+#define EP3_RING_LOW_PRI_LEN 32 -+#define EP3_NUM_RINGS 3 -+ -+/* Value to "return" from c_close() when envelope handled by the trap handler */ -+#define EP3_PAckStolen 4 -+ -+/* unimplemented instruction trap types for thread code */ -+#define EP3_UNIMP_TRAP_NO_DESCS 0 -+#define EP3_UNIMP_TRAP_PACKET_NACKED 1 -+#define EP3_UNIMP_THREAD_HALTED 2 -+#define EP3_NUM_UNIMP_TRAPS 3 -+ -+/* forward declarations */ -+typedef struct ep3_rail EP3_RAIL; -+ -+/* block copy elan3 inputter queue - with waitevent0 */ -+typedef struct ep3_inputqueue -+{ -+ volatile E3_uint32 q_state; /* queue is full=bit0, queue is locked=bit8 */ -+ volatile E3_Addr q_bptr; /* block aligned ptr to current back item */ -+ E3_uint32 q_size; /* size of queue item; 0x1 <= size <= (0x40 * 5) */ -+ E3_Addr q_top; /* block aligned ptr to last queue item */ -+ E3_Addr q_base; /* block aligned ptr to first queue item */ -+ volatile E3_Addr q_fptr; /* block aligned ptr to current front item */ -+ E3_BlockCopyEvent q_event; /* queue block copy event */ -+ E3_uint32 q_pad[4]; /* pad to 64 bytes */ -+ E3_Addr q_wevent; /* WaitEvent0 struct */ -+ E3_int32 q_wcount; -+} EP3_InputQueue; -+ -+ -+#if !defined(__ELAN3__) -+ -+/* dma retry types and retry times */ -+typedef struct ep3_retry_dma -+{ -+ struct list_head Link; /* chained on free/retry list */ -+ long RetryTime; /* "lbolt" to retry at */ -+ E3_DMA_BE Dma; /* DMA (in main memory) */ -+} EP3_RETRY_DMA; -+ -+typedef struct ep3_dma_ring -+{ -+ sdramaddr_t pEvent; -+ E3_Addr epEvent; -+ -+ sdramaddr_t pDma; -+ E3_Addr epDma; -+ -+ E3_uint32 *pDoneBlk; -+ E3_Addr epDoneBlk; -+ -+ int Entries; /* number of slots in array */ -+ int Position; /* current position in array */ -+ -+ ioaddr_t CommandPort; -+ ioaddr_t CommandPage; -+ DeviceMappingHandle CommandPageHandle; -+} EP3_DMA_RING; -+ -+#define DMA_RING_EVENT(ring,n) ((ring)->pEvent + 
(n)*sizeof (E3_BlockCopyEvent)) -+#define DMA_RING_EVENT_ELAN(ring,n) ((ring)->epEvent + (n)*sizeof (E3_BlockCopyEvent)) -+ -+#define DMA_RING_DMA(ring,n) ((ring)->pDma + (n)*sizeof (E3_DMA)) -+#define DMA_RING_DMA_ELAN(ring,n) ((ring)->epDma + (n)*sizeof (E3_DMA)) -+ -+#define DMA_RING_DONE_ELAN(ring,n) ((ring)->epDoneBlk + (n)*sizeof (E3_uint32)) -+ -+/* Event interrupt cookie operations and lookup table */ -+typedef struct ep3_cookie_ops -+{ -+ void (*Event) (EP3_RAIL *rail, void *arg); /* called from the interrupt handler when an event is "set" */ -+ void (*DmaRetry) (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int error); /* called from the interrupt handler when a DMA is "nacked" */ -+ void (*DmaCancelled)(EP3_RAIL *rail, void *arg, E3_DMA_BE *dma); /* called from the interrupt handler/flush disconnecting when cancelled. */ -+ void (*DmaVerify) (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma); /* called from multiple places, to check dma is consistent with state. */ -+} EP3_COOKIE_OPS; -+ -+typedef struct ep3_cookie -+{ -+ struct ep3_cookie *Next; /* Cookies are chained in hash table. */ -+ E3_uint32 Cookie; /* Cookie stored in ev_Type */ -+ EP3_COOKIE_OPS *Operations; /* Cookie operations */ -+ void *Arg; /* User's argument. */ -+} EP3_COOKIE; -+ -+#define EP3_COOKIE_HASH_SIZE (256) -+#define EP3_HASH_COOKIE(a) ((((a) >> 3) ^ ((a) >> 7) ^ ((a) >> 11)) & (EP3_COOKIE_HASH_SIZE-1)) -+ -+typedef struct ep3_cookie_table -+{ -+ spinlock_t Lock; -+ EP3_COOKIE *Entries[EP3_COOKIE_HASH_SIZE]; -+} EP3_COOKIE_TABLE; -+ -+#endif /* !defined(__ELAN3__) */ -+ -+#define EP3_EVENT_FREE ((1 << 4) | EV_WCOPY) -+#define EP3_EVENT_ACTIVE ((2 << 4) | EV_WCOPY) -+/* DONE == Cookie */ -+#define EP3_EVENT_FAILED ((3 << 4) | EV_WCOPY) -+#define EP3_EVENT_PRIVATE ((4 << 4) | EV_WCOPY) -+ -+/* The event cookie can get posted (and seen) before the write has */ -+/* hit main memory - in this case the event count is <= 0 and the block */ -+/* will be marked as ACTIVE - but could transition to DONE at any time */ -+/* Also for a word copy event, the value written into the "done" word */ -+/* can be the event interrupt cookie rather than the "source" value */ -+/* this happens since the uCode does not wait for the write to have */ -+/* occurred before overwriting TMP_0 with the cookie */ -+#define EP3_EVENT_FIRING(edev, event, cookie, done) \ -+ (((((done) & ~(EV_TYPE_BCOPY | EV_TYPE_MASK_EVIRQ)) == (cookie).Cookie) || (done) == EP3_EVENT_ACTIVE) && \ -+ (int) elan3_sdram_readl (edev, (event) + offsetof (E3_BlockCopyEvent, ev_Count)) <= 0) -+#define EP3_EVENT_FIRED(cookie, done) \ -+ (((done) & ~(EV_TYPE_BCOPY | EV_TYPE_MASK_EVIRQ)) == (cookie).Cookie) -+ -+ -+/* Time limit to wait while event is firing and block write has not occurred */ -+#define EP3_EVENT_FIRING_TLIMIT 16384 /* 1023 uS */ -+ -+#define EP3_INIT_COPY_EVENT(event, cookie, dest, intr) \ -+{ \ -+ (event).ev_Count = 0; \ -+ (event).ev_Type = (intr) ? 
EV_TYPE_BCOPY | EV_TYPE_EVIRQ | (cookie).Cookie : EV_TYPE_BCOPY; \ -+ (event).ev_Source = (cookie).Cookie | EV_WCOPY; \ -+ (event).ev_Dest = (dest) | EV_TYPE_BCOPY_WORD; \ -+} -+ -+#if !defined(__ELAN3__) -+ -+/* Generic input queues which can be polled */ -+typedef struct ep3_inputq -+{ -+ EP3_COOKIE q_cookie; -+ unsigned int q_slotSize; -+ unsigned int q_slotCount; -+ -+ void *q_slots; -+ EP_ADDR q_slotsAddr; -+ -+ EP_INPUTQ_CALLBACK *q_callback; -+ void *q_arg; -+ -+ sdramaddr_t q_desc; -+ E3_Addr q_descAddr; -+ -+ E3_Addr q_base; -+ E3_Addr q_top; -+ E3_Addr q_fptr; -+ -+ E3_uint32 q_waitCount; -+} EP3_INPUTQ; -+ -+typedef struct ep3_outputq -+{ -+ EP3_COOKIE q_cookie; -+ -+ unsigned int q_slotCount; /* # slots allocated */ -+ unsigned int q_slotSize; /* size of each slot (rounded up) */ -+ -+ sdramaddr_t q_elan; -+ E3_Addr q_elanAddr; -+ -+ void *q_main; -+ E3_Addr q_mainAddr; -+} EP3_OUTPUTQ; -+ -+#endif /* !defined(__ELAN3__) */ -+ -+/* per-rail elan memory portion of device */ -+typedef struct ep3_rail_elan -+{ -+ E3_uint16 ProbeSource0[TR_TRACEROUTE_ENTRIES]; /* 32 byte aligned */ -+ E3_uint16 ProbeSource1[TR_TRACEROUTE_ENTRIES]; -+ -+ E3_BlockCopyEvent ProbeDone; /* 16 byte aligned */ -+ E3_Event ProbeStart; /* 8 byte aligned */ -+ -+ E3_uint32 ProbeType; /* 4 byte aligned */ -+ E3_uint32 ProbeLevel; -+ -+ E3_uint32 NodeId; -+} EP3_RAIL_ELAN; -+ -+/* values for ProbeType */ -+#define PROBE_SINGLE 0 -+#define PROBE_MULTIPLE 1 -+/* number of attempts for each type */ -+#define PROBE_SINGLE_ATTEMPTS 10 -+#define PROBE_SINGLE_TIMEOUTS 5 -+#define PROBE_MULTIPLE_ATTEMPTS 20 -+#define PROBE_MULTIPLE_TIMEOUTS 10 -+ -+/* per-rail main memory portion of device */ -+typedef struct ep3_rail_main -+{ -+ E3_uint16 ProbeDest0[TR_TRACEROUTE_ENTRIES]; /* 32 byte aligned */ -+ E3_uint16 ProbeDest1[TR_TRACEROUTE_ENTRIES]; -+ -+ E3_uint32 ProbeDone; /* 4 byte aligned */ -+ E3_uint32 ProbeResult; -+ E3_uint32 ProbeLevel; -+} EP3_RAIL_MAIN; -+ -+#if !defined(__ELAN3__) -+ -+struct ep3_rail -+{ -+ EP_RAIL Generic; /* Generic rail */ -+ -+ ELAN3_DEV *Device; /* Elan device we're using */ -+ ELAN3_CTXT *Ctxt; /* Elan context struct */ -+ ioaddr_t CommandPort; /* commandport from context */ -+ E3_Addr CommandPortAddr; /* and address mapped into elan */ -+ -+ ELAN3_ROUTE_TABLE *RouteTable; /* routetable from context */ -+ ELAN3MMU *Elan3mmu; /* elanmmu from context */ -+ -+ EP3_COOKIE_TABLE CookieTable; /* Event cookie table */ -+ -+ EP_CODE ThreadCode; /* copy of thread code */ -+ unsigned int CommandPortEventTrap; /* flag to indicate command port eventint queue overflow trap */ -+ -+ sdramaddr_t RailElan; /* Elan visible main/sdram portions of */ -+ E3_Addr RailElanAddr; /* device structure */ -+ EP3_RAIL_MAIN *RailMain; -+ E3_Addr RailMainAddr; -+ -+ /* small system message queues */ -+ sdramaddr_t QueueDescs; /* Input Queue descriptors */ -+ -+ /* Network position prober */ -+ E3_Addr ProbeStack; /* Network position thread command structure */ -+ EP3_COOKIE ProbeCookie; /* event cookie for Done event */ -+ kcondvar_t ProbeWait; /* place to wait on probe thread */ -+ spinlock_t ProbeLock; /* and lock */ -+ volatile int ProbeDone; /* and flag to indicate it's done */ -+ -+ E3_uint16 ProbeDest0[TR_TRACEROUTE_ENTRIES]; /* last result of CheckNetworkPosition */ -+ E3_uint16 ProbeDest1[TR_TRACEROUTE_ENTRIES]; -+ E3_uint32 ProbeResult; -+ -+ long ProbeLevelTick[ELAN_MAX_LEVELS]; -+ long SwitchBroadcastLevelTick; -+ -+ /* rings for issuing dmas */ -+ EP3_DMA_RING DmaRings[EP3_NUM_RINGS]; -+ -+ /* retry 
lists for dmas */ -+ struct list_head DmaRetries[EP_NUM_RETRIES]; /* Dma retry lists */ -+ struct list_head DmaRetryFreeList; /* and free list */ -+ u_int DmaRetryCount; /* and total retry count */ -+ u_int DmaRetryReserved; /* and number reserved */ -+ u_int DmaRetryThreadShouldStall; /* count of reasons to stall retries */ -+ u_int DmaRetryThreadStarted:1; /* dma retry thread running */ -+ u_int DmaRetryThreadShouldStop:1; /* but should stop */ -+ u_int DmaRetryThreadStopped:1; /* and now it's stopped */ -+ u_int DmaRetryInitialised:1; /* have initialised dma retries */ -+ -+ spinlock_t DmaRetryLock; /* spinlock protecting lists */ -+ kcondvar_t DmaRetryWait; /* place retry thread sleeps */ -+ long DmaRetryTime; /* and when it will next wakeup */ -+ unsigned int DmaRetrySleeping; /* and it's sleeping there */ -+ -+ /* Network Identify Cookies */ -+ E3_uint32 *MainCookies; /* One cookie allocator per-node for main */ -+ E3_Addr ElanCookies; /* and one for elan */ -+ spinlock_t CookieLock; /* spinlock to protect main cookies */ -+ -+ /* Halt operation flags for flushing. */ -+ kmutex_t HaltOpMutex; /* serialize access to halt operations */ -+ unsigned int HaltOpCompleted; /* flag to indicate halt operation completed */ -+ kcondvar_t HaltOpSleep; /* place to wait for it to complete */ -+ -+ /* Network error state */ -+ kcondvar_t NetworkErrorSleep; /* place to sleep for network error halt operation */ -+ u_int NetworkErrorFlushed; /* and flag to indicate flushed */ -+ -+ -+ EP3_RAIL_STATS Stats; /* statistics */ -+}; -+ -+/* support.c */ -+ -+extern ELAN3_OPS ep3_elan3_ops; -+ -+extern E3_uint32 LocalCookie (EP3_RAIL *rail, unsigned int remoteNode); -+extern E3_uint32 RemoteCookie (EP3_RAIL *rail, unsigned int remoteNode); -+ -+extern void InitialiseCookieTable (EP3_COOKIE_TABLE *table); -+extern void DestroyCookieTable (EP3_COOKIE_TABLE *table); -+extern void RegisterCookie (EP3_COOKIE_TABLE *table, EP3_COOKIE *cookie, -+ E3_Addr event, EP3_COOKIE_OPS *ops, void *arg); -+extern void DeregisterCookie (EP3_COOKIE_TABLE *table, EP3_COOKIE *cookie); -+extern EP3_COOKIE *LookupCookie (EP3_COOKIE_TABLE *table, uint32_t cookie); -+extern EP3_COOKIE *LookupEventCookie (EP3_RAIL *rail, EP3_COOKIE_TABLE *table, E3_Addr); -+ -+extern int DmaRingsCreate (EP3_RAIL *rail); -+extern void DmaRingsRelease (EP3_RAIL *rail); -+extern int IssueDma (EP3_RAIL *rail, E3_DMA_BE *dma, int type, int retryThread); -+ -+extern int IssueWaitevent (EP3_RAIL *rail, E3_Addr value); -+extern void IssueSetevent (EP3_RAIL *rail, E3_Addr value); -+extern void IssueRunThread (EP3_RAIL *rail, E3_Addr value); -+extern long DmaRetryTime (int type); -+extern int InitialiseDmaRetries (EP3_RAIL *rail); -+extern void DestroyDmaRetries (EP3_RAIL *rail); -+extern int ReserveDmaRetries (EP3_RAIL *rail, int count, EP_ATTRIBUTE attr); -+extern void ReleaseDmaRetries (EP3_RAIL *rail, int count); -+extern void StallDmaRetryThread (EP3_RAIL *rail); -+extern void ResumeDmaRetryThread (EP3_RAIL *rail); -+extern void QueueDmaForRetry (EP3_RAIL *rail, E3_DMA_BE *dma, int interval); -+extern void QueueDmaOnStalledList (EP3_RAIL *rail, E3_DMA_BE *dma); -+extern void FreeStalledDmas (EP3_RAIL *rail, unsigned int nodeId); -+ -+extern void SetQueueLocked(EP3_RAIL *rail, sdramaddr_t qaddr); -+ -+/* threadcode_elan3.c */ -+extern E3_Addr ep3_init_thread (ELAN3_DEV *dev, E3_Addr fn, E3_Addr addr, sdramaddr_t stack, -+ int stackSize, int nargs, ...); -+ -+/* probenetwork.c */ -+extern int ep3_init_probenetwork (EP3_RAIL *rail); -+extern void 
ep3_destroy_probenetwork (EP3_RAIL *rail); -+extern void ep3_probe_position_found (EP3_RAIL *rail, ELAN_POSITION *pos); -+extern int ep3_probe_route (EP_RAIL *r, int level, int sw, int nodeid, int *linkup, int *linkdown, int attempts, EP_SWITCH *lsw); -+extern int ep3_check_position (EP_RAIL *rail); -+ -+/* neterr_elan3.c */ -+extern void ep3_neterr_fixup (EP_RAIL *r, unsigned int nodeId, EP_NETERR_COOKIE *cookies); -+ -+/* kcomm_elan3.c */ -+extern EP_RAIL *ep3_create_rail (EP_SYS *sys, ELAN3_DEV *dev); -+extern void ep3_destroy_rail (EP_RAIL *rail); -+ -+extern int ep3_start_rail (EP_RAIL *rail); -+extern void ep3_stall_rail (EP_RAIL *rail); -+extern void ep3_stop_rail (EP_RAIL *rail); -+ -+extern void ep3_position_found (EP_RAIL *rail, ELAN_POSITION *pos); -+ -+extern sdramaddr_t ep3_sdram_alloc (EP_RAIL *rail, EP_ADDR addr, unsigned int size); -+extern void ep3_sdram_free (EP_RAIL *rail, sdramaddr_t addr, unsigned int size); -+extern void ep3_sdram_writeb (EP_RAIL *rail, sdramaddr_t addr, unsigned char val); -+ -+extern void ep3_flush_tlb (EP_RAIL *r); -+extern void ep3_load_system_route (EP_RAIL *r, unsigned int vp, unsigned int lowNode, unsigned int highNode); -+extern void ep3_load_node_route (EP_RAIL *r, unsigned int nodeId); -+extern void ep3_unload_node_route (EP_RAIL *r, unsigned int nodeId); -+extern void ep3_lower_filter (EP_RAIL *r, unsigned int nodeId); -+extern void ep3_raise_filter (EP_RAIL *rail, unsigned int nodeId); -+extern void ep3_node_disconnected (EP_RAIL *r, unsigned int nodeId); -+ -+extern void ep3_fillout_stats(EP_RAIL *rail, char *str); -+ -+/* kmap_elan3.c */ -+extern void ep3_kaddr_map (EP_RAIL *r, EP_ADDR eaddr, virtaddr_t kaddr, unsigned int len, unsigned int perm, int ep_attr); -+extern void ep3_sdram_map (EP_RAIL *r, EP_ADDR eaddr, sdramaddr_t saddr, unsigned int len, unsigned int perm, int ep_attr); -+extern void ep3_ioaddr_map (EP_RAIL *r, EP_ADDR eaddr, ioaddr_t ioaddr, unsigned int len, unsigned int perm); -+extern void ep3_unmap (EP_RAIL *r, EP_ADDR eaddr, unsigned int len); -+extern void *ep3_dvma_reserve (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages); -+extern void ep3_dvma_release (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages, void *private); -+extern void ep3_dvma_set_pte (EP_RAIL *r, void *private, unsigned int index, physaddr_t paddr, unsigned int perm); -+extern physaddr_t ep3_dvma_read_pte (EP_RAIL *r, void *private, unsigned int index); -+extern void ep3_dvma_unload (EP_RAIL *r, void *private, unsigned int index, unsigned int npages); -+ -+/* kmsg_elan3.c */ -+extern EP_INPUTQ *ep3_alloc_inputq (EP_RAIL *r, unsigned int qnum, unsigned int slotSize, unsigned int slotCount, -+ EP_INPUTQ_CALLBACK *callback, void *arg); -+extern void ep3_free_inputq (EP_RAIL *r, EP_INPUTQ *q); -+extern void ep3_enable_inputq (EP_RAIL *r, EP_INPUTQ *q); -+extern void ep3_disable_inputq (EP_RAIL *r, EP_INPUTQ *q); -+extern int ep3_poll_inputq (EP_RAIL *r, EP_INPUTQ *q, int maxCount, EP_INPUTQ_HANDLER *handler, void *arg); -+extern EP_OUTPUTQ *ep3_alloc_outputq (EP_RAIL *r, unsigned int slotSize, unsigned int slotCount); -+extern void ep3_free_outputq (EP_RAIL *r, EP_OUTPUTQ *q); -+extern void *ep3_outputq_msg (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum); -+extern int ep3_outputq_state (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum); -+extern int ep3_outputq_send (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum, unsigned int size, -+ unsigned int nodeId, unsigned int qnum, unsigned int retries); -+ -+/* support_elan3.c */ -+extern void ep3_flush_filters 
(EP_RAIL *r); -+extern void ep3_flush_queues (EP_RAIL *r); -+ -+#endif /* !defined(__ELAN3__) */ -+ -+#endif /* __EP_KCOMM_ELAN3_H */ -diff -urN clean/drivers/net/qsnet/ep/kcomm_elan4.c linux-2.6.9/drivers/net/qsnet/ep/kcomm_elan4.c ---- clean/drivers/net/qsnet/ep/kcomm_elan4.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/kcomm_elan4.c 2004-11-30 07:02:06.000000000 -0500 -@@ -0,0 +1,526 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: kcomm_elan4.c,v 1.19 2004/11/30 12:02:06 mike Exp $ $Name: QSNETMODULES-5-11-3_20050907 $" -+/* $Source: /cvs/master/quadrics/epmod/kcomm_elan4.c,v $*/ -+ -+#include -+#include -+ -+#include -+ -+#include "kcomm_vp.h" -+#include "kcomm_elan4.h" -+#include "conf_linux.h" -+ -+extern EP_CODE threadcode_elan4; -+ -+unsigned int -+ep4_create_rails (EP_SYS *sys, unsigned int disabled) -+{ -+ unsigned int rmask = 0; -+ ELAN4_DEV *dev; -+ EP_RAIL *rail; -+ int i; -+ -+ for (i = 0; i < EP_MAX_RAILS; i++) -+ { -+ if ((dev = elan4_reference_device (i, ELAN4_STATE_STARTED)) != NULL) -+ { -+ if ((rail = ep4_create_rail (sys, dev)) == NULL) -+ elan4_dereference_device (dev); -+ else -+ { -+ if (disabled & (1 << rail->Number)) -+ printk ("%s: auto-start of device disabled by configuration\n", rail->Name); -+ else -+ ep_start_rail (rail); -+ -+ ep_procfs_rail_init(rail); -+ -+ rmask |= (1 << rail->Number); -+ } -+ } -+ } -+ -+ if (rmask) -+ qsnet_debug_alloc(); -+ -+ return rmask; -+} -+ -+EP_RAIL * -+ep4_create_rail (EP_SYS *sys, ELAN4_DEV *dev) -+{ -+ EP4_RAIL *rail; -+ int res; -+ -+ KMEM_ZALLOC (rail, EP4_RAIL *, sizeof (EP4_RAIL), 1); -+ -+ if (rail == NULL) -+ return (EP_RAIL *) NULL; -+ -+ if ((res = ep_init_rail (sys, &rail->r_generic)) != 0) -+ { -+ KMEM_FREE (rail, sizeof (EP4_RAIL)); -+ return (EP_RAIL *) NULL; -+ } -+ -+ rail->r_ctxt.ctxt_dev = dev; -+ -+ /* install our rail operations */ -+ rail->r_generic.Operations.DestroyRail = ep4_destroy_rail; -+ rail->r_generic.Operations.StartRail = ep4_start_rail; -+ rail->r_generic.Operations.StallRail = ep4_stall_rail; -+ rail->r_generic.Operations.StopRail = ep4_stop_rail; -+ -+ rail->r_generic.Operations.SdramAlloc = ep4_sdram_alloc; -+ rail->r_generic.Operations.SdramFree = ep4_sdram_free; -+ rail->r_generic.Operations.SdramWriteb = ep4_sdram_writeb; -+ -+ rail->r_generic.Operations.KaddrMap = ep4_kaddr_map; -+ rail->r_generic.Operations.SdramMap = ep4_sdram_map; -+ rail->r_generic.Operations.Unmap = ep4_unmap; -+ -+ rail->r_generic.Operations.DvmaReserve = ep4_dvma_reserve; -+ rail->r_generic.Operations.DvmaRelease = ep4_dvma_release; -+ rail->r_generic.Operations.DvmaSetPte = ep4_dvma_set_pte; -+ rail->r_generic.Operations.DvmaReadPte = ep4_dvma_read_pte; -+ rail->r_generic.Operations.DvmaUnload = ep4_dvma_unload; -+ rail->r_generic.Operations.FlushTlb = ep4_flush_tlb; -+ -+ rail->r_generic.Operations.ProbeRoute = ep4_probe_route; -+ -+ rail->r_generic.Operations.PositionFound = ep4_position_found; -+ rail->r_generic.Operations.CheckPosition = ep4_check_position; -+ rail->r_generic.Operations.NeterrFixup = ep4_neterr_fixup; -+ -+ rail->r_generic.Operations.LoadSystemRoute = ep4_load_system_route; -+ -+ rail->r_generic.Operations.LoadNodeRoute = ep4_load_node_route; -+ rail->r_generic.Operations.UnloadNodeRoute = ep4_unload_node_route; -+ rail->r_generic.Operations.LowerFilter = ep4_lower_filter; -+ 
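/* descriptive note (not in the original patch): the raise/lower filter operations toggle the per-node context filter as nodes connect and disconnect */ -+ 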
rail->r_generic.Operations.RaiseFilter = ep4_raise_filter; -+ rail->r_generic.Operations.NodeDisconnected = ep4_node_disconnected; -+ -+ rail->r_generic.Operations.FlushFilters = ep4_flush_filters; -+ rail->r_generic.Operations.FlushQueues = ep4_flush_queues; -+ -+ rail->r_generic.Operations.AllocInputQ = ep4_alloc_inputq; -+ rail->r_generic.Operations.FreeInputQ = ep4_free_inputq; -+ rail->r_generic.Operations.EnableInputQ = ep4_enable_inputq; -+ rail->r_generic.Operations.DisableInputQ = ep4_disable_inputq; -+ rail->r_generic.Operations.PollInputQ = ep4_poll_inputq; -+ -+ rail->r_generic.Operations.AllocOutputQ = ep4_alloc_outputq; -+ rail->r_generic.Operations.FreeOutputQ = ep4_free_outputq; -+ rail->r_generic.Operations.OutputQMsg = ep4_outputq_msg; -+ rail->r_generic.Operations.OutputQState = ep4_outputq_state; -+ rail->r_generic.Operations.OutputQSend = ep4_outputq_send; -+ -+ rail->r_generic.Operations.FillOutStats = ep4_fillout_stats; -+ rail->r_generic.Operations.Debug = ep4_debug_rail; -+ -+ rail->r_generic.Devinfo = dev->dev_devinfo; -+ -+ printk ("%s: connected via elan4 rev%c device %d\n", rail->r_generic.Name, -+ 'a' + dev->dev_devinfo.dev_revision_id, dev->dev_instance); -+ -+ return (EP_RAIL *) rail; -+} -+ -+void -+ep4_destroy_rail (EP_RAIL *r) -+{ -+ EP4_RAIL *rail = (EP4_RAIL *) r; -+ -+ elan4_dereference_device (rail->r_ctxt.ctxt_dev); -+ -+ KMEM_FREE (rail, sizeof (EP4_RAIL)); -+} -+ -+static int -+ep4_attach_rail (EP4_RAIL *r) -+{ -+ EP4_RAIL *rail = (EP4_RAIL *) r; -+ ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev; -+ unsigned ctx; -+ -+ if (elan4_insertctxt (dev, &rail->r_ctxt, &ep4_trap_ops) != 0) -+ return -ENOMEM; -+ -+ if ((rail->r_routetable = elan4_alloc_routetable (dev, 4)) == NULL) /* 512 << 4 == 8192 entries */ -+ { -+ elan4_removectxt (dev, &rail->r_ctxt); -+ return -ENOMEM; -+ } -+ elan4_set_routetable (&rail->r_ctxt, rail->r_routetable); -+ -+ /* Attach to the kernel comms network context */ -+ if (elan4_attach_filter (&rail->r_ctxt, ELAN4_KCOMM_CONTEXT_NUM) < 0) -+ { -+ elan4_free_routetable (dev, rail->r_routetable); -+ elan4_removectxt (dev, &rail->r_ctxt); -+ -+ return -EBUSY; -+ } -+ -+ for (ctx = ELAN4_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN4_KCOMM_TOP_CONTEXT_NUM; ctx++) -+ elan4_attach_filter (&rail->r_ctxt, ctx); -+ -+ return 0; -+} -+ -+static void -+ep4_detach_rail (EP4_RAIL *rail) -+{ -+ ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev; -+ unsigned ctx; -+ -+ elan4_detach_filter (&rail->r_ctxt, ELAN4_KCOMM_CONTEXT_NUM); -+ -+ for (ctx = ELAN4_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN4_KCOMM_TOP_CONTEXT_NUM; ctx++) -+ elan4_detach_filter (&rail->r_ctxt, ctx); -+ -+ if (rail->r_routetable) -+ { -+ elan4_set_routetable (&rail->r_ctxt, NULL); -+ elan4_free_routetable (dev, rail->r_routetable); -+ } -+ -+ elan4_removectxt (dev, &rail->r_ctxt); -+} -+ -+int -+ep4_start_rail (EP_RAIL *r) -+{ -+ EP4_RAIL *rail = (EP4_RAIL *) r; -+ ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev; -+ E4_InputQueue qdesc; -+ int i, res; -+ -+ if ((res = ep4_attach_rail (rail)) < 0) -+ return res; -+ -+ /* Initialise main interrupt cookie table */ -+ spin_lock_init (&rail->r_intcookie_lock); -+ for (i = 0; i < EP4_INTCOOKIE_HASH_SIZE; i++) -+ INIT_LIST_HEAD (&rail->r_intcookie_hash[i]); -+ -+ kmutex_init (&rail->r_haltop_mutex); -+ kcondvar_init (&rail->r_haltop_sleep); -+ spin_lock_init (&rail->r_haltop_lock); -+ -+ spin_lock_init (&rail->r_cookie_lock); -+ -+ INIT_LIST_HEAD (&rail->r_ecq_list[EP4_ECQ_EVENT]); -+ INIT_LIST_HEAD (&rail->r_ecq_list[EP4_ECQ_ATOMIC]); -+ INIT_LIST_HEAD 
(&rail->r_ecq_list[EP4_ECQ_SINGLE]); -+ INIT_LIST_HEAD (&rail->r_ecq_list[EP4_ECQ_MAIN]); -+ spin_lock_init (&rail->r_ecq_lock); -+ -+ ep_kthread_init (&rail->r_retry_thread); -+ INIT_LIST_HEAD (&rail->r_retry_ops); -+ -+ INIT_LIST_HEAD (&rail->r_neterr_ops); -+ -+ kmutex_init (&rail->r_flush_mutex); -+ kcondvar_init (&rail->r_flush_sleep); -+ -+ /* Allocate the elan visible sdram/main memory */ -+ if ((rail->r_elan = ep_alloc_elan (&rail->r_generic, sizeof (EP4_RAIL_ELAN), 0, &rail->r_elan_addr)) == 0 || -+ (rail->r_main = ep_alloc_main (&rail->r_generic, sizeof (EP4_RAIL_MAIN), 0, &rail->r_main_addr)) == 0) -+ { -+ goto failed; -+ } -+ -+ for (i = 0; i < EP_NUM_SYSTEMQ; i++) -+ elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_qevents[i].ev_CountAndType), 0); -+ -+ elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event.ev_CountAndType), E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0)); -+ -+ /* Allocate the system input queues at their fixed elan address */ -+ /* avoid sdram address aliasing by allocating the min sdram pagesize */ -+ if (! (rail->r_queuedescs= ep_alloc_memory_elan (&rail->r_generic, EP_SYSTEM_QUEUE_BASE, SDRAM_PAGE_SIZE, EP_PERM_ALL, 0))) -+ goto failed; -+ -+ /* Initialise the input queue descriptor as "full" with no event */ -+ qdesc.q_bptr = 0; -+ qdesc.q_fptr = 8; -+ qdesc.q_control = E4_InputQueueControl(qdesc.q_bptr, qdesc.q_fptr, 8); -+ qdesc.q_event = 0; -+ -+ for (i = 0; i < EP_NUM_SYSTEMQ; i++) -+ elan4_sdram_copyq_to_sdram (dev, &qdesc, EP_SYSTEMQ_DESC (rail->r_queuedescs, i), sizeof (E4_InputQueue)); -+ -+ /* Allocate the resource map for command queue mappings */ -+ if ((rail->r_ecq_rmap = ep_rmallocmap (EP4_ECQ_RMAPSIZE, "r_ecq_rmap", 1)) == NULL) -+ goto failed; -+ -+ ep_rmfree (rail->r_ecq_rmap, EP4_ECQ_TOP - EP4_ECQ_BASE, EP4_ECQ_BASE); -+ -+ /* register an interrupt cookie & allocate command queues for command queue flushing */ -+ rail->r_flush_mcq = ep4_get_ecq (rail, EP4_ECQ_MAIN, 4); -+ rail->r_flush_ecq = ep4_get_ecq (rail, EP4_ECQ_EVENT, 1); -+ -+ if (rail->r_flush_mcq == NULL || rail->r_flush_ecq == NULL) -+ goto failed; -+ -+ ep4_register_intcookie (rail, &rail->r_flush_intcookie, rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_flush_event), ep4_flush_interrupt, rail); -+ -+ /* startup the retry thread */ -+ if (kernel_thread_create (ep4_retry_thread, (void *) rail) == 0) -+ goto failed; -+ ep_kthread_started (&rail->r_retry_thread); -+ -+ ep4_initialise_dma_retries (rail); -+ -+ if ((rail->r_event_ecq = ep4_alloc_ecq (rail, CQ_Size1K)) == NULL) -+ goto failed; -+ -+ rail->r_threadcode = threadcode_elan4; -+ if (ep_loadcode (&rail->r_generic, &rail->r_threadcode)) -+ goto failed; -+ -+ elan4_flush_icache (&rail->r_ctxt); -+ -+ if (ep4_probe_init (rail)) -+ goto failed; -+ -+ /* can now drop the context filter for the system context */ -+ elan4_set_filter (&rail->r_ctxt, ELAN4_KCOMM_CONTEXT_NUM, E4_FILTER_HIGH_PRI); -+ -+ return 0; -+ -+ failed: -+ printk ("ep4_start_rail: failed for rail '%s'\n", rail->r_generic.Name); -+ ep4_stop_rail (&rail->r_generic); -+ -+ return -ENOMEM; -+} -+ -+void -+ep4_stall_rail (EP_RAIL *r) -+{ -+ EP4_RAIL *rail = (EP4_RAIL *) r; -+ unsigned ctx; -+ -+ /* Raise all the context filters */ -+ elan4_set_filter (&rail->r_ctxt, ELAN4_KCOMM_CONTEXT_NUM, E4_FILTER_DISCARD_ALL); -+ -+ for (ctx = ELAN4_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN4_KCOMM_TOP_CONTEXT_NUM; ctx++) -+ elan4_set_filter (&rail->r_ctxt, ctx, E4_FILTER_DISCARD_ALL); -+} -+ -+void -+ep4_stop_rail (EP_RAIL 
*r) -+{ -+ EP4_RAIL *rail = (EP4_RAIL *) r; -+ -+ if (rail->r_generic.State == EP_RAIL_STATE_RUNNING) /* undo ep4_position_found() */ -+ { -+ ELAN_POSITION *pos = &rail->r_generic.Position; -+ EP_ADDR addr = elan4_sdram_readq (rail->r_ctxt.ctxt_dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_cookies)); -+ -+ ep_free_elan (&rail->r_generic, addr, pos->pos_nodes * sizeof (E4_uint64)); -+ -+ KMEM_FREE (rail->r_cookies, pos->pos_nodes * sizeof (E4_uint64)); -+ } -+ -+ ep4_probe_destroy (rail); -+ -+ ep_unloadcode (&rail->r_generic, &rail->r_threadcode); -+ -+ if (rail->r_event_ecq) -+ ep4_free_ecq (rail, rail->r_event_ecq); -+ rail->r_event_ecq = NULL; -+ -+ ep4_finalise_dma_retries (rail); -+ -+ ep_kthread_stop (&rail->r_retry_thread); -+ ep_kthread_destroy (&rail->r_retry_thread); -+ -+ if (rail->r_flush_intcookie.int_arg) -+ ep4_deregister_intcookie (rail, &rail->r_flush_intcookie); -+ rail->r_flush_intcookie.int_arg = NULL; -+ -+ if (rail->r_flush_mcq) -+ ep4_put_ecq (rail, rail->r_flush_mcq, 4); -+ rail->r_flush_mcq = NULL; -+ -+ if (rail->r_flush_ecq) -+ ep4_put_ecq (rail, rail->r_flush_ecq, 1); -+ rail->r_flush_ecq = NULL; -+ -+ if (rail->r_ecq_rmap) -+ ep_rmfreemap (rail->r_ecq_rmap); -+ -+ if (rail->r_queuedescs) -+ ep_free_memory_elan (&rail->r_generic, EP_SYSTEM_QUEUE_BASE); -+ rail->r_queuedescs = 0; -+ -+ if (rail->r_elan) -+ ep_free_elan (&rail->r_generic, rail->r_elan_addr, sizeof (EP4_RAIL_ELAN)); -+ rail->r_elan = 0; -+ -+ if (rail->r_main) -+ ep_free_main (&rail->r_generic, rail->r_main_addr, sizeof (EP4_RAIL_MAIN)); -+ rail->r_main = NULL; -+ -+ kcondvar_destroy (&rail->r_flush_sleep); -+ kmutex_destroy (&rail->r_flush_mutex); -+ -+ spin_lock_destroy (&rail->r_ecq_lock); -+ spin_lock_destroy (&rail->r_cookie_lock); -+ -+ spin_lock_destroy (&rail->r_haltop_lock); -+ kcondvar_destroy(&rail->r_haltop_sleep); -+ kmutex_destroy (&rail->r_haltop_mutex); -+ spin_lock_destroy (&rail->r_intcookie_lock); -+ -+ ep4_detach_rail (rail); -+} -+ -+void -+ep4_position_found (EP_RAIL *r, ELAN_POSITION *pos) -+{ -+ EP4_RAIL *rail = (EP4_RAIL *) r; -+ sdramaddr_t cookies; -+ EP_ADDR addr; -+ int i; -+ -+ KMEM_ZALLOC (rail->r_cookies, E4_uint64 *, pos->pos_nodes * sizeof (E4_uint64), 1); -+ -+ if (! 
(cookies = ep_alloc_elan (&rail->r_generic, pos->pos_nodes * sizeof (E4_uint64), 0, &addr)))
-+	panic ("ep4_position_found: cannot allocate elan cookies array\n");
-+
-+    for (i = 0; i < pos->pos_nodes; i++)
-+	elan4_sdram_writeq (rail->r_ctxt.ctxt_dev, cookies + (i * sizeof (E4_uint64)), 0);
-+
-+    for (i = 0; i < pos->pos_nodes; i++)
-+	rail->r_cookies[i] = 0;
-+
-+    elan4_sdram_writeq (rail->r_ctxt.ctxt_dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_nodeid), pos->pos_nodeid);
-+    elan4_sdram_writeq (rail->r_ctxt.ctxt_dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_cookies), addr);
-+
-+    ep4_probe_position_found (rail, pos);
-+}
-+
-+sdramaddr_t
-+ep4_sdram_alloc (EP_RAIL *r, EP_ADDR addr, unsigned size)
-+{
-+    ELAN4_DEV *dev = ((EP4_RAIL *) r)->r_ctxt.ctxt_dev;
-+
-+    if (size >= SDRAM_PAGE_SIZE)
-+	return elan4_sdram_alloc (dev, size);
-+    else
-+    {
-+	/* sub-page allocation: the block must share the elan address's
-+	 * offset within an sdram page (to avoid sdram address aliasing),
-+	 * so carve it out of a whole page and free off the unused head
-+	 * and tail */
-+	sdramaddr_t block = elan4_sdram_alloc (dev, SDRAM_PAGE_SIZE);
-+	sdramaddr_t sdram = block + (addr & (SDRAM_PAGE_SIZE-1));
-+
-+	/* free off the portion before sdram */
-+	if (sdram > block)
-+	    elan4_sdram_free (dev, block, sdram - block);
-+
-+	/* free off the portion after sdram */
-+	if ((block + SDRAM_PAGE_SIZE) > (sdram + size))
-+	    elan4_sdram_free (dev, sdram + size, block + SDRAM_PAGE_SIZE - (sdram + size));
-+
-+	return sdram;
-+    }
-+}
-+
-+void
-+ep4_sdram_free (EP_RAIL *r, sdramaddr_t addr, unsigned size)
-+{
-+    elan4_sdram_free (((EP4_RAIL *) r)->r_ctxt.ctxt_dev, addr, size);
-+}
-+
-+void
-+ep4_sdram_writeb (EP_RAIL *r, sdramaddr_t addr, unsigned char val)
-+{
-+    elan4_sdram_writeb (((EP4_RAIL *) r)->r_ctxt.ctxt_dev, addr, val);
-+}
-+
-+void
-+ep4_flush_tlb (EP_RAIL *r)
-+{
-+    elan4mmu_flush_tlb (((EP4_RAIL *) r)->r_ctxt.ctxt_dev);
-+}
-+
-+void
-+ep4_load_system_route (EP_RAIL *r, unsigned vp, unsigned lowNode, unsigned highNode)
-+{
-+    EP4_RAIL              *rail = (EP4_RAIL *) r;
-+    ELAN4_DEV             *dev  = rail->r_ctxt.ctxt_dev;
-+    E4_VirtualProcessEntry route;
-+
-+    if (elan4_generate_route (&rail->r_generic.Position, &route, ELAN4_KCOMM_CONTEXT_NUM,
-+			      lowNode, highNode, FIRST_SYSTEM_PACKET | FIRST_HIGH_PRI | FIRST_TIMEOUT(3)) < 0)
-+    {
-+	panic ("ep4_load_system_route: generate route failed\n");
-+	/* NOTREACHED */
-+    }
-+
-+    elan4_write_route (dev, rail->r_routetable, vp, &route);
-+}
-+
-+void
-+ep4_load_node_route (EP_RAIL *r, unsigned nodeId)
-+{
-+    EP4_RAIL              *rail = (EP4_RAIL *) r;
-+    ELAN4_DEV             *dev  = rail->r_ctxt.ctxt_dev;
-+    E4_VirtualProcessEntry route;
-+
-+    if (elan4_generate_route (&rail->r_generic.Position, &route, EP4_CONTEXT_NUM(rail->r_generic.Position.pos_nodeid),
-+			      nodeId, nodeId, FIRST_SYSTEM_PACKET | FIRST_TIMEOUT(3)) < 0)
-+    {
-+	panic ("ep4_load_node_route: generate route failed\n");
-+	/* NOTREACHED */
-+    }
-+
-+    elan4_write_route (dev, rail->r_routetable, EP_VP_DATA(nodeId), &route);
-+}
-+
-+void
-+ep4_unload_node_route (EP_RAIL *r, unsigned nodeId)
-+{
-+    EP4_RAIL  *rail = (EP4_RAIL *) r;
-+    ELAN4_DEV *dev  = rail->r_ctxt.ctxt_dev;
-+
-+    elan4_invalidate_route (dev, rail->r_routetable, EP_VP_DATA(nodeId));
-+}
-+
-+void
-+ep4_lower_filter (EP_RAIL *r, unsigned nodeId)
-+{
-+    EP4_RAIL *rail = (EP4_RAIL *) r;
-+
-+    elan4_set_filter (&rail->r_ctxt, EP4_CONTEXT_NUM(nodeId), E4_FILTER_HIGH_PRI);
-+}
-+
-+void
-+ep4_raise_filter (EP_RAIL *r, unsigned nodeId)
-+{
-+    EP4_RAIL *rail = (EP4_RAIL *) r;
-+
-+    elan4_set_filter (&rail->r_ctxt, EP4_CONTEXT_NUM(nodeId), E4_FILTER_DISCARD_ALL);
-+}
-+
-+void
-+ep4_node_disconnected (EP_RAIL *r, unsigned nodeId)
-+{
-+    ep4_free_stalled_dmas ((EP4_RAIL *) r, nodeId);
-+}
-+
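-+/*
-+ * ep4_lower_filter()/ep4_raise_filter() gate per-node traffic: a node's
-+ * context filter sits at E4_FILTER_HIGH_PRI while it is connected and is
-+ * swung to E4_FILTER_DISCARD_ALL when it is not; on disconnect
-+ * ep4_node_disconnected() then reclaims any DMAs stalled against it.
-+ */
-+void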
-+ep4_fillout_stats(EP_RAIL *r, char *str)
-+{
-+    /* no stats here yet */
-+    /* EP4_RAIL *ep4rail = (EP4_RAIL *)r; */
-+}
-diff -urN clean/drivers/net/qsnet/ep/kcomm_elan4.h linux-2.6.9/drivers/net/qsnet/ep/kcomm_elan4.h
---- clean/drivers/net/qsnet/ep/kcomm_elan4.h	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/ep/kcomm_elan4.h	2005-07-20 08:01:34.000000000 -0400
-@@ -0,0 +1,443 @@
-+/*
-+ *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ *    Copyright (c) 2002-2003 by Quadrics Ltd.
-+ *
-+ *    For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ifndef __EP_KCOMM_ELAN4_H
-+#define __EP_KCOMM_ELAN4_H
-+
-+#ident "@(#)$Id: kcomm_elan4.h,v 1.19.2.1 2005/07/20 12:01:34 mike Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
-+/* $Source: /cvs/master/quadrics/epmod/kcomm_elan4.h,v $*/
-+
-+#include 
-+
-+#include 
-+#include 
-+#include 
-+
-+#if !defined(__elan4__)
-+#include 
-+#endif /* ! defined(__elan4__) */
-+
-+/* private address allocation */
-+#define EP4_TEXT_BASE		0xF8000000	/* base address for thread code (defined in makerules.elan4) */
-+#define EP4_ECQ_BASE		0xFF000000	/* address space for mapping command queues */
-+#define EP4_ECQ_TOP		0xFF800000
-+
-+#define EP4_ECQ_RMAPSIZE	128
-+#define EP4_STACK_SIZE		1024		/* default thread code stack size */
-+#define EP4_MAX_LEVELS		8		/* same as ELAN_MAX_LEVELS */
-+
-+/* context number allocation */
-+#define EP4_CONTEXT_NUM(nodeId)	 (ELAN4_KCOMM_BASE_CONTEXT_NUM + (nodeId))
-+#define EP4_CONTEXT_ISDATA(ctx)	 ((ctx) >= ELAN4_KCOMM_BASE_CONTEXT_NUM && \
-+				  (ctx) <= ELAN4_KCOMM_TOP_CONTEXT_NUM)
-+#define EP4_CONTEXT_TO_NODE(ctx) ((ctx) - ELAN4_KCOMM_BASE_CONTEXT_NUM)
-+
-+/*
-+ * network error cookie format:
-+ * -------------------------------------------------------
-+ * | unique cookie value | Thread | Remote | STEN | DMA |
-+ * -------------------------------------------------------
-+ * [63:4]	Cookie	- unique cookie number
-+ * [3]		Thread	- cookie generated by thread code
-+ * [2]		Remote	- cookie generated by remote end
-+ * [1]		STEN	- cookie is for a STEN packet
-+ * [0]		DMA	- cookie is for a DMA
-+ */
-+#define EP4_COOKIE_DMA		(1 << 0)
-+#define EP4_COOKIE_STEN		(1 << 1)
-+#define EP4_COOKIE_REMOTE	(1 << 2)
-+#define EP4_COOKIE_THREAD	(1 << 3)
-+#define EP4_COOKIE_INC		(1ull << 4)
-+
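-+/*
-+ * e.g. a cookie value of (3 * EP4_COOKIE_INC) | EP4_COOKIE_DMA == 0x31
-+ * decodes as unique number 3 with the DMA bit set, which the argument
-+ * list below renders as "3,dma".
-+ */
-+#define EP4_COOKIE_STRING(val)	(long long)(((val) & ~(EP4_COOKIE_INC-1)) >> 4), \
-+				((val) & EP4_COOKIE_DMA)    ? ",dma"    : "", \
-+				((val) & EP4_COOKIE_REMOTE) ? ",remote" : "", \
-+				((val) & EP4_COOKIE_THREAD) ? ",thread" : "", \
-+				((val) & EP4_COOKIE_STEN)   ? 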
",sten" : "" -+/* -+ * Done "word" values -+ */ -+#define EP4_STATE_FREE 0 -+#define EP4_STATE_ACTIVE 1 -+#define EP4_STATE_FINISHED 2 -+#define EP4_STATE_FAILED 3 -+#define EP4_STATE_PRIVATE 4 -+ -+#define EP4_EVENT_FIRING_TLIMIT 16384 /* 1023 uS */ -+ -+/* forward declarations */ -+typedef struct ep4_rail EP4_RAIL; -+ -+#if !defined(__elan4__) -+ -+typedef struct ep4_intcookie -+{ -+ struct list_head int_link; -+ E4_uint64 int_val; -+ void (*int_callback)(EP4_RAIL *rail, void *arg); -+ void *int_arg; -+} EP4_INTCOOKIE; -+ -+#define EP4_INTCOOKIE_HASH_SIZE 256 -+#define EP4_INTCOOKIE_HASH(a) ((((a) >> 3) ^ ((a) >> 7) ^ ((a) >> 11)) & (EP4_INTCOOKIE_HASH_SIZE-1)) -+ -+typedef struct ep4_ecq -+{ -+ struct list_head ecq_link; /* linked on r_ecq_list */ -+ ELAN4_INTOP ecq_intop; /* main interrupt op space */ -+ ELAN4_CQ *ecq_cq; /* command queue */ -+ E4_Addr ecq_addr; /* address mapped into elan */ -+ unsigned int ecq_avail; /* # dwords still available */ -+ -+ spinlock_t ecq_lock; /* spinlock for main accesses */ -+ sdramaddr_t ecq_event; /* event for flushing "event" queues */ -+ EP_ADDR ecq_event_addr; -+ struct ep4_ecq *ecq_flushcq; /* and command port to issue setevent to */ -+} EP4_ECQ; -+ -+#define EP4_ECQ_EVENT 0 /* command queues targetted by multi-blocks events */ -+#define EP4_ECQ_ATOMIC 1 /* command queues targetted by atomic store operations */ -+#define EP4_ECQ_SINGLE 2 /* command queues targetted by single word commands from main */ -+#define EP4_ECQ_MAIN 3 /* command queues targetted by multi word commands from main */ -+#define EP4_NUM_ECQ 4 -+ -+#define EP4_ECQ_Size(which) ((which) == EP4_ECQ_EVENT ? CQ_Size64K : \ -+ (which) == EP4_ECQ_ATOMIC ? CQ_Size8K : \ -+ (which) == EP4_ECQ_SINGLE ? CQ_Size1K : \ -+ (which) == EP4_ECQ_MAIN ? CQ_Size8K : \ -+ CQ_Size1K) -+ -+typedef struct ep4_dma_retry -+{ -+ struct list_head retry_link; /* chained on free/retry list */ -+ unsigned long retry_time; /* "lbolt" to retry at */ -+ E4_DMA retry_dma; /* DMA (in main memory) */ -+} EP4_DMA_RETRY; -+ -+#define EP4_DMA_RETRY_CQSIZE CQ_Size8K /* size of command queue for dma retry */ -+#define EP4_DMA_RETRY_FLOWCNT (CQ_Size(EP4_DMA_RETRY_CQSIZE)/72) /* # of reissued DMA's which can fit in */ -+ -+typedef struct ep4_inputq -+{ -+ EP4_INTCOOKIE q_intcookie; -+ unsigned int q_slotSize; -+ unsigned int q_slotCount; -+ -+ void *q_slots; -+ EP_ADDR q_slotsAddr; -+ -+ EP_INPUTQ_CALLBACK *q_callback; -+ void *q_arg; -+ -+ sdramaddr_t q_desc; -+ EP_ADDR q_descAddr; -+ EP_ADDR q_eventAddr; -+ EP4_ECQ *q_wcq; /* command queue to issue waitevent to */ -+ EP4_ECQ *q_ecq; /* command queue targetted by event to generate interrupt */ -+ -+ EP_ADDR q_fptr; /* cached current front pointer */ -+ EP_ADDR q_last; /* elan addr for last queue slot */ -+ -+ atomic_t q_fired; /* atomic flag that interrupt received */ -+ unsigned int q_count; /* count of slots consumed */ -+} EP4_INPUTQ; -+ -+typedef struct ep4_outputq -+{ -+ spinlock_t q_lock; -+ unsigned int q_slotCount; -+ unsigned int q_slotSize; -+ unsigned int q_dwords; -+ ELAN4_CQ *q_cq; -+ void *q_main; -+ EP_ADDR q_mainAddr; -+ unsigned int q_retries; -+} EP4_OUTPUTQ; -+ -+#endif /* ! 
defined(__elan4__) */ -+ -+typedef struct ep4_check_sten -+{ -+ E4_uint64 c_reset_event_cmd; /* WRITEDWORD to reset start event */ -+ E4_uint64 c_reset_event_value; -+ -+ E4_uint64 c_open; /* OPEN VP_PROBE(lvl) */ -+ E4_uint64 c_trans_traceroute0; /* SENDTRANS TR_TRACEROUTE 0s */ -+ E4_uint64 c_addr_traceroute0; -+ E4_uint64 c_data_traceroute0[8]; -+ E4_uint64 c_trans_traceroute1; /* SENDTRANS TR_TRACEROUTE 1s */ -+ E4_uint64 c_addr_traceroute1; -+ E4_uint64 c_data_traceroute1[8]; -+ E4_uint64 c_trans_sendack; /* SENDTRANS SENDACK */ -+ E4_uint64 c_addr_sendack; -+ -+ E4_uint64 c_guard_ok; /* GUARD OK - write level */ -+ E4_uint64 c_writedword_ok; -+ E4_uint64 c_value_ok; -+ -+ E4_uint64 c_guard_fail; /* GUARD FAIL - chain setevent/write fail */ -+ E4_uint64 c_setevent_fail; -+ E4_uint64 c_setevent_nop; -+ E4_uint64 c_nop_pad; -+} EP4_CHECK_STEN; -+ -+#define EP4_CHECK_STEN_NDWORDS (sizeof (EP4_CHECK_STEN) >> 3) -+ -+typedef struct ep4_rail_elan -+{ -+ EP4_CHECK_STEN r_check_sten[EP4_MAX_LEVELS]; -+ E4_Event32 r_check_fail; /* Check failed (== r_check_start[-1]) */ -+ E4_Event32 r_check_start[EP4_MAX_LEVELS]; -+ -+ E4_Event32 r_qevents[EP_NUM_SYSTEMQ]; -+ E4_Event32 r_flush_event; -+ -+ E4_uint64 r_nodeid; -+#ifdef __elan4__ -+ E4_uint64 *r_cookies; -+#else -+ E4_Addr r_cookies; -+#endif -+} EP4_RAIL_ELAN; -+ -+#define TRACEROUTE_ENTRIES 16 /* 2 * ELAN_MAX_LEVELS */ -+#define TRACEROUTE_NDWORDS (TRACEROUTE_ENTRIES/2) -+ -+typedef struct ep4_rail_main -+{ -+ E4_uint32 r_probe_dest0[TRACEROUTE_ENTRIES]; -+ E4_uint32 r_probe_dest1[TRACEROUTE_ENTRIES]; -+ E4_uint64 r_probe_result; -+ E4_uint64 r_probe_level; -+ -+ E4_uint64 r_dma_flowcnt; /* count of dma's queued */ -+} EP4_RAIL_MAIN; -+ -+#define EP4_PROBE_ACTIVE (0xffff) -+#define EP4_PROBE_FAILED (0xfffe) -+ -+#if !defined(__elan4__) -+ -+typedef struct ep4_retry_ops -+{ -+ struct list_head op_link; -+ unsigned long (*op_func)(EP4_RAIL *rail, void *arg, unsigned long nextRunTime); -+ void *op_arg; -+} EP4_RETRY_OPS; -+ -+typedef struct ep4_neterr_ops -+{ -+ struct list_head op_link; -+ void (*op_func) (EP4_RAIL *rail, void *arg, unsigned int nodeId, EP_NETERR_COOKIE *cookies); -+ void *op_arg; -+} EP4_NETERR_OPS; -+ -+struct ep4_rail -+{ -+ EP_RAIL r_generic; -+ ELAN4_CTXT r_ctxt; -+ ELAN4_ROUTE_TABLE *r_routetable; -+ -+ spinlock_t r_intcookie_lock; -+ struct list_head r_intcookie_hash[EP4_INTCOOKIE_HASH_SIZE]; -+ -+ sdramaddr_t r_elan; -+ EP_ADDR r_elan_addr; -+ EP4_RAIL_MAIN *r_main; -+ EP_ADDR r_main_addr; -+ -+ EP_CODE r_threadcode; /* copy of thread code */ -+ -+ sdramaddr_t r_queuedescs; /* systemq queue descriptors */ -+ -+ E4_uint64 *r_cookies; /* network error cookies */ -+ spinlock_t r_cookie_lock; /* and spin lock */ -+ -+ kcondvar_t r_probe_wait; /* network position probing */ -+ spinlock_t r_probe_lock; -+ volatile int r_probe_done; -+ EP4_INTCOOKIE r_probe_intcookie; -+ EP4_ECQ *r_probe_cq; -+ E4_uint32 r_probe_source0[TRACEROUTE_ENTRIES]; -+ E4_uint32 r_probe_source1[TRACEROUTE_ENTRIES]; -+ -+ kmutex_t r_haltop_mutex; /* halt/flush operations */ -+ ELAN4_HALTOP r_haltop; -+ ELAN4_DMA_FLUSHOP r_flushop; -+ kcondvar_t r_haltop_sleep; -+ spinlock_t r_haltop_lock; -+ -+ struct list_head r_ecq_list[EP4_NUM_ECQ]; /* list of statically allocated command queues */ -+ EP_RMAP *r_ecq_rmap; /* resource map for command queue mappings */ -+ spinlock_t r_ecq_lock; /* spinlock for list/space management */ -+ -+ kmutex_t r_flush_mutex; /* serialize command queue flushing */ -+ unsigned long r_flush_count; /* # setevents issued for flushing 
*/ -+ EP4_ECQ *r_flush_mcq; /* and command queue for waitevent */ -+ EP4_ECQ *r_flush_ecq; /* and command queue for interrupt */ -+ EP4_INTCOOKIE r_flush_intcookie; /* and interrupt cookie */ -+ kcondvar_t r_flush_sleep; /* and place to sleep ... */ -+ -+ EP_KTHREAD r_retry_thread; /* retry thread */ -+ struct list_head r_retry_ops; /* list of retry operations */ -+ -+ EP4_RETRY_OPS r_dma_ops; /* dma retry operations */ -+ EP4_ECQ *r_dma_ecq; /* command queue to reissue DMAs */ -+ E4_uint64 r_dma_flowcnt; /* count of dma's reissued */ -+ struct list_head r_dma_retrylist[EP_NUM_RETRIES]; /* retry lists */ -+ struct list_head r_dma_freelist; /* and free list */ -+ spinlock_t r_dma_lock; /* and spinlock to protect lists */ -+ unsigned long r_dma_allocated; /* # retries allocated*/ -+ unsigned long r_dma_reserved; /* # retries reserved */ -+ -+ EP4_ECQ *r_event_ecq; /* command queue for occasional setevents */ -+ -+ struct list_head r_neterr_ops; /* list of neterr fixup operations */ -+ -+ ELAN4_IPROC_TRAP r_iproc_trap; -+ ELAN4_TPROC_TRAP r_tproc_trap; -+} ; -+ -+#define EP4_CTXT_TO_RAIL(ctxt) ((EP4_RAIL *) (((unsigned long) (ctxt)) - offsetof (EP4_RAIL, r_ctxt))) -+ -+#if defined(DEBUG_ASSERT) -+#define EP4_ASSERT(rail,EXPR) EP_ASSERT(&((rail)->r_generic), EXPR) -+#define EP4_SDRAM_ASSERT(rail,off,value) EP4_ASSERT(rail, (sdram_assert ? elan4_sdram_readq ((rail)->r_ctxt.ctxt_dev, (off)) == (value) : 1)) -+#else -+#define EP4_ASSERT(rail,EXPR) -+#define EP4_SDRAM_ASSERT(rail,off,value) -+#endif -+ -+/* kcomm_elan4.c */ -+extern EP_RAIL *ep4_create_rail (EP_SYS *sys, ELAN4_DEV *dev); -+extern void ep4_destroy_rail (EP_RAIL *rail); -+ -+extern int ep4_start_rail (EP_RAIL *rail); -+extern void ep4_stall_rail (EP_RAIL *rail); -+extern void ep4_stop_rail (EP_RAIL *rail); -+ -+extern void ep4_debug_rail (EP_RAIL *rail); -+ -+extern void ep4_position_found (EP_RAIL *rail, ELAN_POSITION *pos); -+ -+extern sdramaddr_t ep4_sdram_alloc (EP_RAIL *rail, EP_ADDR addr, unsigned int size); -+extern void ep4_sdram_free (EP_RAIL *rail, sdramaddr_t addr, unsigned int size); -+extern void ep4_sdram_writeb (EP_RAIL *rail, sdramaddr_t addr, unsigned char val); -+ -+extern void ep4_flush_tlb (EP_RAIL *r); -+extern void ep4_load_system_route (EP_RAIL *r, unsigned int vp, unsigned int lowNode, unsigned int highNode); -+extern void ep4_load_node_route (EP_RAIL *r, unsigned int nodeId); -+extern void ep4_unload_node_route (EP_RAIL *r, unsigned int nodeId); -+extern void ep4_lower_filter (EP_RAIL *r, unsigned int nodeId); -+extern void ep4_raise_filter (EP_RAIL *rail, unsigned int nodeId); -+extern void ep4_node_disconnected (EP_RAIL *r, unsigned int nodeId); -+ -+/* kmap_elan4.c */ -+extern void ep4_kaddr_map (EP_RAIL *r, EP_ADDR eaddr, virtaddr_t kaddr, unsigned int len, unsigned int perm, int ep_attr); -+extern void ep4_sdram_map (EP_RAIL *r, EP_ADDR eaddr, sdramaddr_t saddr, unsigned int len, unsigned int perm, int ep_attr); -+extern void ep4_cport_map (EP_RAIL *r, EP_ADDR eaddr, unsigned long cqaddr, unsigned int len, unsigned int perm); -+extern void ep4_unmap (EP_RAIL *r, EP_ADDR eaddr, unsigned int len); -+extern void *ep4_dvma_reserve (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages); -+extern void ep4_dvma_release (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages, void *private); -+extern void ep4_dvma_set_pte (EP_RAIL *r, void *private, unsigned int index, physaddr_t paddr, unsigned int perm); -+extern physaddr_t ep4_dvma_read_pte (EP_RAIL *r, void *private, unsigned int index); -+extern void ep4_dvma_unload 
(EP_RAIL *r, void *private, unsigned int index, unsigned int npages); -+ -+/* kmsg_elan4.c */ -+extern EP_INPUTQ *ep4_alloc_inputq (EP_RAIL *r, unsigned int qnum, unsigned int slotSize, unsigned int slotCount, -+ EP_INPUTQ_CALLBACK *callback, void *arg); -+extern void ep4_free_inputq (EP_RAIL *r, EP_INPUTQ *q); -+extern void ep4_enable_inputq (EP_RAIL *r, EP_INPUTQ *q); -+extern void ep4_disable_inputq (EP_RAIL *r, EP_INPUTQ *q); -+extern int ep4_poll_inputq (EP_RAIL *r, EP_INPUTQ *q, int maxCount, EP_INPUTQ_HANDLER *handler, void *arg); -+extern EP_OUTPUTQ *ep4_alloc_outputq (EP_RAIL *r, unsigned int slotSize, unsigned int slotCount); -+extern void ep4_free_outputq (EP_RAIL *r, EP_OUTPUTQ *q); -+extern void *ep4_outputq_msg (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum); -+extern int ep4_outputq_state (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum); -+extern int ep4_outputq_send (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum, unsigned int size, -+ unsigned int nodeId, unsigned int qnum, unsigned int retries); -+ -+/* probenetwork_elan4.c */ -+extern int ep4_probe_init (EP4_RAIL *r); -+extern void ep4_probe_destroy (EP4_RAIL *r); -+extern void ep4_probe_position_found (EP4_RAIL *rail, ELAN_POSITION *pos); -+extern int ep4_probe_route (EP_RAIL *r, int level, int sw, int nodeid, int *linkup, int *linkdown, int attempts, EP_SWITCH *lsw); -+extern int ep4_check_position (EP_RAIL *rail); -+ -+/* support_elan4.c */ -+extern ELAN4_TRAP_OPS ep4_trap_ops; -+extern void ep4_register_intcookie (EP4_RAIL *rail, EP4_INTCOOKIE *cp, E4_uint64 cookie, void (*callback)(EP4_RAIL *r, void *arg), void *arg); -+extern void ep4_deregister_intcookie (EP4_RAIL *rail, EP4_INTCOOKIE *cp); -+extern EP4_INTCOOKIE *ep4_lookup_intcookie (EP4_RAIL *rail, E4_uint64 cookie); -+extern E4_uint64 ep4_neterr_cookie (EP4_RAIL *rail, unsigned int node); -+ -+extern void ep4_flush_filters (EP_RAIL *r); -+extern void ep4_flush_queues (EP_RAIL *r); -+extern void ep4_write_qdesc (EP4_RAIL *rail, sdramaddr_t qaddr, E4_InputQueue *qdesc); -+ -+extern EP4_ECQ *ep4_alloc_ecq (EP4_RAIL *rail, unsigned int cqsize); -+extern void ep4_free_ecq (EP4_RAIL *rail, EP4_ECQ *ecq); -+extern EP4_ECQ *ep4_get_ecq (EP4_RAIL *rail, unsigned int which, unsigned int ndwords); -+extern void ep4_put_ecq (EP4_RAIL *rail, EP4_ECQ *ecq, unsigned int ndwords); -+ -+extern void ep4_nop_cmd (EP4_ECQ *ecq, E4_uint64 tag); -+extern void ep4_set_event_cmd (EP4_ECQ *ecq, E4_Addr event); -+extern void ep4_wait_event_cmd (EP4_ECQ *ecq, E4_Addr event, E4_uint64 candt, E4_uint64 param0, E4_uint64 param1); -+ -+extern void ep4_flush_interrupt (EP4_RAIL *rail, void *arg); -+extern void ep4_flush_ecqs (EP4_RAIL *rail); -+ -+extern void ep4_init_thread (EP4_RAIL *rail, E4_ThreadRegs *regs, sdramaddr_t stackTop, -+ EP_ADDR stackAddr, E4_Addr startpc, int nargs,...); -+ -+extern void ep4_initialise_dma_retries (EP4_RAIL *rail); -+extern void ep4_finalise_dma_retries (EP4_RAIL *rail); -+extern int ep4_reserve_dma_retries (EP4_RAIL *rail, unsigned int count, unsigned int attr); -+extern void ep4_release_dma_retries(EP4_RAIL *rail, unsigned int count); -+extern void ep4_queue_dma_retry (EP4_RAIL *rail, E4_DMA *dma, int interval); -+extern void ep4_queue_dma_stalled (EP4_RAIL *rail, E4_DMA *dma); -+extern void ep4_free_stalled_dmas (EP4_RAIL *rail, unsigned int nodeId); -+extern void ep4_display_rail (EP4_RAIL *rail); -+ -+extern void ep4_add_retry_ops (EP4_RAIL *rail, EP4_RETRY_OPS *ops); -+extern void ep4_remove_retry_ops (EP4_RAIL *rail, EP4_RETRY_OPS *ops); -+extern 
void ep4_retry_thread (EP4_RAIL *rail);
-+
-+/* neterr_elan4.c */
-+extern void ep4_add_neterr_ops (EP4_RAIL *rail, EP4_NETERR_OPS *ops);
-+extern void ep4_remove_neterr_ops (EP4_RAIL *rail, EP4_NETERR_OPS *ops);
-+extern void ep4_neterr_fixup (EP_RAIL *r, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
-+
-+/* commands_elan4.c */
-+extern void elan4_nop_cmd (ELAN4_CQ *cq, E4_uint64 tag);
-+extern void elan4_write_dword_cmd (ELAN4_CQ *cq, E4_Addr addr, E4_uint64 data);
-+extern void elan4_add_dword_cmd (ELAN4_CQ *cq, E4_Addr addr, E4_uint64 data);
-+extern void elan4_copy64_cmd (ELAN4_CQ *cq, E4_Addr from, E4_Addr to, E4_uint32 datatype);
-+extern void elan4_interrupt_cmd (ELAN4_CQ *cq, E4_uint64 cookie);
-+extern void elan4_run_thread_cmd (ELAN4_CQ *cq, E4_ThreadRegs *regs);
-+extern void elan4_run_dma_cmd (ELAN4_CQ *cq, E4_DMA *dma);
-+extern void elan4_set_event_cmd (ELAN4_CQ *cq, E4_Addr event);
-+extern void elan4_set_eventn_cmd (ELAN4_CQ *cq, E4_Addr event, E4_uint32 count);
-+extern void elan4_wait_event_cmd (ELAN4_CQ *cq, E4_Addr event, E4_uint64 candt, E4_uint64 param0, E4_uint64 param1);
-+extern void elan4_open_packet (ELAN4_CQ *cq, E4_uint64 command);
-+extern void elan4_guard (ELAN4_CQ *cq, E4_uint64 command);
-+extern void elan4_sendtrans0 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr);
-+extern void elan4_sendtrans1 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 p0);
-+extern void elan4_sendtrans2 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 p0, E4_uint64 p1);
-+extern void elan4_sendtransn (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, ...);
-+extern void elan4_sendtransp (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 *ptr);
-+
-+extern void ep4_fillout_stats(EP_RAIL *rail, char *str);
-+
-+#endif /* ! defined(__elan4__) */
-+
-+#endif /* __EP_KCOMM_ELAN4_H */
-diff -urN clean/drivers/net/qsnet/ep/kcomm_vp.h linux-2.6.9/drivers/net/qsnet/ep/kcomm_vp.h
---- clean/drivers/net/qsnet/ep/kcomm_vp.h	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/ep/kcomm_vp.h	2004-03-24 06:32:56.000000000 -0500
-@@ -0,0 +1,36 @@
-+/*
-+ *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ *    Copyright (c) 2002-2003 by Quadrics Ltd.
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __EP_KCOMM_VP_H -+#define __EP_KCOMM_VP_H -+ -+#ident "@(#)$Id: kcomm_vp.h,v 1.2 2004/03/24 11:32:56 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $" -+/* $Source: /cvs/master/quadrics/epmod/kcomm_vp.h,v $*/ -+ -+#define EP_MAX_NODES 2048 /* Max nodes we support */ -+ -+/* virtual process allocation */ -+#define EP_VP_NODE_BASE (0) -+#define EP_VP_DATA_BASE (EP_VP_NODE_BASE + EP_MAX_NODES) -+#define EP_VP_PROBE_BASE (EP_VP_DATA_BASE + EP_MAX_NODES) -+#define EP_VP_PROBE_COUNT ELAN_MAX_LEVELS -+ -+#define EP_VP_BCAST_BASE (EP_VP_PROBE_BASE + EP_VP_PROBE_COUNT) -+#define EP_VP_BCAST_COUNT (CM_SGMTS_PER_LEVEL * (CM_MAX_LEVELS - 1) + 1) -+ -+#define EP_VP_NODE(nodeId) (EP_VP_NODE_BASE + (nodeId)) -+#define EP_VP_DATA(nodeId) (EP_VP_DATA_BASE + (nodeId)) -+#define EP_VP_PROBE(lvl) (EP_VP_PROBE_BASE + (lvl)) -+#define EP_VP_BCAST(lvl,sgmt) (EP_VP_BCAST_BASE + ((lvl) - 1)*CM_SGMTS_PER_LEVEL + (sgmt)) -+ -+#define EP_VP_TO_NODE(vp) ((vp) & (EP_MAX_NODES-1)) -+#define EP_VP_ISDATA(vp) ((vp) >= EP_VP_DATA_BASE && (vp) < (EP_VP_DATA_BASE + EP_MAX_NODES)) -+ -+#endif /* __EP_KCOMM_VP_H */ -+ -+ -diff -urN clean/drivers/net/qsnet/ep/kmap.c linux-2.6.9/drivers/net/qsnet/ep/kmap.c ---- clean/drivers/net/qsnet/ep/kmap.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/kmap.c 2004-12-14 05:19:23.000000000 -0500 -@@ -0,0 +1,561 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: kmap.c,v 1.12 2004/12/14 10:19:23 mike Exp $" -+/* $Source: /cvs/master/quadrics/epmod/kmap.c,v $*/ -+ -+#include -+#include -+ -+#include -+ -+#include "debug.h" -+ -+#if defined(DIGITAL_UNIX) -+# define kernel_map (first_task->map) -+# define vaddr_to_phys(map, addr) (pmap_extract (vm_map_pmap ((vm_map_t) map), (unsigned long) addr)) -+#elif defined(LINUX) -+# define kernel_map get_kern_mm() -+# define vaddr_to_phys(map, addr) (kmem_to_phys(addr)) -+#elif defined(SOLARIS) -+# define kernel_map &kas -+# define vaddr_to_phys(map,addr) ptob(hat_getpfnum (((struct as *) map)->a_hat, (caddr_t) addr)) -+#endif -+ -+void -+ep_perrail_kaddr_map (EP_RAIL *rail, EP_ADDR eaddr, virtaddr_t kaddr, unsigned long len, unsigned int perm, int ep_attr) -+{ -+ rail->Operations.KaddrMap (rail, eaddr, kaddr, len, perm, ep_attr); -+} -+ -+void -+ep_perrail_sdram_map (EP_RAIL *rail, EP_ADDR eaddr, sdramaddr_t saddr, unsigned long len, unsigned int perm, int ep_attr) -+{ -+ rail->Operations.SdramMap (rail, eaddr, saddr, len, perm, ep_attr); -+} -+ -+void -+ep_perrail_unmap (EP_RAIL *rail, EP_ADDR eaddr, unsigned long len) -+{ -+ rail->Operations.Unmap (rail, eaddr, len); -+} -+ -+void -+ep_perrail_dvma_sync (EP_RAIL *rail) -+{ -+ if (rail->TlbFlushRequired) -+ { -+ rail->TlbFlushRequired = 0; -+ -+ rail->Operations.FlushTlb (rail); -+ } -+} -+ -+ -+static int ep_dvma_map_rails (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, EP_RAILMASK mask); -+ -+#if ! defined(CONFIG_EP_NO_CHECK_SUM) -+static uint16_t ep_dvma_calc_check_sum (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, uint16_t check_sum); -+#endif -+ -+EP_NMH_OPS ep_dvma_nmh_ops = -+{ -+ ep_dvma_map_rails, -+#if ! 
defined(CONFIG_EP_NO_CHECK_SUM) -+ ep_dvma_calc_check_sum -+#endif -+}; -+ -+extern void -+ep_dvma_init (EP_SYS *sys) -+{ -+ EP_DVMA_STATE *d = &sys->DvmaState; -+ -+ kmutex_init (&d->dvma_lock); -+ -+ INIT_LIST_HEAD (&d->dvma_handles); -+ INIT_LIST_HEAD (&d->dvma_rails); -+ -+ d->dvma_rmap = ep_rmallocmap (EP_DVMA_RMAP_SIZE, "dvma_rmap", 1); -+ -+ ep_rmfree (d->dvma_rmap, EP_DVMA_TOP - EP_DVMA_BASE, EP_DVMA_BASE); -+} -+ -+extern void -+ep_dvma_fini (EP_SYS *sys) -+{ -+ EP_DVMA_STATE *d = &sys->DvmaState; -+ -+ ep_rmfreemap (d->dvma_rmap); -+ -+ kmutex_destroy (&d->dvma_lock); -+} -+ -+extern int -+ep_dvma_add_rail (EP_SYS *sys, EP_RAIL *rail) -+{ -+ EP_DVMA_STATE *d = &sys->DvmaState; -+ EP_RAIL_ENTRY *l; -+ struct list_head *el; -+ -+ KMEM_ZALLOC (l, EP_RAIL_ENTRY *, sizeof (EP_RAIL_ENTRY), 1); -+ -+ if (l == NULL) -+ return (ENOMEM); -+ -+ kmutex_lock (&d->dvma_lock); -+ -+ l->Rail = rail; -+ -+ list_add_tail (&l->Link, &d->dvma_rails); -+ -+ list_for_each (el, &d->dvma_handles) { -+ EP_DVMA_NMH *desc = list_entry (el, EP_DVMA_NMH, dvma_link); -+ int npages = desc->dvma_nmh.nmh_nmd.nmd_len >> PAGESHIFT; -+ -+ desc->dvma_rails[rail->Number] = rail; -+ desc->dvma_railmask |= ( 1 << rail->Number); -+ -+ desc->dvma_private[rail->Number] = rail->Operations.DvmaReserve (rail, desc->dvma_nmh.nmh_nmd.nmd_addr, npages); -+ } -+ -+ kmutex_unlock (&d->dvma_lock); -+ return (0); -+} -+ -+extern void -+ep_dvma_remove_rail (EP_SYS *sys, EP_RAIL *rail) -+{ -+ EP_DVMA_STATE *d = &sys->DvmaState; -+ struct list_head *el; -+ -+ kmutex_lock (&d->dvma_lock); -+ -+ list_for_each (el, &d->dvma_handles) { -+ EP_DVMA_NMH *desc = list_entry (el, EP_DVMA_NMH, dvma_link); -+ int npages = desc->dvma_nmh.nmh_nmd.nmd_len >> PAGESHIFT; -+ -+ desc->dvma_rails[rail->Number] = NULL; -+ desc->dvma_railmask &= ~(1 << rail->Number); -+ -+ rail->Operations.DvmaRelease (rail, desc->dvma_nmh.nmh_nmd.nmd_addr, npages, desc->dvma_private[rail->Number]); -+ } -+ -+ list_for_each (el, &d->dvma_rails) { -+ EP_RAIL_ENTRY *tmp = list_entry (el, EP_RAIL_ENTRY, Link); -+ -+ if (tmp->Rail == rail) -+ { -+ list_del (el); -+ -+ KMEM_FREE (tmp, sizeof (EP_RAIL_ENTRY)); -+ break; -+ } -+ } -+ kmutex_unlock (&d->dvma_lock); -+} -+ -+EP_NMH * -+ep_dvma_reserve (EP_SYS *sys, unsigned npages, unsigned perm) -+{ -+ EP_DVMA_STATE *d = &sys->DvmaState; -+ EP_DVMA_NMH *desc; -+ EP_ADDR addr; -+ struct list_head *el; -+ int i; -+ -+ KMEM_ZALLOC (desc, EP_DVMA_NMH *, offsetof (EP_DVMA_NMH, dvma_attrs[npages]), 1); -+ -+ if (desc == NULL) -+ return (NULL); -+ -+ if ((addr = ep_rmalloc (d->dvma_rmap, npages << PAGESHIFT, 0)) == 0) -+ { -+ -+ KMEM_FREE (desc, sizeof (EP_DVMA_NMH)); -+ return (NULL); -+ } -+ -+ spin_lock_init (&desc->dvma_lock); -+ -+ desc->dvma_perm = perm; -+ -+ kmutex_lock (&d->dvma_lock); -+ /* reserve the mapping resource */ -+ list_for_each (el, &d->dvma_rails) { -+ EP_RAIL *rail = list_entry (el, EP_RAIL_ENTRY, Link)->Rail; -+ -+ EPRINTF4 (DBG_KMAP, "%s: ep_dvma_reserve desc=%p npages=%d rail=%p\n", rail->Name, desc, npages, rail); -+ -+ if ((desc->dvma_private[rail->Number] = rail->Operations.DvmaReserve (rail, addr, npages)) == NULL) -+ { -+ printk ("%s: !!ep_dvma_reserve - rail->DvmaReserve failed\n", rail->Name); -+ goto failed; -+ } -+ -+ desc->dvma_rails[rail->Number] = rail; -+ desc->dvma_railmask |= (1 << rail->Number); -+ } -+ -+ /* insert into the network mapping handle table */ -+ desc->dvma_nmh.nmh_nmd.nmd_addr = addr; -+ desc->dvma_nmh.nmh_nmd.nmd_len = npages << PAGESHIFT; -+ desc->dvma_nmh.nmh_nmd.nmd_attr = 
EP_NMD_ATTR (sys->Position.pos_nodeid, 0);
-+    desc->dvma_nmh.nmh_ops          = &ep_dvma_nmh_ops;
-+
-+    ep_nmh_insert (&sys->MappingTable, &desc->dvma_nmh);
-+
-+    list_add (&desc->dvma_link, &d->dvma_handles);
-+
-+    kmutex_unlock (&d->dvma_lock);
-+
-+    return (&desc->dvma_nmh);
-+
-+ failed:
-+
-+    kmutex_unlock (&d->dvma_lock);
-+
-+    for (i = 0; i < EP_MAX_RAILS; i++)
-+	if (desc->dvma_rails[i] != NULL)
-+	    desc->dvma_rails[i]->Operations.DvmaRelease (desc->dvma_rails[i], addr, npages, desc->dvma_private[i]);
-+
-+    ep_rmfree (d->dvma_rmap, npages << PAGESHIFT, addr);
-+
-+    KMEM_FREE (desc, sizeof (EP_DVMA_NMH));
-+    return (NULL);
-+}
-+
-+void
-+ep_dvma_release (EP_SYS *sys, EP_NMH *nmh)
-+{
-+    EP_DVMA_STATE *d      = &sys->DvmaState;
-+    EP_DVMA_NMH   *desc   = (EP_DVMA_NMH *) nmh;
-+    EP_ADDR        addr   = nmh->nmh_nmd.nmd_addr;
-+    int            npages = nmh->nmh_nmd.nmd_len >> PAGESHIFT;
-+    EP_RAIL       *rail;
-+    int            i;
-+
-+    kmutex_lock (&d->dvma_lock);
-+
-+    list_del (&desc->dvma_link);
-+
-+    ep_nmh_remove (&sys->MappingTable, nmh);
-+
-+    for (i = 0; i < EP_MAX_RAILS; i++)
-+	if ((rail = desc->dvma_rails[i]) != NULL)
-+	    rail->Operations.DvmaRelease (rail, addr, npages, desc->dvma_private[i]);
-+
-+    ep_rmfree (d->dvma_rmap, npages << PAGESHIFT, addr);
-+
-+    KMEM_FREE (desc, offsetof (EP_DVMA_NMH, dvma_attrs[npages]));
-+
-+    kmutex_unlock (&d->dvma_lock);
-+}
-+
-+void
-+ep_dvma_load (EP_SYS *sys, void *map, caddr_t vaddr, unsigned len, EP_NMH *nmh, unsigned index, EP_RAILMASK *hints, EP_NMD *subset)
-+{
-+    EP_DVMA_NMH  *desc   = (EP_DVMA_NMH *) nmh;
-+    unsigned      offset = (unsigned long) vaddr & PAGEOFFSET;
-+    unsigned      npages = btopr (len + offset);
-+    EP_ADDR       addr   = nmh->nmh_nmd.nmd_addr + (index << PAGESHIFT);
-+    int           rmask  = *hints;
-+    EP_RAIL      *rail;
-+    register int  i, rnum;
-+    unsigned long flags;
-+
-+    EPRINTF7 (DBG_KMAP, "ep_dvma_load: map=%p vaddr=%p len=%x nmh=%p(%x,%x) index=%d\n",
-+	      map, vaddr, len, nmh, nmh->nmh_nmd.nmd_addr, nmh->nmh_nmd.nmd_len, index);
-+
-+    /* If no rail specified, then map into all rails */
-+    if (rmask == 0)
-+	rmask = desc->dvma_railmask;
-+
-+    ASSERT ((index + npages) <= (nmh->nmh_nmd.nmd_len >> PAGESHIFT));
-+
-+    /* If no map specified then use the kernel map */
-+    if (map == NULL)
-+	map = kernel_map;
-+
-+    spin_lock_irqsave (&desc->dvma_lock, flags);
-+    /* Now map each of the specified pages (backwards) */
-+
-+    vaddr = (vaddr - offset) + (npages-1)*PAGESIZE;
-+    for (i = npages-1; i >= 0; i--, vaddr -= PAGESIZE)
-+    {
-+	physaddr_t paddr = vaddr_to_phys (map, vaddr);
-+
-+	for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
-+	{
-+	    if (! (rmask & (1 << rnum)) || (rail = desc->dvma_rails[rnum]) == NULL)
-+		rmask &= ~(1 << rnum);
-+	    else
-+	    {
-+		rail->Operations.DvmaSetPte (rail, desc->dvma_private[rnum], index + i, paddr, desc->dvma_perm);
-+
-+		desc->dvma_attrs[index + i] |= (1 << rnum);
-+	    }
-+	}
-+    }
-+
-+    for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
-+	if ((rmask & (1 << rnum)) && (rail = desc->dvma_rails[rnum]) != NULL)
-+	    rail->TlbFlushRequired = 1;
-+
-+    spin_unlock_irqrestore (&desc->dvma_lock, flags);
-+
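-+    /*
-+     * At this point every page in [index, index+npages) has a pte loaded
-+     * on each rail remaining in "rmask"; the TLB flush is deferred and is
-+     * picked up by ep_perrail_dvma_sync() before the mapping is used.
-+     */
-+    /* Construct the network mapping handle to be returned. 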
*/
-+    subset->nmd_addr = addr + offset;
-+    subset->nmd_len  = len;
-+    subset->nmd_attr = EP_NMD_ATTR(sys->Position.pos_nodeid, rmask);
-+}
-+
-+void
-+ep_dvma_unload (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd)
-+{
-+    EP_DVMA_NMH  *desc   = (EP_DVMA_NMH *) nmh;
-+    unsigned      offset = nmd->nmd_addr & PAGEOFFSET;
-+    unsigned      npages = btopr (nmd->nmd_len + offset);
-+    unsigned      index  = (nmd->nmd_addr - nmh->nmh_nmd.nmd_addr) >> PAGESHIFT;
-+    EP_RAIL      *rail;
-+    int           rnum;
-+    int           rmask;
-+    register int  i;
-+    unsigned long flags;
-+
-+    spin_lock_irqsave (&desc->dvma_lock, flags);
-+
-+    /* compute which rails we need to unload on */
-+    for (rmask = 0, i = 0; i < npages; i++)
-+    {
-+	rmask |= desc->dvma_attrs[index + i];
-+
-+	desc->dvma_attrs[index + i] = 0;
-+    }
-+
-+    for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
-+	if ((rmask & (1 << rnum)) && (rail = desc->dvma_rails[rnum]) != NULL)
-+	    rail->Operations.DvmaUnload (rail, desc->dvma_private[rnum], index, npages);
-+
-+    spin_unlock_irqrestore (&desc->dvma_lock, flags);
-+}
-+
-+int
-+ep_dvma_map_rails (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, EP_RAILMASK mask)
-+{
-+    EP_DVMA_NMH  *desc   = (EP_DVMA_NMH *) nmh;
-+    unsigned      offset = nmd->nmd_addr & PAGEOFFSET;
-+    unsigned      npages = btopr (nmd->nmd_len + offset);
-+    unsigned      index  = (nmd->nmd_addr - nmh->nmh_nmd.nmd_addr) >> PAGESHIFT;
-+    int           r, rnum;
-+    register int  i;
-+    unsigned long flags;
-+
-+    spin_lock_irqsave (&desc->dvma_lock, flags);
-+
-+    EPRINTF4 (DBG_KMAP, "ep_dvma_map_rails: nmd=%08x.%08x.%08x mask=%04x\n", nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr, mask);
-+
-+    if ((mask &= desc->dvma_railmask) == 0)
-+    {
-+	printk ("ep_dvma_map_rails: no intersecting rails %04x.%04x\n", mask, desc->dvma_railmask);
-+	spin_unlock_irqrestore (&desc->dvma_lock, flags);
-+	return (-1);
-+    }
-+
-+    for (i = npages-1; i >= 0; i--)
-+    {
-+	int pgidx = (index + i);
-+
-+	for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
-+	    if (desc->dvma_attrs[pgidx] & (1 << rnum))
-+		break;
-+
-+	if (rnum == EP_MAX_RAILS)
-+	{
-+	    EPRINTF3 (DBG_KMAP, "ep_dvma_map_rails: nmh=%p idx=%x [%08x] no ptes valid\n", nmh, pgidx,
-+		      nmh->nmh_nmd.nmd_addr + ((pgidx) << PAGESHIFT));
-+	    mask = 0;
-+	}
-+	else
-+	{
-+	    EP_RAIL   *rail  = desc->dvma_rails[rnum];
-+	    physaddr_t paddr = rail->Operations.DvmaReadPte (rail, desc->dvma_private[rnum], pgidx);
-+
-+	    EPRINTF5 (DBG_KMAP, "%s: ep_dvma_map_rails: nmh=%p idx=%x [%08x] paddr %llx\n", rail->Name, nmh, pgidx,
-+		      nmh->nmh_nmd.nmd_addr + (pgidx << PAGESHIFT), (long long) paddr);
-+
-+	    for (r = 0; r < EP_MAX_RAILS; r++)
-+	    {
-+		if ((mask & (1 << r)) == 0)
-+		    continue;
-+
-+		if ((desc->dvma_attrs[pgidx] & (1 << r)) == 0)
-+		{
-+		    EP_RAIL *drail = desc->dvma_rails[r];
-+
-+		    EPRINTF5 (DBG_KMAP, "%s: ep_dvma_map_rails: nmh=%p idx=%x [%08x] paddr=%llx\n",
-+			      drail->Name, nmh, pgidx, nmh->nmh_nmd.nmd_addr + (pgidx << PAGESHIFT),
-+			      (long long) paddr);
-+
-+		    /* load the pte read from rail "rnum" onto rail "r" */
-+		    drail->Operations.DvmaSetPte (drail, desc->dvma_private[r], pgidx, paddr, desc->dvma_perm);
-+
-+		    desc->dvma_attrs[pgidx] |= (1 << r);
-+		}
-+	    }
-+	}
-+    }
-+
-+    for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
-+	if ((mask & (1 << rnum)) != 0)
-+	    desc->dvma_rails[rnum]->TlbFlushRequired = 1;
-+
-+    EPRINTF4 (DBG_KMAP, "ep_dvma_map_rails: nmd=%08x.%08x.%08x|%04x\n", nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr, mask);
-+
-+    /* Finally update the network memory descriptor */
-+    nmd->nmd_attr |= mask;
-+
-+    spin_unlock_irqrestore (&desc->dvma_lock, flags);
-+
-+    return (0);
-+}
-+#if ! 
defined(CONFIG_EP_NO_CHECK_SUM)
-+#include 
-+
-+/* Generic rolling checksum algorithm: sum = sum * 13 + byte over each
-+ * byte, truncated to 16 bits (e.g. from sum == 0 the two bytes "ab"
-+ * yield 0*13 + 'a' == 97, then 97*13 + 'b' == 1359) */
-+uint16_t
-+rolling_check_sum (char *msg, int nob, uint16_t sum)
-+{
-+    while (nob-- > 0)
-+	sum = sum * 13 + *msg++;
-+
-+    return (sum);
-+}
-+
-+#if ! defined(NO_RMAP)
-+void
-+unmap_phys_address(unsigned long phys_addr)
-+{
-+    unsigned long pfn = (phys_addr >> PAGE_SHIFT);
-+
-+    if (pfn_valid(pfn))
-+	kunmap(pfn_to_page(pfn));
-+}
-+
-+void *
-+map_phys_address(unsigned long phys_addr)
-+{
-+    unsigned long pfn = (phys_addr >> PAGE_SHIFT);
-+
-+    if (pfn_valid(pfn))
-+	return kmap(pfn_to_page(pfn));
-+
-+    return NULL;
-+}
-+#else
-+void
-+unmap_phys_address(unsigned long phys_addr)
-+{
-+    struct page *p = virt_to_page(__va(phys_addr));
-+
-+    if (VALID_PAGE(p))
-+	kunmap(p);
-+}
-+
-+void *
-+map_phys_address(unsigned long phys_addr)
-+{
-+    struct page *p = virt_to_page(__va(phys_addr));
-+
-+    if (VALID_PAGE(p))
-+	return kmap(p);
-+
-+    return NULL;
-+}
-+#endif
-+
-+uint16_t
-+ep_dvma_calc_check_sum (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, uint16_t check_sum)
-+{
-+    /* can't be called from an interrupt */
-+
-+    EP_DVMA_NMH  *desc   = (EP_DVMA_NMH *) nmh;
-+    unsigned      offset = nmd->nmd_addr & PAGEOFFSET;
-+    unsigned      npages = btopr (nmd->nmd_len + offset);
-+    unsigned      index  = (nmd->nmd_addr - nmh->nmh_nmd.nmd_addr) >> PAGESHIFT;
-+    unsigned      start, len;
-+    int           rnum;
-+    register int  i;
-+    unsigned long flags;
-+    EP_RAIL      *rail;
-+
-+
-+    spin_lock_irqsave (&desc->dvma_lock, flags);
-+
-+    EPRINTF3 (DBG_KMAP, "ep_dvma_calc_check_sum: nmd=%08x.%08x.%08x \n", nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr);
-+
-+    /* find a rail */
-+    for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
-+	if (desc->dvma_attrs[index] & (1 << rnum))
-+	    break;
-+
-+    ASSERT (rnum != EP_MAX_RAILS);
-+
-+    rail = desc->dvma_rails[rnum];
-+
-+    for (i = 0; i <= (npages-1); i++)
-+    {
-+	int        pgidx = (index + i);
-+	physaddr_t paddr = rail->Operations.DvmaReadPte (rail, desc->dvma_private[rnum], pgidx);
-+	void      *virt;
-+
-+	spin_unlock_irqrestore (&desc->dvma_lock, flags);	/* unlock for check sum calc */
-+
-+	virt = map_phys_address(paddr);
-+
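-+	/* map_phys_address() kmaps the page (it may be highmem) and can
-+	 * sleep, which is why the lock was dropped above; NULL means the
-+	 * physical address has no valid page behind it */
-+	if (!virt)
-+	    printk("ep_dvma_calc_check_sum: virt = NULL ! 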
\n"); -+ else { -+ if ( i == 0 ) { -+ /* last bit of the first page */ -+ start = (nmd->nmd_addr & (PAGESIZE - 1)) ; -+ len = PAGESIZE - start; -+ if ( len > nmd->nmd_len) /* less than the remaining page */ -+ len = nmd->nmd_len; -+ } else { -+ if ( i != (npages-1)) { -+ /* all of the middle pages */ -+ start = 0; -+ len = PAGESIZE; -+ } else { -+ /* first bit of the last page */ -+ start = 0; -+ len = ((nmd->nmd_addr + nmd->nmd_len -1) & (PAGESIZE -1)) +1; -+ } -+ } -+ -+ check_sum = rolling_check_sum (((char *)virt)+start, len, check_sum); -+ unmap_phys_address(paddr); -+ -+ /* re aquire the lock */ -+ spin_lock_irqsave (&desc->dvma_lock, flags); -+ } -+ -+ EPRINTF5 (DBG_KMAP, "%s: ep_dvma_calc_check_sum: nmh=%p idx=%x [%08x] paddr %llx\n", rail->Name, nmh, pgidx, -+ nmh->nmh_nmd.nmd_addr + (pgidx << PAGESHIFT), (long long) paddr); -+ } -+ -+ EPRINTF4 (DBG_KMAP, "ep_dvma_calc_check_sum: nmd=%08x.%08x.%08x = %d\n", nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr, check_sum); -+ -+ spin_unlock_irqrestore (&desc->dvma_lock, flags); -+ -+ return (check_sum); -+} -+#endif -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/kmap_elan3.c linux-2.6.9/drivers/net/qsnet/ep/kmap_elan3.c ---- clean/drivers/net/qsnet/ep/kmap_elan3.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/kmap_elan3.c 2004-12-14 05:19:23.000000000 -0500 -@@ -0,0 +1,209 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: kmap_elan3.c,v 1.4 2004/12/14 10:19:23 mike Exp $" -+/* $Source: /cvs/master/quadrics/epmod/kmap_elan3.c,v $ */ -+ -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include "kcomm_elan3.h" -+ -+#if defined(DIGITAL_UNIX) -+# define kernel_map (first_task->map) -+# define vaddr_to_phys(map, addr) (pmap_extract (vm_map_pmap ((vm_map_t) map), (unsigned long) addr)) -+#elif defined(LINUX) -+# define kernel_map get_kern_mm() -+# define vaddr_to_phys(map, addr) (kmem_to_phys(addr)) -+#elif defined(SOLARIS) -+# define kernel_map &kas -+# define vaddr_to_phys(map,addr) ptob(hat_getpfnum (((struct as *) map)->a_hat, (caddr_t) addr)) -+#endif -+ -+#define ELAN3_PTES_PER_PAGE (PAGESIZE/ELAN3_PAGE_SIZE) -+ -+#if defined(__LITTLE_ENDIAN__) -+#define PERM_ENDIAN 0 -+#else -+#define PERM_ENDIAN ELAN3_PTE_BIG_ENDIAN -+#endif -+ -+static unsigned int main_permtable[] = -+{ -+ ELAN3_PERM_REMOTEALL, /* EP_PERM_EXECUTE */ -+ ELAN3_PERM_REMOTEREAD, /* EP_PERM_READ */ -+ ELAN3_PERM_REMOTEWRITE, /* EP_PERM_WRITE */ -+ ELAN3_PERM_REMOTEWRITE, /* EP_PERM_ALL */ -+}; -+ -+static unsigned int sdram_permtable[] = -+{ -+ ELAN3_PERM_REMOTEREAD, /* EP_PERM_EXECUTE */ -+ ELAN3_PERM_REMOTEREAD, /* EP_PERM_READ */ -+ ELAN3_PERM_REMOTEWRITE, /* EP_PERM_WRITE */ -+ ELAN3_PERM_REMOTEALL, /* EP_PERM_ALL */ -+}; -+ -+static unsigned int io_permtable[] = -+{ -+ ELAN3_PERM_LOCAL_READ, /* EP_PERM_EXECUTE */ -+ ELAN3_PERM_REMOTEREAD, /* EP_PERM_READ */ -+ ELAN3_PERM_REMOTEWRITE, /* EP_PERM_WRITE */ -+ ELAN3_PERM_REMOTEWRITE, /* EP_PERM_ALL */ -+}; -+ -+void -+ep3_kaddr_map (EP_RAIL *r, EP_ADDR eaddr, virtaddr_t kaddr, unsigned len, unsigned int perm, int ep_attr) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ unsigned npages = len >> PAGESHIFT; -+ int i; -+ unsigned int off; -+ -+ ASSERT ((eaddr & PAGEOFFSET) == 0 && (kaddr & PAGEOFFSET) == 0 && (len & 
PAGEOFFSET) == 0); -+ -+ for (i = 0; i < npages; i++) -+ { -+ physaddr_t paddr = vaddr_to_phys (kernel_map, (void *) kaddr); -+ -+ for (off = 0; off < PAGESIZE; off += ELAN3_PAGE_SIZE) -+ elan3mmu_pteload (rail->Elan3mmu, PTBL_LEVEL_3, eaddr + off, paddr + off, -+ main_permtable[perm], PTE_LOAD_LOCK | PTE_LOAD_NOSYNC | ((ep_attr & EP_NO_SLEEP) ? PTE_NO_SLEEP : 0)); -+ -+ eaddr += PAGESIZE; -+ kaddr += PAGESIZE; -+ } -+} -+ -+void -+ep3_sdram_map (EP_RAIL *r, EP_ADDR eaddr, sdramaddr_t saddr, unsigned len, unsigned int perm, int ep_attr) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ unsigned npages = len >> PAGESHIFT; -+ int i; -+ unsigned int off; -+ -+ ASSERT ((eaddr & PAGEOFFSET) == 0 && (saddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0); -+ -+ for (i = 0; i < npages; i++) -+ { -+ physaddr_t paddr = elan3_sdram_to_phys (rail->Device, saddr); -+ -+ for (off = 0; off < PAGESIZE; off += ELAN3_PAGE_SIZE) -+ elan3mmu_pteload (rail->Elan3mmu, PTBL_LEVEL_3, eaddr+off, paddr+off, -+ sdram_permtable[perm], PTE_LOAD_LOCK | PTE_LOAD_NOSYNC | ((ep_attr & EP_NO_SLEEP) ? PTE_NO_SLEEP : 0) ); -+ -+ eaddr += PAGESIZE; -+ saddr += PAGESIZE; -+ } -+} -+ -+void -+ep3_ioaddr_map (EP_RAIL *r, EP_ADDR eaddr, ioaddr_t ioaddr, unsigned len, unsigned int perm) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ unsigned npages = len >> PAGESHIFT; -+ int i; -+ unsigned int off; -+ -+ ASSERT ((eaddr & PAGEOFFSET) == 0 && (ioaddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0); -+ -+ for (i = 0; i < npages; i++) -+ { -+ physaddr_t paddr = vaddr_to_phys (kernel_map, (void *) ioaddr); -+ -+ for (off = 0; off < PAGESIZE; off += ELAN3_PAGE_SIZE) -+ elan3mmu_pteload (rail->Elan3mmu, PTBL_LEVEL_3, eaddr + off, paddr + off, -+ io_permtable[perm], PTE_LOAD_LOCK | PTE_LOAD_NOSYNC); -+ -+ eaddr += PAGESIZE; -+ ioaddr += PAGESIZE; -+ } -+} -+void -+ep3_unmap (EP_RAIL *r, EP_ADDR eaddr, unsigned len) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ -+ ASSERT ((eaddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0); -+ -+ elan3mmu_unload (rail->Elan3mmu, eaddr, len, PTE_UNLOAD_UNLOCK | PTE_UNLOAD_NOSYNC); -+} -+ -+void * -+ep3_dvma_reserve (EP_RAIL *r, EP_ADDR eaddr, unsigned npages) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ void *private; -+ -+ KMEM_ALLOC (private, void *, npages * ELAN3_PTES_PER_PAGE * sizeof (sdramaddr_t), 1); -+ -+ if (private == NULL) -+ return NULL; -+ -+ elan3mmu_reserve (rail->Elan3mmu, eaddr, npages * ELAN3_PTES_PER_PAGE, (sdramaddr_t *) private); -+ -+ return private; -+} -+ -+void -+ep3_dvma_release (EP_RAIL *r, EP_ADDR eaddr, unsigned npages, void *private) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ -+ elan3mmu_release (rail->Elan3mmu, eaddr, npages * ELAN3_PTES_PER_PAGE, (sdramaddr_t *) private); -+ -+ KMEM_FREE (private, npages * ELAN3_PTES_PER_PAGE * sizeof (sdramaddr_t)); -+} -+ -+void -+ep3_dvma_set_pte (EP_RAIL *r, void *private, unsigned index, physaddr_t paddr, unsigned int perm) -+{ -+ ELAN3_DEV *dev = ((EP3_RAIL *) r)->Device; -+ sdramaddr_t *ptep = &((sdramaddr_t *) private)[index * ELAN3_PTES_PER_PAGE]; -+ int off; -+ -+ for (off =0 ; off < PAGESIZE; off += ELAN3_PAGE_SIZE) -+ { -+ ELAN3_PTE newpte = elan3mmu_phys_to_pte (dev, paddr + off, main_permtable[perm]) | ELAN3_PTE_REF | ELAN3_PTE_MOD; -+ -+ elan3_writepte (dev, *ptep, newpte); -+ -+ ptep++; -+ } -+} -+ -+physaddr_t -+ep3_dvma_read_pte (EP_RAIL *r, void *private, unsigned index) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ sdramaddr_t *ptep = &((sdramaddr_t *) private)[index * ELAN3_PTES_PER_PAGE]; -+ ELAN3_PTE pte = elan3_readpte (rail->Device, 
*ptep); -+ -+ return pte & ELAN3_PTE_PFN_MASK; -+} -+ -+void -+ep3_dvma_unload (EP_RAIL *r, void *private, unsigned index, unsigned npages) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ sdramaddr_t *ptep = &((sdramaddr_t *) private)[index * ELAN3_PTES_PER_PAGE]; -+ ELAN3_PTE tpte = elan3mmu_kernel_invalid_pte (rail->Elan3mmu); -+ int i; -+ -+ for (i = (npages * ELAN3_PTES_PER_PAGE) - 1; i >= 0; i--) -+ elan3_writepte (rail->Device, ptep[i], tpte); -+} -diff -urN clean/drivers/net/qsnet/ep/kmap_elan4.c linux-2.6.9/drivers/net/qsnet/ep/kmap_elan4.c ---- clean/drivers/net/qsnet/ep/kmap_elan4.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/kmap_elan4.c 2005-07-20 07:35:37.000000000 -0400 -@@ -0,0 +1,224 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: kmap_elan4.c,v 1.12.2.1 2005/07/20 11:35:37 mike Exp $" -+/* $Source: /cvs/master/quadrics/epmod/kmap_elan4.c,v $ */ -+ -+#include -+ -+#include -+ -+#include "debug.h" -+#include "kcomm_elan4.h" -+ -+#if defined(DIGITAL_UNIX) -+# define kernel_map (first_task->map) -+# define vaddr_to_phys(map, addr) (pmap_extract (vm_map_pmap ((vm_map_t) map), (unsigned long) addr)) -+#elif defined(LINUX) -+# define kernel_map get_kern_mm() -+# define vaddr_to_phys(map, addr) (kmem_to_phys(addr)) -+#elif defined(SOLARIS) -+# define kernel_map &kas -+# define vaddr_to_phys(map,addr) ptob(hat_getpfnum (((struct as *) map)->a_hat, (caddr_t) addr)) -+#endif -+ -+static unsigned int main_permtable[] = -+{ -+ PERM_Unused, /* EP_PERM_EXECUTE */ -+ PERM_RemoteReadOnly, /* EP_PERM_READ */ -+ PERM_DataReadWrite, /* EP_PERM_WRITE */ -+ PERM_DataReadWrite, /* EP_PERM_ALL */ -+}; -+ -+static unsigned int sdram_permtable[] = -+{ -+ PERM_LocExecute, /* EP_PERM_EXECUTE */ -+ PERM_RemoteReadOnly, /* EP_PERM_READ */ -+ PERM_DataReadWrite, /* EP_PERM_WRITE */ -+ PERM_RemoteAll, /* EP_PERM_ALL */ -+}; -+ -+static unsigned int cport_permtable[] = -+{ -+ PERM_Unused, /* EP_PERM_EXECUTE */ -+ PERM_RemoteReadOnly, /* EP_PERM_READ */ -+ PERM_DataReadWrite, /* EP_PERM_WRITE */ -+ PERM_Unused, /* EP_PERM_ALL */ -+}; -+ -+void -+ep4_kaddr_map (EP_RAIL *r, EP_ADDR eaddr, virtaddr_t kaddr, unsigned int len, unsigned int perm, int ep_attr) -+{ -+ EP4_RAIL *rail = (EP4_RAIL *) r; -+ ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev; -+ unsigned int npages = len >> PAGESHIFT; -+ int i; -+ unsigned int off; -+ -+ ASSERT ((eaddr & PAGEOFFSET) == 0 && (kaddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0); -+ -+ for (i = 0; i < npages; i++) -+ { -+ physaddr_t paddr = vaddr_to_phys (kernel_map, (void *) kaddr); -+ -+ for (off = 0; off < PAGESIZE; off += (1 << dev->dev_pageshift[0])) -+ { -+ E4_uint64 newpte = elan4mmu_phys2pte (dev, paddr + off, main_permtable[perm]); -+ -+ elan4mmu_pteload (&rail->r_ctxt, 0, eaddr + off, HE_TYPE_OTHER, newpte); -+ } -+ -+ eaddr += PAGESIZE; -+ kaddr += PAGESIZE; -+ } -+} -+ -+void -+ep4_sdram_map (EP_RAIL *r, EP_ADDR eaddr, sdramaddr_t saddr, unsigned int len, unsigned int perm, int ep_attr) -+{ -+ EP4_RAIL *rail = (EP4_RAIL *) r; -+ ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev; -+ unsigned int npages = len >> PAGESHIFT; -+ int i; -+ unsigned int off; -+ -+ ASSERT ((eaddr & PAGEOFFSET) == 0 && (saddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0); -+ -+ if ((eaddr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT)) != (saddr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT))) -+ printk ("ep4_sdram_map: 
eaddr=%x saddr=%lx - incorrectly aliased\n", eaddr, saddr);
-+
-+    for (i = 0; i < npages; i++)
-+    {
-+	for (off = 0; off < PAGESIZE; off += (1 << dev->dev_pageshift[0]))
-+	{
-+	    E4_uint64 newpte = ((saddr + off) >> PTE_PADDR_SHIFT) | PTE_SetPerm (sdram_permtable[perm]);
-+
-+	    elan4mmu_pteload (&rail->r_ctxt, 0, eaddr + off, HE_TYPE_OTHER, newpte);
-+	}
-+
-+	eaddr += PAGESIZE;
-+	saddr += PAGESIZE;
-+    }
-+}
-+
-+void
-+ep4_cport_map (EP_RAIL *r, EP_ADDR eaddr, unsigned long cqaddr, unsigned int len, unsigned int perm)
-+{
-+    EP4_RAIL    *rail   = (EP4_RAIL *) r;
-+    ELAN4_DEV   *dev    = rail->r_ctxt.ctxt_dev;
-+    unsigned int npages = len >> PAGESHIFT;
-+    int          i;
-+    unsigned int off;
-+
-+    ASSERT ((eaddr & PAGEOFFSET) == 0 && (cqaddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
-+
-+    for (i = 0; i < npages; i++)
-+    {
-+	for (off = 0; off < PAGESIZE; off += (1 << dev->dev_pageshift[0]))
-+	{
-+	    E4_uint64 newpte = ((cqaddr + off) >> PTE_PADDR_SHIFT) | PTE_SetPerm(cport_permtable[perm]) | PTE_CommandQueue;
-+
-+	    elan4mmu_pteload (&rail->r_ctxt, 0, eaddr + off, HE_TYPE_OTHER, newpte);
-+	}
-+
-+	eaddr  += PAGESIZE;
-+	cqaddr += PAGESIZE;
-+    }
-+}
-+void
-+ep4_unmap (EP_RAIL *r, EP_ADDR eaddr, unsigned int len)
-+{
-+    EP4_RAIL *rail = (EP4_RAIL *) r;
-+
-+    ASSERT ((eaddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
-+
-+    elan4mmu_unload_range (&rail->r_ctxt, 0, eaddr, len);
-+}
-+
-+void *
-+ep4_dvma_reserve (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages)
-+{
-+    EP4_RAIL  *rail = (EP4_RAIL *) r;
-+    ELAN4_DEV *dev  = rail->r_ctxt.ctxt_dev;
-+
-+    EPRINTF3 (DBG_KMAP, "ep4_dvma_reserve: eaddr=%x npages=%d (=> %d)\n", eaddr, npages, (npages << (PAGE_SHIFT - dev->dev_pageshift[0])));
-+
-+    return elan4mmu_reserve (&rail->r_ctxt, 0, (E4_Addr) eaddr, (npages << (PAGE_SHIFT - dev->dev_pageshift[0])), 1);
-+}
-+
-+void
-+ep4_dvma_release (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages, void *private)
-+{
-+    EP4_RAIL *rail = (EP4_RAIL *) r;
-+
-+    EPRINTF3 (DBG_KMAP, "ep4_dvma_release: eaddr=%x npages=%d private=%p\n", eaddr, npages, private);
-+
-+    elan4mmu_release (&rail->r_ctxt, (ELAN4_HASH_CACHE *) private);
-+}
-+
-+void
-+ep4_dvma_set_pte (EP_RAIL *r, void *private, unsigned int index, physaddr_t paddr, unsigned int perm)
-+{
-+    EP4_RAIL     *rail = (EP4_RAIL *) r;
-+    ELAN4_DEV    *dev  = rail->r_ctxt.ctxt_dev;
-+    unsigned int  off;
-+    unsigned long flags;
-+
-+    EPRINTF3 (DBG_KMAP, "ep4_dvma_set_pte: index %x -> eaddr %llx paddr %llx\n",
-+	      index, (long long)(((ELAN4_HASH_CACHE *) private)->hc_start + (index * PAGE_SIZE)), (long long) paddr);
-+
-+    local_irq_save (flags);
-+    for (off = 0; off < PAGESIZE; off += (1 << dev->dev_pageshift[0]))
-+    {
-+	E4_uint64 newpte = elan4mmu_phys2pte (dev, paddr + off, main_permtable[perm]);
-+
-+	elan4mmu_set_pte (&rail->r_ctxt, (ELAN4_HASH_CACHE *) private, (index << (PAGE_SHIFT - dev->dev_pageshift[0])) +
-+			  (off >> dev->dev_pageshift[0]), newpte);
-+    }
-+    local_irq_restore (flags);
-+}
-+
-+physaddr_t
-+ep4_dvma_read_pte (EP_RAIL *r, void *private, unsigned int index)
-+{
-+    EP4_RAIL     *rail = (EP4_RAIL *) r;
-+    ELAN4_DEV    *dev  = rail->r_ctxt.ctxt_dev;
-+    E4_uint64     pte;
-+    unsigned long flags;
-+
-+    local_irq_save (flags);
-+    pte = elan4mmu_get_pte (&rail->r_ctxt, (ELAN4_HASH_CACHE *) private, index << (PAGE_SHIFT - dev->dev_pageshift[0]));
-+    local_irq_restore (flags);
-+
-+    return elan4mmu_pte2phys (dev, pte);
-+}
-+
-+void
-+ep4_dvma_unload (EP_RAIL *r, void *private, unsigned int index, unsigned int npages)
-+{
-+    EP4_RAIL     *rail = (EP4_RAIL *) r;
-+    ELAN4_DEV    *dev  = 
rail->r_ctxt.ctxt_dev; -+ EP_ADDR eaddr = ((ELAN4_HASH_CACHE *) private)->hc_start + (index * PAGE_SIZE); -+ unsigned long idx = (index << (PAGE_SHIFT - dev->dev_pageshift[0])); -+ unsigned long lim = idx + (npages << (PAGE_SHIFT - dev->dev_pageshift[0])); -+ unsigned long flags; -+ -+ EPRINTF5 (DBG_KMAP, "ep4_dvma_unload: eaddr %x -> %lx : index=%d idx=%ld lim=%ld\n", -+ eaddr, (unsigned long)(eaddr + (npages * PAGE_SIZE)), index, idx, lim); -+ -+ local_irq_save (flags); -+ for (; idx < lim; idx++) -+ elan4mmu_clear_pte (&rail->r_ctxt, (ELAN4_HASH_CACHE *) private, idx); -+ local_irq_restore (flags); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/kmsg_elan3.c linux-2.6.9/drivers/net/qsnet/ep/kmsg_elan3.c ---- clean/drivers/net/qsnet/ep/kmsg_elan3.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/kmsg_elan3.c 2005-07-19 10:26:48.000000000 -0400 -@@ -0,0 +1,348 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: kmsg_elan3.c,v 1.4.2.1 2005/07/19 14:26:48 david Exp $" -+/* $Source: /cvs/master/quadrics/epmod/kmsg_elan3.c,v $ */ -+ -+#include -+ -+#include -+ -+#include "kcomm_vp.h" -+#include "kcomm_elan3.h" -+#include "debug.h" -+ -+static void -+ep3_inputq_event (EP3_RAIL *rail, void *arg) -+{ -+ EP3_INPUTQ *inputq = (EP3_INPUTQ *) arg; -+ -+ (*inputq->q_callback)((EP_RAIL *)rail, inputq->q_arg); -+} -+ -+static EP3_COOKIE_OPS ep3_inputq_cookie_ops = -+{ -+ ep3_inputq_event, -+}; -+ -+EP_INPUTQ * -+ep3_alloc_inputq (EP_RAIL *r, unsigned qnum, unsigned slotSize, unsigned slotCount, -+ EP_INPUTQ_CALLBACK *callback, void *arg) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ EP3_INPUTQ *inputq; -+ EP3_InputQueue qdesc; -+ void *slots; -+ int i; -+ -+ ASSERT ((slotSize & (EP_SYSTEMQ_MSG_ALIGN-1)) == 0); -+ -+ KMEM_ALLOC (inputq, EP3_INPUTQ *, sizeof (EP3_INPUTQ), TRUE); -+ -+ if (inputq == NULL) -+ return (EP_INPUTQ *) NULL; -+ -+ if ((slots = ep_alloc_main (&rail->Generic, slotSize * slotCount, 0, &inputq->q_slotsAddr)) == NULL) -+ { -+ KMEM_FREE (inputq, sizeof (EP3_INPUTQ)); -+ return (EP_INPUTQ *) NULL; -+ } -+ -+ inputq->q_slotSize = slotSize; -+ inputq->q_slotCount = slotCount; -+ inputq->q_callback = callback; -+ inputq->q_arg = arg; -+ inputq->q_slots = slots; -+ -+ /* Initialise all the slots to be "unreceived" */ -+ for (i = 0; i < slotCount; i++) -+ ((uint32_t *) ((unsigned long) slots + (i+1) * slotSize))[-1] = EP_SYSTEMQ_UNRECEIVED; -+ -+ inputq->q_base = inputq->q_slotsAddr; -+ inputq->q_top = inputq->q_base + (slotCount-1) * slotSize; -+ inputq->q_fptr = inputq->q_base; -+ inputq->q_desc = EP_SYSTEMQ_DESC(rail->QueueDescs, qnum); -+ inputq->q_descAddr = EP_SYSTEMQ_ADDR (qnum); -+ -+ if (callback) -+ RegisterCookie (&rail->CookieTable, &inputq->q_cookie, inputq->q_descAddr, &ep3_inputq_cookie_ops, inputq); -+ -+ /* Initialise the input queue descriptor */ -+ qdesc.q_state = E3_QUEUE_FULL; -+ qdesc.q_bptr = inputq->q_base + slotSize; -+ qdesc.q_fptr = inputq->q_fptr; -+ qdesc.q_base = inputq->q_base; -+ qdesc.q_top = inputq->q_top; -+ qdesc.q_size = slotSize; -+ qdesc.q_event.ev_Count = 1; -+ qdesc.q_event.ev_Type = callback ? 
EV_TYPE_EVIRQ | inputq->q_cookie.Cookie : 0; -+ qdesc.q_wevent = inputq->q_descAddr + offsetof (EP3_InputQueue, q_event); -+ qdesc.q_wcount = 0; -+ -+ /* copy the queue descriptor down to sdram */ -+ elan3_sdram_copyl_to_sdram (rail->Device, &qdesc, inputq->q_desc, sizeof (EP3_InputQueue)); -+ -+ return (EP_INPUTQ *) inputq; -+} -+ -+void -+ep3_free_inputq (EP_RAIL *r, EP_INPUTQ *q) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ EP3_INPUTQ *inputq = (EP3_INPUTQ *) q; -+ -+ ep_free_main (&rail->Generic, inputq->q_slotsAddr, inputq->q_slotSize * inputq->q_slotCount); -+ -+ if (inputq->q_callback) -+ DeregisterCookie (&rail->CookieTable, &inputq->q_cookie); -+ -+ KMEM_FREE (inputq, sizeof (EP3_INPUTQ)); -+} -+ -+void -+ep3_enable_inputq (EP_RAIL *r, EP_INPUTQ *q) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ EP3_INPUTQ *inputq = (EP3_INPUTQ *) q; -+ -+ elan3_sdram_writel (rail->Device, inputq->q_desc + offsetof (EP3_InputQueue, q_state), 0); -+} -+ -+void -+ep3_disable_inputq (EP_RAIL *r, EP_INPUTQ *q) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ EP3_INPUTQ *inputq = (EP3_INPUTQ *) q; -+ EP3_InputQueue qdesc; -+ -+ /* mark the queue as locked */ -+ SetQueueLocked (rail, inputq->q_desc); -+ -+ /* re-initialise the queue as empty */ -+ qdesc.q_state = E3_QUEUE_FULL; -+ qdesc.q_bptr = (E3_Addr) inputq->q_base + inputq->q_slotSize; -+ qdesc.q_fptr = inputq->q_fptr; -+ qdesc.q_base = inputq->q_base; -+ qdesc.q_top = inputq->q_top; -+ qdesc.q_size = inputq->q_slotSize; -+ qdesc.q_event.ev_Count = 1; -+ qdesc.q_event.ev_Type = inputq->q_callback ? EV_TYPE_EVIRQ | inputq->q_cookie.Cookie : 0; -+ qdesc.q_wevent = inputq->q_descAddr + offsetof (EP3_InputQueue, q_event); -+ qdesc.q_wcount = 0; -+ -+ /* copy the queue descriptor down to sdram */ -+ elan3_sdram_copyl_to_sdram (rail->Device, &qdesc, inputq->q_desc, sizeof (EP3_InputQueue)); -+} -+ -+int -+ep3_poll_inputq (EP_RAIL *r, EP_INPUTQ *q, int maxCount, EP_INPUTQ_HANDLER *handler, void *arg) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ EP3_INPUTQ *inputq = (EP3_INPUTQ *) q; -+ sdramaddr_t qdesc = inputq->q_desc; -+ E3_Addr nfptr; -+ int count = 0; -+ E3_uint32 state; -+ int delay; -+ -+ run_again_because_of_eventqueue_overflow: -+ nfptr = inputq->q_fptr + inputq->q_slotSize; -+ if (nfptr > inputq->q_top) -+ nfptr = inputq->q_base; -+ -+ while (nfptr != elan3_sdram_readl (rail->Device, qdesc + offsetof (EP3_InputQueue, q_bptr))) /* PCI read */ -+ { -+ unsigned long slot = (unsigned long) inputq->q_slots + (nfptr - inputq->q_base); -+ -+ /* Poll the final word of the message until the message has completely -+ * arrived in main memory. 
*/ -+ for (delay = 1; ((uint32_t *) (slot + inputq->q_slotSize))[-1] == EP_SYSTEMQ_UNRECEIVED && delay < EP_SYSTEMQ_UNRECEIVED_TLIMIT; delay <<= 1) -+ DELAY (delay); -+ -+ /* Call the message handler */ -+ (*handler) (r, arg, (void *) slot); -+ -+ /* reset the last word of the slot to "unreceived" */ -+ ((uint32_t *) (slot + inputq->q_slotSize))[-1] = EP_SYSTEMQ_UNRECEIVED; -+ -+ state = elan3_sdram_readl (rail->Device, qdesc + offsetof (EP3_InputQueue, q_state)); /* PCI read */ -+ if ((state & E3_QUEUE_FULL) == 0) -+ elan3_sdram_writel (rail->Device, qdesc + offsetof (EP3_InputQueue, q_fptr), nfptr); /* PCI write */ -+ else -+ { -+ elan3_sdram_writel (rail->Device, qdesc + offsetof (EP3_InputQueue, q_fptr), nfptr); /* PCI write */ -+ elan3_sdram_writel (rail->Device, qdesc + offsetof (EP3_InputQueue, q_state), (state & ~E3_QUEUE_FULL)); /* PCI write */ -+ } -+ inputq->q_fptr = nfptr; -+ -+ nfptr += roundup (inputq->q_slotSize, E3_BLK_ALIGN); -+ if (nfptr > inputq->q_top) -+ nfptr = inputq->q_base; -+ -+ if (++count >= maxCount && maxCount) -+ break; -+ } -+ -+ if (inputq->q_callback && count != 0) -+ { -+ if (count != inputq->q_waitCount) -+ elan3_sdram_writel (rail->Device, qdesc + offsetof (EP3_InputQueue, q_wcount), inputq->q_waitCount = count); -+ -+ if (IssueWaitevent (rail, inputq->q_descAddr + offsetof (EP3_InputQueue, q_wevent)) == ISSUE_COMMAND_TRAPPED) -+ goto run_again_because_of_eventqueue_overflow; -+ } -+ -+ return count; -+} -+ -+#define Q_EVENT(q,slotNum) ((q)->q_elan + (slotNum) * sizeof (E3_BlockCopyEvent)) -+#define Q_EVENT_ADDR(q,slotNum) ((q)->q_elanAddr + (slotNum) * sizeof (E3_BlockCopyEvent)) -+#define Q_MSG(q,slotNum) (void *)((q)->q_main + (slotNum) * (q)->q_slotSize) -+#define Q_MSG_ADDR(q,slotNum) ((q)->q_mainAddr + (slotNum) * (q)->q_slotSize) -+#define Q_DONE(q,slotNum) (*((int *)((q)->q_main + (q)->q_slotCount * (q)->q_slotSize + (slotNum) * sizeof (E3_uint32)))) -+#define Q_DONE_ADDR(q,slotNum) ((q)->q_mainAddr + (q)->q_slotCount * (q)->q_slotSize + (slotNum) * sizeof (E3_uint32)) -+ -+#define Q_ELAN_SIZE(q) ((q)->q_slotCount * sizeof (E3_BlockCopyEvent)) -+#define Q_MAIN_SIZE(q) ((q)->q_slotCount * ((q)->q_slotSize + sizeof (E3_uint32))) -+ -+static void -+ep3_outputq_retry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int error) -+{ -+ E3_DMA_BE *dmabe = (E3_DMA_BE *) dma; -+ sdramaddr_t event = ep_elan2sdram (&rail->Generic, dmabe->s.dma_srcEvent); -+ E3_Addr done = elan3_sdram_readl (rail->Device, event + offsetof (E3_BlockCopyEvent, ev_Dest)); -+ E3_uint32 *donep = ep_elan2main (&rail->Generic, done & ~EV_BCOPY_DTYPE_MASK); -+ -+ EPRINTF1 (DBG_KMSG, "ep3_ouputq_retry: donep at %p -> FAILED\n", donep); -+ -+ *donep = EP3_EVENT_FAILED; -+} -+ -+static EP3_COOKIE_OPS ep3_outputq_cookie_ops = -+{ -+ NULL, /* Event */ -+ ep3_outputq_retry, -+ NULL, /* DmaCancelled */ -+ NULL, /* DmaVerify */ -+}; -+ -+EP_OUTPUTQ * -+ep3_alloc_outputq (EP_RAIL *r, unsigned slotSize, unsigned slotCount) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ EP3_OUTPUTQ *outputq; -+ int i; -+ E3_BlockCopyEvent event; -+ -+ ASSERT ((slotSize & (EP_SYSTEMQ_MSG_ALIGN-1)) == 0); -+ -+ KMEM_ALLOC (outputq, EP3_OUTPUTQ *, sizeof (EP3_OUTPUTQ), 1); -+ -+ if (outputq == NULL) -+ return NULL; -+ -+ outputq->q_slotCount = slotCount; -+ outputq->q_slotSize = slotSize; -+ -+ outputq->q_elan = ep_alloc_elan (r, Q_ELAN_SIZE(outputq), 0, &outputq->q_elanAddr); -+ -+ if (outputq->q_elan == (sdramaddr_t) 0) -+ { -+ KMEM_FREE (outputq, sizeof (EP3_OUTPUTQ)); -+ return NULL; -+ } -+ -+ outputq->q_main = 
ep_alloc_main (r, Q_MAIN_SIZE(outputq), 0, &outputq->q_mainAddr); -+ -+ if (outputq->q_main == (void *) NULL) -+ { -+ ep_free_elan (r, outputq->q_elanAddr, Q_ELAN_SIZE(outputq)); -+ KMEM_FREE (outputq, sizeof (EP3_OUTPUTQ)); -+ return NULL; -+ } -+ -+ RegisterCookie (&rail->CookieTable, &outputq->q_cookie, outputq->q_elanAddr, &ep3_outputq_cookie_ops, outputq); -+ -+ for (i = 0; i < slotCount; i++) -+ { -+ EP3_INIT_COPY_EVENT (event, outputq->q_cookie, Q_DONE_ADDR(outputq, i), 0); -+ -+ Q_DONE(outputq, i) = outputq->q_cookie.Cookie; -+ -+ elan3_sdram_copyl_to_sdram (rail->Device, &event, Q_EVENT(outputq, i), sizeof (E3_BlockCopyEvent)); -+ } -+ -+ return (EP_OUTPUTQ *) outputq; -+} -+ -+void -+ep3_free_outputq (EP_RAIL *r, EP_OUTPUTQ *q) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ EP3_OUTPUTQ *outputq = (EP3_OUTPUTQ *) q; -+ -+ DeregisterCookie (&rail->CookieTable, &outputq->q_cookie); -+ -+ ep_free_main (r, outputq->q_mainAddr, Q_MAIN_SIZE(outputq)); -+ ep_free_elan (r, outputq->q_elanAddr, Q_ELAN_SIZE(outputq)); -+ -+ KMEM_FREE (outputq, sizeof (EP3_OUTPUTQ)); -+} -+ -+void * -+ep3_outputq_msg (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum) -+{ -+ return Q_MSG ((EP3_OUTPUTQ *) q, slotNum); -+} -+ -+int -+ep3_outputq_state (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum) -+{ -+ switch (Q_DONE((EP3_OUTPUTQ *) q, slotNum)) -+ { -+ case EP3_EVENT_ACTIVE: -+ return EP_OUTPUTQ_BUSY; -+ -+ case EP3_EVENT_FAILED: -+ return EP_OUTPUTQ_FAILED; -+ -+ default: -+ return EP_OUTPUTQ_FINISHED; -+ } -+} -+ -+int -+ep3_outputq_send (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum, unsigned size, -+ unsigned vp, unsigned qnum, unsigned retries) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ EP3_OUTPUTQ *outputq = (EP3_OUTPUTQ *) q; -+ unsigned base = outputq->q_slotSize - roundup (size, E3_BLK_ALIGN); -+ E3_DMA_BE dmabe; -+ -+ dmabe.s.dma_type = E3_DMA_TYPE(DMA_BYTE, DMA_WRITE, DMA_QUEUED, retries); -+ dmabe.s.dma_size = roundup (size, E3_BLK_ALIGN); -+ dmabe.s.dma_source = Q_MSG_ADDR(outputq, slotNum) + base; -+ dmabe.s.dma_dest = base; -+ dmabe.s.dma_destEvent = EP_SYSTEMQ_ADDR(qnum); -+ dmabe.s.dma_destCookieVProc = vp; -+ dmabe.s.dma_srcEvent = Q_EVENT_ADDR(outputq, slotNum); -+ dmabe.s.dma_srcCookieVProc = 0; -+ -+ Q_DONE(outputq, slotNum) = EP3_EVENT_ACTIVE; -+ -+ elan3_sdram_writel (rail->Device, Q_EVENT(outputq, slotNum), 1); -+ -+ if (IssueDma (rail, &dmabe, EP_RETRY_CRITICAL, FALSE) != ISSUE_COMMAND_OK) -+ { -+ Q_DONE(outputq, slotNum) = EP3_EVENT_FAILED; -+ return FALSE; -+ } -+ -+ return TRUE; -+} -diff -urN clean/drivers/net/qsnet/ep/kmsg_elan4.c linux-2.6.9/drivers/net/qsnet/ep/kmsg_elan4.c ---- clean/drivers/net/qsnet/ep/kmsg_elan4.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/kmsg_elan4.c 2005-02-28 09:05:38.000000000 -0500 -@@ -0,0 +1,418 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: kmsg_elan4.c,v 1.10 2005/02/28 14:05:38 david Exp $" -+/* $Source: /cvs/master/quadrics/epmod/kmsg_elan4.c,v $ */ -+ -+#include -+ -+#include -+ -+#include "debug.h" -+#include "kcomm_vp.h" -+#include "kcomm_elan4.h" -+ -+#include -+ -+static void -+ep4_inputq_interrupt (EP4_RAIL *rail, void *arg) -+{ -+ EP4_INPUTQ *inputq = (EP4_INPUTQ *) arg; -+ -+ /* mark the queue as "fired" to cause a single waitevent -+ * to be issued next time the queue is polled */ -+ atomic_inc (&inputq->q_fired); -+ -+ (*inputq->q_callback)(&rail->r_generic, inputq->q_arg); -+} -+ -+EP_INPUTQ * -+ep4_alloc_inputq (EP_RAIL *r, unsigned qnum, unsigned slotSize, unsigned slotCount, -+ EP_INPUTQ_CALLBACK *callback, void *arg) -+{ -+ EP4_RAIL *rail = (EP4_RAIL *) r; -+ EP4_INPUTQ *inputq; -+ E4_Event32 qevent; -+ void *slots; -+ int i; -+ -+ ASSERT ((slotSize & (EP_SYSTEMQ_MSG_ALIGN-1)) == 0); -+ -+ KMEM_ALLOC (inputq, EP4_INPUTQ *, sizeof (EP4_INPUTQ), 1); -+ -+ if (inputq == NULL) -+ return (EP_INPUTQ *) NULL; -+ -+ if ((slots = ep_alloc_main (&rail->r_generic, slotSize * slotCount, 0, &inputq->q_slotsAddr)) == NULL) -+ { -+ KMEM_FREE (inputq, sizeof (EP4_INPUTQ)); -+ return (EP_INPUTQ *) NULL; -+ } -+ -+ inputq->q_slotSize = slotSize; -+ inputq->q_slotCount = slotCount; -+ inputq->q_callback = callback; -+ inputq->q_arg = arg; -+ inputq->q_slots = slots; -+ -+ /* Initialise all the slots to be "unreceived" */ -+ for (i = 0; i < slotCount; i++) -+ ((uint32_t *) ((unsigned long) slots + (i+1) * slotSize))[-1] = EP_SYSTEMQ_UNRECEIVED; -+ -+ inputq->q_last = inputq->q_slotsAddr + (slotCount-1) * slotSize; -+ inputq->q_fptr = inputq->q_slotsAddr; -+ inputq->q_desc = EP_SYSTEMQ_DESC (rail->r_queuedescs, qnum); -+ inputq->q_descAddr = EP_SYSTEMQ_ADDR (qnum); -+ inputq->q_eventAddr = rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_qevents[qnum]); -+ -+ if (callback) -+ { -+ if ((inputq->q_ecq = ep4_get_ecq (rail, EP4_ECQ_EVENT, 1)) == 0) -+ { -+ ep_free_main (&rail->r_generic, inputq->q_slotsAddr, inputq->q_slotSize * inputq->q_slotCount); -+ -+ KMEM_FREE (inputq, sizeof (EP4_INPUTQ)); -+ return (EP_INPUTQ *) NULL; -+ } -+ -+ if ((inputq->q_wcq = ep4_get_ecq (rail, EP4_ECQ_MAIN, 4)) == 0) -+ { -+ ep4_put_ecq (rail, inputq->q_ecq, 1); -+ ep_free_main (&rail->r_generic, inputq->q_slotsAddr, inputq->q_slotSize * inputq->q_slotCount); -+ -+ KMEM_FREE (inputq, sizeof (EP4_INPUTQ)); -+ return (EP_INPUTQ *) NULL; -+ } -+ -+ ep4_register_intcookie (rail, &inputq->q_intcookie, inputq->q_descAddr, ep4_inputq_interrupt, inputq); -+ -+ inputq->q_count = 0; -+ -+ atomic_set (&inputq->q_fired, 0); -+ -+ /* Initialise the queue event */ -+ qevent.ev_CountAndType = E4_EVENT_INIT_VALUE (callback ? 
-32 : 0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0); -+ qevent.ev_WritePtr = inputq->q_ecq->ecq_addr; -+ qevent.ev_WriteValue = (inputq->q_intcookie.int_val << E4_MAIN_INT_SHIFT) | INTERRUPT_CMD; -+ } -+ -+ /* copy the event down to sdram */ -+ elan4_sdram_copyq_to_sdram (rail->r_ctxt.ctxt_dev, &qevent, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_qevents[qnum]), sizeof (E4_Event32)); -+ -+ return (EP_INPUTQ *) inputq; -+} -+ -+void -+ep4_free_inputq (EP_RAIL *r, EP_INPUTQ *q) -+{ -+ EP4_RAIL *rail = (EP4_RAIL *) r; -+ EP4_INPUTQ *inputq = (EP4_INPUTQ *) q; -+ -+ ep_free_main (&rail->r_generic, inputq->q_slotsAddr, inputq->q_slotSize * inputq->q_slotCount); -+ -+ if (inputq->q_callback) -+ { -+ ep4_deregister_intcookie (rail, &inputq->q_intcookie); -+ ep4_put_ecq (rail, inputq->q_ecq, 1); -+ ep4_put_ecq (rail, inputq->q_wcq, 4); -+ } -+ -+ KMEM_FREE (inputq, sizeof (EP4_INPUTQ)); -+} -+ -+void -+ep4_enable_inputq (EP_RAIL *r, EP_INPUTQ *q) -+{ -+ EP4_RAIL *rail = (EP4_RAIL *) r; -+ EP4_INPUTQ *inputq = (EP4_INPUTQ *) q; -+ EP_ADDR lastSlot = inputq->q_slotsAddr + (inputq->q_slotCount-1) * inputq->q_slotSize; -+ E4_InputQueue qdesc; -+ -+ qdesc.q_bptr = inputq->q_slotsAddr; -+ qdesc.q_fptr = inputq->q_slotsAddr; -+ qdesc.q_control = E4_InputQueueControl (inputq->q_slotsAddr, lastSlot, inputq->q_slotSize); -+ qdesc.q_event = inputq->q_callback ? inputq->q_eventAddr : 0; -+ -+ /* copy the queue descriptor down to sdram */ -+ ep4_write_qdesc (rail, inputq->q_desc, &qdesc); -+ -+ EPRINTF5 (DBG_KMSG, "ep_enable_inputq: %x - %016llx %016llx %016llx %016llx\n", (int) inputq->q_descAddr, -+ elan4_sdram_readq (rail->r_ctxt.ctxt_dev, inputq->q_desc + 0), -+ elan4_sdram_readq (rail->r_ctxt.ctxt_dev, inputq->q_desc + 8), -+ elan4_sdram_readq (rail->r_ctxt.ctxt_dev, inputq->q_desc + 16), -+ elan4_sdram_readq (rail->r_ctxt.ctxt_dev, inputq->q_desc + 24)); -+} -+ -+void -+ep4_disable_inputq (EP_RAIL *r, EP_INPUTQ *q) -+{ -+ EP4_RAIL *rail = (EP4_RAIL *) r; -+ EP4_INPUTQ *inputq = (EP4_INPUTQ *) q; -+ E4_InputQueue qdesc; -+ -+ /* Initialise the input queue descriptor as "full" with no event */ -+ qdesc.q_bptr = 0; -+ qdesc.q_fptr = 8; -+ qdesc.q_control = E4_InputQueueControl(qdesc.q_bptr, qdesc.q_fptr, 8); -+ qdesc.q_event = 0; -+ -+ /* copy the queue descriptor down to sdram */ -+ ep4_write_qdesc (rail, inputq->q_desc, &qdesc); -+} -+ -+int -+ep4_poll_inputq (EP_RAIL *r, EP_INPUTQ *q, int maxCount, EP_INPUTQ_HANDLER *handler, void *arg) -+{ -+ EP4_RAIL *rail = (EP4_RAIL *) r; -+ ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev; -+ EP4_INPUTQ *inputq = (EP4_INPUTQ *) q; -+ sdramaddr_t qdesc = inputq->q_desc; -+ E4_Addr fptr = inputq->q_fptr; -+ E4_Addr bptr = elan4_sdram_readl (dev, qdesc + offsetof (E4_InputQueue, q_bptr)); -+ int count = 0; -+ int delay; -+ -+ while (bptr != 0 && fptr != bptr) -+ { -+ while (fptr != bptr) -+ { -+ unsigned long slot = (unsigned long) inputq->q_slots + (fptr - inputq->q_slotsAddr); -+ -+ /* Poll the final word of the message until the message has completely -+ * arrived in main memory. 
*/
-+            for (delay = 1; ((uint32_t *) (slot + inputq->q_slotSize))[-1] == EP_SYSTEMQ_UNRECEIVED && delay < EP_SYSTEMQ_UNRECEIVED_TLIMIT; delay <<= 1)
-+                DELAY (delay);
-+
-+            EPRINTF4(DBG_KMSG, "ep4_poll_inputq: %x slot %d of %d [%08x]\n", (int)inputq->q_descAddr,
-+                     ((int)(fptr - inputq->q_slotsAddr))/inputq->q_slotSize,
-+                     inputq->q_slotCount, ((uint32_t *) (slot + inputq->q_slotSize))[-1]);
-+
-+            /* Call the message handler */
-+            (*handler) (r, arg, (void *) slot);
-+
-+            /* reset the last word of the slot to "unreceived" */
-+            ((uint32_t *) (slot + inputq->q_slotSize))[-1] = EP_SYSTEMQ_UNRECEIVED;
-+
-+            /* move on the front pointer */
-+            fptr = (fptr == inputq->q_last) ? inputq->q_slotsAddr : fptr + inputq->q_slotSize;
-+
-+            elan4_sdram_writel (dev, qdesc + offsetof (E4_InputQueue, q_fptr), fptr);
-+
-+            inputq->q_count++;
-+
-+            if (++count >= maxCount && maxCount)
-+            {
-+                inputq->q_fptr = fptr;
-+
-+                return count;
-+            }
-+        }
-+
-+        bptr = elan4_sdram_readl (dev, qdesc + offsetof (E4_InputQueue, q_bptr));
-+    }
-+
-+    inputq->q_fptr = fptr;
-+
-+    /* Only insert a single wait event command if the callback has
-+     * occurred, otherwise just accrue the count as we've just periodically
-+     * polled it.
-+     */
-+    if (inputq->q_callback && atomic_read (&inputq->q_fired))
-+    {
-+        atomic_dec (&inputq->q_fired);
-+
-+        ep4_wait_event_cmd (inputq->q_wcq, inputq->q_eventAddr,
-+                            E4_EVENT_INIT_VALUE (-inputq->q_count << 5, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0),
-+                            inputq->q_ecq->ecq_addr,
-+                            (inputq->q_intcookie.int_val << E4_MAIN_INT_SHIFT) | INTERRUPT_CMD);
-+
-+        inputq->q_count = 0;
-+    }
-+
-+    return count;
-+}
-+
-+#define Q_MSG(q,slotNum)       (unsigned long)((q)->q_main + (slotNum) * (q)->q_slotSize)
-+#define Q_MSG_ADDR(q,slotNum)  ((q)->q_mainAddr + (slotNum) * (q)->q_slotSize)
-+#define Q_DONE(q,slotNum)      *((E4_uint64 *)((q)->q_main + (q)->q_slotCount * (q)->q_slotSize + (slotNum) * sizeof (E4_uint64)))
-+#define Q_DONE_ADDR(q,slotNum) ((q)->q_mainAddr + (q)->q_slotCount * (q)->q_slotSize + (slotNum) * sizeof (E4_uint64))
-+
-+#define Q_MAIN_SIZE(q)         ((q)->q_slotCount * ((q)->q_slotSize + sizeof (E4_uint64)))
-+
-+#define Q_DONE_VAL(val,cnt)    ((cnt) << 16 | (val))
-+#define Q_DONE_RET(done)       ((int) ((done) & 0xffff))
-+#define Q_DONE_CNT(done)       ((int) ((done) >> 16))
-+
-+EP_OUTPUTQ *
-+ep4_alloc_outputq (EP_RAIL *r, unsigned slotSize, unsigned slotCount)
-+{
-+    EP4_RAIL *rail = (EP4_RAIL *) r;
-+    EP4_OUTPUTQ *outputq;
-+    int i;
-+
-+    ASSERT ((slotSize & (EP_SYSTEMQ_MSG_ALIGN-1)) == 0);
-+
-+    KMEM_ALLOC (outputq, EP4_OUTPUTQ *, sizeof (EP4_OUTPUTQ), 1);
-+
-+    if (outputq == NULL)
-+        return NULL;
-+
-+    spin_lock_init (&outputq->q_lock);
-+
-+    outputq->q_slotCount = slotCount;
-+    outputq->q_slotSize = slotSize;
-+    outputq->q_main = ep_alloc_main (r, Q_MAIN_SIZE(outputq), 0, &outputq->q_mainAddr);
-+
-+    if (outputq->q_main == (E4_uint64 *) NULL)
-+    {
-+        KMEM_FREE (outputq, sizeof (EP4_OUTPUTQ));
-+        return NULL;
-+    }
-+
-+    outputq->q_cq = elan4_alloccq (&rail->r_ctxt, CQ_Size64K, CQ_STENEnableBit | CQ_WriteEnableBit, CQ_Priority);
-+
-+    if (outputq->q_cq == (ELAN4_CQ *) NULL)
-+    {
-+        ep_free_main (&rail->r_generic, outputq->q_mainAddr, Q_MAIN_SIZE(outputq));
-+
-+        KMEM_FREE (outputq, sizeof (EP4_OUTPUTQ));
-+        return NULL;
-+    }
-+
-+    outputq->q_dwords = CQ_Size (outputq->q_cq->cq_size) >> 3;
-+
-+    /* mark all the queue slots as finished */
-+    for (i = 0; i < slotCount; i++)
-+        Q_DONE(outputq, i) = Q_DONE_VAL (EP_OUTPUTQ_FINISHED, 0);
-+
-+    return (EP_OUTPUTQ *) outputq;
-+}
-+
-+void
-+ep4_free_outputq (EP_RAIL *r, EP_OUTPUTQ *q)
-+{
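-+    /* Tear down in the reverse order of ep4_alloc_outputq: release the
-+     * command queue first, then the main memory backing the message
-+     * slots and their "done" words, then the lock and the descriptor. */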
-+ EP4_RAIL *rail = (EP4_RAIL *) r; -+ EP4_OUTPUTQ *outputq = (EP4_OUTPUTQ *) q; -+ -+ elan4_freecq (&rail->r_ctxt, outputq->q_cq); -+ -+ ep_free_main (&rail->r_generic, outputq->q_mainAddr, Q_MAIN_SIZE(outputq)); -+ -+ spin_lock_destroy (&outputq->q_lock); -+ -+ KMEM_FREE (outputq, sizeof (EP4_OUTPUTQ)); -+} -+ -+void * -+ep4_outputq_msg (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum) -+{ -+ return (void *) Q_MSG ((EP4_OUTPUTQ *) q, slotNum); -+} -+ -+int -+ep4_outputq_state (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum) -+{ -+ EPRINTF2 (DBG_KMSG, "ep4_outputq_state: slotNum %d state %x\n", slotNum, (int)Q_DONE((EP4_OUTPUTQ *) q, slotNum)); -+ -+ return Q_DONE_RET(Q_DONE((EP4_OUTPUTQ *)q, slotNum)); -+} -+ -+int -+ep4_outputq_send (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum, unsigned size, -+ unsigned vp, unsigned qnum, unsigned retries) -+{ -+ EP4_OUTPUTQ *outputq = (EP4_OUTPUTQ *) q; -+ unsigned int nbytes = roundup (size, 32); -+ unsigned int base = outputq->q_slotSize - nbytes; -+ unsigned int i, dwords; -+ unsigned long flags; -+ E4_uint64 val; -+ -+ spin_lock_irqsave (&outputq->q_lock, flags); -+ -+ EPRINTF4 (DBG_KMSG, "ep4_outputq_send: slotNum=%d size=%d vp=%d qnum=%d\n", slotNum, size, vp, qnum); -+ -+ /* compute command queue size as follows - each slot uses -+ * overhead: 14 dwords + -+ * data > 128 ? 36 dwords -+ * data > 64 ? 18 dwords -+ * data > 32 ? 10 dwords -+ * else 6 dwords -+ */ -+ dwords = 14 + (size > 128 ? 36 : -+ size > 64 ? 18 : -+ size ? 10 : 6); -+ -+ outputq->q_dwords += Q_DONE_CNT (Q_DONE(outputq, slotNum)); -+ -+ if (dwords > outputq->q_dwords) -+ { -+ /* attempt to reclaim command queue space from other slots */ -+ i = slotNum; -+ do { -+ if (++i == outputq->q_slotCount) -+ i = 0; -+ -+ val = Q_DONE(outputq, i); -+ -+ if ((Q_DONE_RET (val) == EP_OUTPUTQ_FINISHED || Q_DONE_RET (val) == EP_OUTPUTQ_FAILED) && Q_DONE_CNT(val) > 0) -+ { -+ outputq->q_dwords += Q_DONE_CNT (val); -+ -+ Q_DONE(outputq, i) = Q_DONE_VAL(Q_DONE_RET(val), 0); -+ } -+ } while (i != slotNum && dwords > outputq->q_dwords); -+ } -+ -+ if (dwords > outputq->q_dwords) -+ { -+ spin_unlock_irqrestore (&outputq->q_lock, flags); -+ -+ EPRINTF0 (DBG_KMSG, "ep4_outputq_state: no command queue space\n"); -+ return 0; -+ } -+ -+ outputq->q_dwords -= dwords; -+ -+ Q_DONE(outputq, slotNum) = Q_DONE_VAL (EP_OUTPUTQ_BUSY, dwords); -+ -+ if (outputq->q_retries != retries) -+ { -+ outputq->q_retries = retries; -+ -+ elan4_guard (outputq->q_cq, GUARD_CHANNEL(1) | GUARD_RESET(retries)); -+ elan4_nop_cmd (outputq->q_cq, 0); -+ } -+ -+ /* transfer the top "size" bytes from message buffer to top of input queue */ -+ elan4_open_packet (outputq->q_cq, OPEN_PACKET (0, PACK_OK | RESTART_COUNT_ZERO, vp)); -+ elan4_sendtrans0 (outputq->q_cq, TR_INPUT_Q_GETINDEX, EP_SYSTEMQ_ADDR(qnum)); -+ -+ /* send upto EP_SYSTEMQ_MSG_MAX (256) bytes of message to the top of the slot */ -+ if (size > 128) -+ { -+ elan4_sendtransp (outputq->q_cq, TR_WRITE (128 >> 3, 0, TR_DATATYPE_DWORD), base + 0, (void *) (Q_MSG(outputq, slotNum) + base + 0)); -+ elan4_sendtransp (outputq->q_cq, TR_WRITE (128 >> 3, 0, TR_DATATYPE_DWORD), base + 128, (void *) (Q_MSG(outputq, slotNum) + base + 128)); -+ } -+ else if (size > 64) -+ elan4_sendtransp (outputq->q_cq, TR_WRITE (128 >> 3, 0, TR_DATATYPE_DWORD), base, (void *) (Q_MSG(outputq, slotNum) + base)); -+ else if (size > 32) -+ elan4_sendtransp (outputq->q_cq, TR_WRITE (64 >> 3, 0, TR_DATATYPE_DWORD), base, (void *) (Q_MSG(outputq, slotNum) + base)); -+ else -+ elan4_sendtransp (outputq->q_cq, 
TR_WRITE (32 >> 3, 0, TR_DATATYPE_DWORD), base, (void *) (Q_MSG(outputq, slotNum) + base)); -+ elan4_sendtrans1 (outputq->q_cq, TR_INPUT_Q_COMMIT, EP_SYSTEMQ_ADDR(qnum), 0 /* no cookie */); -+ -+ elan4_guard (outputq->q_cq, GUARD_CHANNEL (1) | GUARD_TEST(0, PACK_OK) | GUARD_RESET (outputq->q_retries)); -+ elan4_write_dword_cmd (outputq->q_cq, Q_DONE_ADDR(outputq, slotNum), Q_DONE_VAL (EP_OUTPUTQ_FINISHED, dwords)); -+ -+ elan4_guard (outputq->q_cq, GUARD_CHANNEL (1) | GUARD_TEST(0, RESTART_COUNT_ZERO) | GUARD_RESET (outputq->q_retries)); -+ elan4_write_dword_cmd (outputq->q_cq, Q_DONE_ADDR(outputq, slotNum), Q_DONE_VAL (EP_OUTPUTQ_FAILED, dwords)); -+ -+ spin_unlock_irqrestore (&outputq->q_lock, flags); -+ -+ return 1; -+} -diff -urN clean/drivers/net/qsnet/ep/kthread.c linux-2.6.9/drivers/net/qsnet/ep/kthread.c ---- clean/drivers/net/qsnet/ep/kthread.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/kthread.c 2004-05-19 04:54:57.000000000 -0400 -@@ -0,0 +1,186 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: kthread.c,v 1.5 2004/05/19 08:54:57 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $" -+/* $Source: /cvs/master/quadrics/epmod/kthread.c,v $*/ -+ -+#include -+ -+#include -+ -+void -+ep_kthread_init (EP_KTHREAD *kt) -+{ -+ spin_lock_init (&kt->lock); -+ kcondvar_init (&kt->wait); -+ -+ kt->next_run = 0; -+ kt->should_stall = 0; -+ kt->started = 0; -+ kt->should_stop = 0; -+ kt->stopped = 0; -+ kt->state = KT_STATE_RUNNING; -+} -+ -+void -+ep_kthread_destroy (EP_KTHREAD *kt) -+{ -+ spin_lock_destroy (&kt->lock); -+ kcondvar_destroy (&kt->wait); -+} -+ -+void -+ep_kthread_started (EP_KTHREAD *kt) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&kt->lock, flags); -+ kt->started = 1; -+ spin_unlock_irqrestore(&kt->lock, flags); -+} -+ -+void -+ep_kthread_stopped (EP_KTHREAD *kt) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&kt->lock, flags); -+ kt->stopped = 1; -+ kcondvar_wakeupall (&kt->wait, &kt->lock); -+ spin_unlock_irqrestore(&kt->lock, flags); -+} -+ -+int -+ep_kthread_should_stall (EP_KTHREAD *kth) -+{ -+ return (kth->should_stall); -+} -+ -+int -+ep_kthread_sleep (EP_KTHREAD *kt, long next_run) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&kt->lock, flags); -+ if (next_run && (kt->next_run == 0 || BEFORE (next_run, kt->next_run))) -+ kt->next_run = next_run; -+ -+ if (kt->should_stop) -+ { -+ spin_unlock_irqrestore (&kt->lock, flags); -+ return (-1); -+ } -+ -+ do { -+ if (kt->should_stall) -+ kcondvar_wakeupall (&kt->wait, &kt->lock); -+ -+ kt->state = KT_STATE_SLEEPING; -+ kt->running = 0; -+ if (kt->should_stall || kt->next_run == 0) -+ kcondvar_wait (&kt->wait, &kt->lock, &flags); -+ else -+ kcondvar_timedwait (&kt->wait,&kt->lock, &flags, kt->next_run); -+ kt->state = KT_STATE_RUNNING; -+ kt->running = lbolt; -+ } while (kt->should_stall); -+ kt->next_run = 0; -+ spin_unlock_irqrestore (&kt->lock, flags); -+ -+ return (0); -+} -+ -+void -+ep_kthread_schedule (EP_KTHREAD *kt, long tick) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&kt->lock, flags); -+ if (kt->next_run == 0 || BEFORE (tick, kt->next_run)) -+ { -+ kt->next_run = tick; -+ if (!kt->should_stall && kt->state == KT_STATE_SLEEPING) -+ { -+ kt->state = KT_STATE_SCHEDULED; -+ kcondvar_wakeupone (&kt->wait, &kt->lock); -+ } -+ } -+ spin_unlock_irqrestore (&kt->lock, flags); -+} -+ -+void 
-+ep_kthread_stall (EP_KTHREAD *kt) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&kt->lock, flags); -+ if (kt->should_stall++ == 0) -+ kcondvar_wakeupall (&kt->wait, &kt->lock); -+ -+ while (kt->state != KT_STATE_SLEEPING) -+ kcondvar_wait (&kt->wait, &kt->lock, &flags); -+ spin_unlock_irqrestore (&kt->lock, flags); -+} -+ -+void -+ep_kthread_resume (EP_KTHREAD *kt) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&kt->lock, flags); -+ if (--kt->should_stall == 0) -+ { -+ kt->state = KT_STATE_SCHEDULED; -+ kcondvar_wakeupone (&kt->wait, &kt->lock); -+ } -+ spin_unlock_irqrestore (&kt->lock, flags); -+} -+ -+void -+ep_kthread_stop (EP_KTHREAD *kt) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&kt->lock, flags); -+ kt->should_stop = 1; -+ while (kt->started && !kt->stopped) -+ { -+ kcondvar_wakeupall (&kt->wait, &kt->lock); -+ kcondvar_wait (&kt->wait, &kt->lock, &flags); -+ } -+ spin_unlock_irqrestore (&kt->lock, flags); -+} -+ -+int -+ep_kthread_state (EP_KTHREAD *kt, long *time) -+{ -+ unsigned long flags; -+ int res = KT_STATE_SLEEPING; -+ -+ spin_lock_irqsave (&kt->lock, flags); -+ -+ if (kt->next_run) { -+ *time = kt->next_run; -+ res = kt->should_stall ? KT_STATE_STALLED : KT_STATE_SCHEDULED; -+ } -+ -+ if (kt->running) { -+ *time = kt->running; -+ res = KT_STATE_RUNNING; -+ } -+ -+ spin_unlock_irqrestore (&kt->lock, flags); -+ -+ return res; -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "linux" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/kthread.h linux-2.6.9/drivers/net/qsnet/ep/kthread.h ---- clean/drivers/net/qsnet/ep/kthread.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/kthread.h 2004-05-06 10:24:08.000000000 -0400 -@@ -0,0 +1,53 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN3_KTHREAD_H -+#define __ELAN3_KTHREAD_H -+ -+#ident "@(#)$Id: kthread.h,v 1.4 2004/05/06 14:24:08 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $" -+/* $Source: /cvs/master/quadrics/epmod/kthread.h,v $*/ -+ -+typedef struct ep_kthread -+{ -+ kcondvar_t wait; /* place to sleep */ -+ spinlock_t lock; /* and lock */ -+ long next_run; /* tick when thread should next run */ -+ long running; /* tick when thread started to run */ -+ unsigned short should_stall; -+ unsigned char state; -+ unsigned int started:1; -+ unsigned int should_stop:1; -+ unsigned int stopped:1; -+} EP_KTHREAD; -+ -+#define KT_STATE_SLEEPING 0 -+#define KT_STATE_SCHEDULED 1 -+#define KT_STATE_RUNNING 2 -+#define KT_STATE_STALLED 3 -+ -+#define AFTER(a, b) ((((long)(a)) - ((long)(b))) > 0) -+#define BEFORE(a,b) ((((long)(a)) - ((long)(b))) < 0) -+ -+extern void ep_kthread_init (EP_KTHREAD *kt); -+extern void ep_kthread_destroy (EP_KTHREAD *kt); -+extern void ep_kthread_started (EP_KTHREAD *kt); -+extern void ep_kthread_stopped (EP_KTHREAD *kt); -+extern int ep_kthread_should_stall (EP_KTHREAD *kth); -+extern int ep_kthread_sleep (EP_KTHREAD *kth, long next_run); -+extern void ep_kthread_schedule (EP_KTHREAD *kt, long when); -+extern void ep_kthread_stall (EP_KTHREAD *kth); -+extern void ep_kthread_resume (EP_KTHREAD *kt); -+extern void ep_kthread_stop (EP_KTHREAD *kt); -+extern int ep_kthread_state (EP_KTHREAD *kt, long *time); -+#endif /* __ELAN3_KTHREAD_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "linux" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/Makefile linux-2.6.9/drivers/net/qsnet/ep/Makefile ---- clean/drivers/net/qsnet/ep/Makefile 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/Makefile 2005-10-10 17:47:31.000000000 -0400 -@@ -0,0 +1,17 @@ -+# -+# Makefile for Quadrics QsNet -+# -+# Copyright (c) 2002-2004 Quadrics Ltd -+# -+# File: drivers/net/qsnet/ep/Makefile -+# -+ -+ -+ep3-$(CONFIG_ELAN3) := kcomm_elan3.o kmsg_elan3.o kmap_elan3.o neterr_elan3.o probenetwork_elan3.o support_elan3.o threadcode_elan3.o threadcode_elan3_Linux.o epcomms_elan3.o epcommsTx_elan3.o epcommsRx_elan3.o -+ep4-$(CONFIG_ELAN4) := kcomm_elan4.o kmsg_elan4.o kmap_elan4.o neterr_elan4.o probenetwork_elan4.o commands_elan4.o debug_elan4.o support_elan4.o threadcode_elan4_Linux.o epcomms_elan4.o epcommsTx_elan4.o epcommsRx_elan4.o -+# -+ -+obj-$(CONFIG_EP) += ep.o -+ep-objs := cm.o debug.o kalloc.o kcomm.o kmap.o kthread.o neterr.o nmh.o probenetwork.o railhints.o rmap.o statemap.o support.o threadcode.o epcomms.o epcommsRx.o epcommsTx.o epcommsFwd.o conf_linux.o procfs_linux.o ep_procfs.o cm_procfs.o $(ep3-$(CONFIG_EP)) $(ep4-$(CONFIG_EP)) -+ -+EXTRA_CFLAGS += -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT -diff -urN clean/drivers/net/qsnet/ep/Makefile.conf linux-2.6.9/drivers/net/qsnet/ep/Makefile.conf ---- clean/drivers/net/qsnet/ep/Makefile.conf 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/Makefile.conf 2005-09-07 10:39:44.000000000 -0400 -@@ -0,0 +1,12 @@ -+# Flags for generating QsNet Linux Kernel Makefiles -+MODNAME = ep.o -+MODULENAME = ep -+KOBJFILES = cm.o debug.o kalloc.o kcomm.o kmap.o kthread.o neterr.o nmh.o probenetwork.o railhints.o rmap.o statemap.o support.o threadcode.o epcomms.o epcommsRx.o epcommsTx.o epcommsFwd.o conf_linux.o procfs_linux.o ep_procfs.o cm_procfs.o \$\(ep3-\$\(CONFIG_EP\)\) \$\(ep4-\$\(CONFIG_EP\)\) -+EXPORT_KOBJS = conf_linux.o 
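-+# Worked example (hypothetical configuration; standard 2.6 kbuild
-+# semantics assumed): with CONFIG_EP=m, CONFIG_ELAN4=m and CONFIG_ELAN3
-+# unset, "ep4-$(CONFIG_ELAN4)" defines ep4-m while only an empty "ep3-"
-+# is created, so $(ep4-$(CONFIG_EP)) pulls the elan4 support objects
-+# into the ep module and $(ep3-$(CONFIG_EP)) expands to nothing,
-+# dropping the elan3 files from the link.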
-+CONFIG_NAME = CONFIG_EP -+SGALFC = -+# EXTRALINES START -+ -+ep3-$(CONFIG_ELAN3) := kcomm_elan3.o kmsg_elan3.o kmap_elan3.o neterr_elan3.o probenetwork_elan3.o support_elan3.o threadcode_elan3.o threadcode_elan3_Linux.o epcomms_elan3.o epcommsTx_elan3.o epcommsRx_elan3.o -+ep4-$(CONFIG_ELAN4) := kcomm_elan4.o kmsg_elan4.o kmap_elan4.o neterr_elan4.o probenetwork_elan4.o commands_elan4.o debug_elan4.o support_elan4.o threadcode_elan4_Linux.o epcomms_elan4.o epcommsTx_elan4.o epcommsRx_elan4.o -+# EXTRALINES END -diff -urN clean/drivers/net/qsnet/ep/neterr.c linux-2.6.9/drivers/net/qsnet/ep/neterr.c ---- clean/drivers/net/qsnet/ep/neterr.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/neterr.c 2005-07-20 08:01:34.000000000 -0400 -@@ -0,0 +1,79 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: neterr.c,v 1.27.2.1 2005/07/20 12:01:34 mike Exp $" -+/* $Source: /cvs/master/quadrics/epmod/neterr.c,v $ */ -+ -+#include -+#include -+ -+#include "debug.h" -+ -+void -+ep_queue_network_error (EP_RAIL *rail, int nodeId, int what, int channel, EP_NETERR_COOKIE cookie) -+{ -+ EP_SYS *sys = rail->System; -+ EP_NODE_RAIL *nodeRail = &rail->Nodes[nodeId]; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&sys->NodeLock, flags); -+ -+ ASSERT (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE); -+ -+ if (nodeRail->NetworkErrorState == 0) -+ { -+ EPRINTF2 (DBG_NETWORK_ERROR, "%s: raise context filter for node %d due to network error\n", rail->Name, nodeId); -+ -+ rail->Operations.RaiseFilter (rail, nodeId); -+ -+ if (nodeRail->State == EP_NODE_LOCAL_PASSIVATE) -+ printk ("%s: node %d is flushing - deferring network error fixup\n", rail->Name, nodeId); -+ else -+ list_add_tail (&nodeRail->Link, &rail->NetworkErrorList); -+ } -+ -+ switch (what) -+ { -+ case EP_NODE_NETERR_ATOMIC_PACKET: -+ ASSERT (nodeRail->NetworkErrorCookies[channel] == 0); -+ -+ /* Need to raise the approriate context filter for this node, -+ * and periodically send a neterr fixup message to it until -+ * we receive an ack from it -+ */ -+ IncrStat (rail, NeterrAtomicPacket); -+ -+ nodeRail->NetworkErrorCookies[channel] = cookie; -+ -+ nodeRail->NetworkErrorState |= EP_NODE_NETERR_ATOMIC_PACKET; -+ nodeRail->MsgXid = ep_xid_cache_alloc (sys, &rail->XidCache); -+ -+ EPRINTF3 (DBG_NETWORK_ERROR, "%s: atomic packet destroyed - node %d cookie %llx\n", rail->Name, nodeId, (long long)cookie); -+ break; -+ -+ case EP_NODE_NETERR_DMA_PACKET: -+ /* Must be an overlapped dma packet, raise the context filter, -+ * and hold it up for a NETWORK_ERROR_TIMEOUT */ -+ IncrStat (rail, NeterrDmaPacket); -+ -+ nodeRail->NetworkErrorState |= EP_NODE_NETERR_DMA_PACKET; -+ break; -+ } -+ -+ nodeRail->NextRunTime = lbolt + NETWORK_ERROR_TIMEOUT; -+ -+ spin_unlock_irqrestore (&sys->NodeLock, flags); -+ -+ ep_kthread_schedule (&sys->ManagerThread, nodeRail->NextRunTime); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -+ -diff -urN clean/drivers/net/qsnet/ep/neterr_elan3.c linux-2.6.9/drivers/net/qsnet/ep/neterr_elan3.c ---- clean/drivers/net/qsnet/ep/neterr_elan3.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/neterr_elan3.c 2003-11-17 08:26:45.000000000 -0500 -@@ -0,0 +1,326 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: neterr_elan3.c,v 1.24 2003/11/17 13:26:45 david Exp $" -+/* $Source: /cvs/master/quadrics/epmod/neterr_elan3.c,v $ */ -+ -+#include -+ -+#include -+ -+#include "kcomm_vp.h" -+#include "kcomm_elan3.h" -+#include "debug.h" -+ -+typedef struct neterr_halt_args -+{ -+ EP3_RAIL *Rail; -+ unsigned int NodeId; -+ EP_NETERR_COOKIE *Cookies; -+} NETERR_HALT_ARGS; -+ -+static int -+DmaMatchesCookie (EP3_RAIL *rail, E3_DMA_BE *dma, int nodeId, EP_NETERR_COOKIE *cookies, char *where) -+{ -+ E3_uint32 cvproc; -+ E3_uint32 cookie; -+ -+ if (dma->s.dma_direction == DMA_WRITE) -+ { -+ cvproc = dma->s.dma_destCookieVProc; -+ cookie = dma->s.dma_srcCookieVProc; -+ } -+ else -+ { -+ cvproc = dma->s.dma_srcCookieVProc; -+ cookie = dma->s.dma_destCookieVProc; -+ } -+ -+ EPRINTF6 (DBG_NETWORK_ERROR, "%s: Neterr - %s: DMA %08x %08x %08x %08x\n", rail->Generic.Name, where, -+ dma->s.dma_type, dma->s.dma_size, dma->s.dma_source, dma->s.dma_dest); -+ EPRINTF5 (DBG_NETWORK_ERROR, "%s: %08x %08x %08x %08x\n", rail->Generic.Name, -+ dma->s.dma_destEvent, dma->s.dma_destCookieVProc, dma->s.dma_srcEvent, dma->s.dma_srcCookieVProc); -+ -+ if (EP_VP_ISDATA((cvproc & DMA_PROCESS_MASK)) && EP_VP_TO_NODE(cvproc & DMA_PROCESS_MASK) == nodeId) -+ { -+ /* -+ * This is a DMA going to the node which has a network fixup -+ * request pending, so check if the cookie matches. -+ */ -+ if ((cookie == cookies[0] || cookie == cookies[1]) /* && !WaitForEop */) -+ { -+ EPRINTF3 (DBG_NETWORK_ERROR, "%s: match cookie %08x on %s\n", rail->Generic.Name, cookie, where); -+ -+ return (TRUE); -+ } -+ } -+ -+ return (FALSE); -+} -+ -+ -+static void -+NetworkErrorHaltOperation (ELAN3_DEV *dev, void *arg) -+{ -+ NETERR_HALT_ARGS *args = (NETERR_HALT_ARGS *) arg; -+ EP3_RAIL *rail = args->Rail; -+ EP_SYS *sys = rail->Generic.System; -+ sdramaddr_t FPtr, BPtr; -+ sdramaddr_t Base, Top; -+ E3_DMA_BE dma; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&sys->NodeLock, flags); -+ -+ ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc.s.FSR)) == 0); -+ ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0.s.FSR.Status)) == 0); -+ ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData1.s.FSR.Status)) == 0); -+ ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData2.s.FSR.Status)) == 0); -+ ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData3.s.FSR.Status)) == 0); -+ -+ FPtr = read_reg32 (dev, DProc_SysCntx_FPtr); -+ BPtr = read_reg32 (dev, DProc_SysCntx_BPtr); -+ Base = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0]); -+ Top = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[E3_SysCntxQueueSize-1]); -+ -+ while (FPtr != BPtr) -+ { -+ elan3_sdram_copyq_from_sdram (dev, FPtr, &dma, sizeof (E3_DMA_BE)); -+ -+ if (DmaMatchesCookie (rail, &dma, args->NodeId, args->Cookies, "runq ")) -+ { -+ /* -+ * Transfer the DMA to the node, it's source event will -+ * get executed later. -+ */ -+ QueueDmaOnStalledList (rail, &dma); -+ -+ /* -+ * Remove the DMA from the queue by replacing it with one with -+ * zero size and no events. -+ * -+ * NOTE: we must preserve the SYS_CONTEXT_BIT since the Elan uses this -+ * to mark the approriate run queue as empty. 
-+ */ -+ dma.s.dma_type = (SYS_CONTEXT_BIT << 16); -+ dma.s.dma_size = 0; -+ dma.s.dma_source = (E3_Addr) 0; -+ dma.s.dma_dest = (E3_Addr) 0; -+ dma.s.dma_destEvent = (E3_Addr) 0; -+ dma.s.dma_destCookieVProc = 0; -+ dma.s.dma_srcEvent = (E3_Addr) 0; -+ dma.s.dma_srcCookieVProc = 0; -+ -+ elan3_sdram_copyq_to_sdram (dev, &dma, FPtr, sizeof (E3_DMA_BE)); -+ } -+ -+ FPtr = (FPtr == Top) ? Base : FPtr + sizeof (E3_DMA); -+ } -+ -+ rail->NetworkErrorFlushed = TRUE; -+ kcondvar_wakeupall (&rail->NetworkErrorSleep, &sys->NodeLock); -+ -+ spin_unlock_irqrestore (&sys->NodeLock, flags); -+} -+ -+void -+ep3_neterr_fixup (EP_RAIL *r, unsigned int nodeId, EP_NETERR_COOKIE *cookies) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ EP_SYS *sys = rail->Generic.System; -+ ELAN3_DEV *dev = rail->Device; -+ EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[nodeId]; -+ E3_DMA_BE dmabe; -+ EP3_COOKIE *cp; -+ E3_uint32 vp; -+ NETERR_HALT_ARGS args; -+ struct list_head *el, *nel, matchedList; -+ int i; -+ unsigned long flags; -+ -+ INIT_LIST_HEAD (&matchedList); -+ -+ StallDmaRetryThread (rail); -+ -+ args.Rail = rail; -+ args.NodeId = nodeId; -+ args.Cookies = cookies; -+ -+ spin_lock_irqsave (&rail->Device->IntrLock, flags); -+ QueueHaltOperation (rail->Device, 0, NULL, INT_TProcHalted | INT_DProcHalted, NetworkErrorHaltOperation, &args); -+ spin_unlock_irqrestore (&rail->Device->IntrLock, flags); -+ -+ spin_lock_irqsave (&sys->NodeLock, flags); -+ while (! rail->NetworkErrorFlushed) -+ kcondvar_wait (&rail->NetworkErrorSleep, &sys->NodeLock, &flags); -+ rail->NetworkErrorFlushed = FALSE; -+ -+ spin_lock (&rail->DmaRetryLock); -+ for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++) -+ { -+ list_for_each_safe (el, nel, &rail->DmaRetries[i]) { -+ EP3_RETRY_DMA *retry = list_entry (el, EP3_RETRY_DMA, Link); -+ -+ if (DmaMatchesCookie (rail, &retry->Dma, nodeId, cookies, "retry")) -+ { -+ /* remove from retry list */ -+ list_del (&retry->Link); -+ -+ /* add to list of dmas which matched */ -+ list_add_tail (&retry->Link, &matchedList); -+ } -+ } -+ } -+ -+ list_for_each_safe (el, nel, &nodeRail->StalledDmas) { -+ EP3_RETRY_DMA *retry = list_entry (el, EP3_RETRY_DMA, Link); -+ -+ if (DmaMatchesCookie (rail, &retry->Dma, nodeId, cookies, "stalled")) -+ { -+ /* remove from retry list */ -+ list_del (&retry->Link); -+ -+ /* add to list of dmas which matched */ -+ list_add_tail (&retry->Link, &matchedList); -+ } -+ } -+ -+ spin_unlock (&rail->DmaRetryLock); -+ spin_unlock_irqrestore (&sys->NodeLock, flags); -+ -+ ResumeDmaRetryThread (rail); -+ -+ /* Now "set" the source event of any write DMA's */ -+ while (! 
list_empty (&matchedList)) -+ { -+ EP3_RETRY_DMA *retry = list_entry (matchedList.next, EP3_RETRY_DMA, Link); -+ -+ list_del (&retry->Link); -+ -+ if (retry->Dma.s.dma_direction == DMA_WRITE && retry->Dma.s.dma_srcEvent) -+ { -+ sdramaddr_t event = ep_elan2sdram (&rail->Generic, retry->Dma.s.dma_srcEvent); -+ -+ /* Block local interrupts, since we need to atomically -+ * decrement the event count and perform the word write -+ */ -+ local_irq_save (flags); -+ { -+ E3_uint32 type = elan3_sdram_readl (dev, event + offsetof (E3_Event, ev_Type)); -+ E3_uint32 count = elan3_sdram_readl (dev, event + offsetof (E3_Event, ev_Count)); -+ -+ elan3_sdram_writel (dev, event + offsetof (E3_Event, ev_Count), count - 1); -+ -+ if (count == 1) -+ { -+ if (type & EV_TYPE_MASK_BCOPY) -+ { -+ E3_Addr srcVal = elan3_sdram_readl (dev, event + offsetof (E3_BlockCopyEvent, ev_Source)); -+ E3_Addr dstAddr = elan3_sdram_readl (dev, event + offsetof (E3_BlockCopyEvent, ev_Dest)) & ~EV_BCOPY_DTYPE_MASK; -+ -+ ASSERT ((srcVal & EV_WCOPY) != 0); -+ -+ EPRINTF3 (DBG_NETWORK_ERROR, "%s: neterr perform event word write at %08x with %08x\n", rail->Generic.Name, dstAddr, srcVal); -+ -+ ELAN3_OP_STORE32 (rail->Ctxt, dstAddr, srcVal); -+ } -+ -+ if ((type & ~EV_TYPE_MASK_BCOPY) != 0) -+ { -+ if ((type & EV_TYPE_MASK_CHAIN) == EV_TYPE_CHAIN) -+ { -+ printk ("%s: event at %08x - chained event %x is invalid\n", rail->Generic.Name, retry->Dma.s.dma_srcEvent, type); -+ panic ("ep: neterr invalid event type\n"); -+ } -+ else if ((type & EV_TYPE_MASK_EVIRQ) == EV_TYPE_EVIRQ) -+ { -+ EPRINTF2 (DBG_NETWORK_ERROR, "%s: neterr event interrupt - cookie %08x\n", rail->Generic.Name, (type & ~(EV_TYPE_MASK_EVIRQ|EV_TYPE_MASK_BCOPY))); -+ -+ cp = LookupCookie (&rail->CookieTable, (type & ~(EV_TYPE_MASK_EVIRQ|EV_TYPE_MASK_BCOPY))); -+ -+ if (cp->Operations->Event) -+ cp->Operations->Event(rail, cp->Arg); -+ } -+ else if ((type & EV_TYPE_MASK_DMA) == EV_TYPE_DMA) -+ { -+ sdramaddr_t dma = ep_elan2sdram (&rail->Generic, (type & ~EV_TYPE_MASK2)); -+ -+ EPRINTF2 (DBG_NETWORK_ERROR, "%s: neterr chained dma - %08x\n", rail->Generic.Name, (type & ~EV_TYPE_MASK2)); -+ -+ elan3_sdram_copyq_from_sdram (dev, dma, &dmabe, sizeof (E3_DMA)); -+ -+ if (dmabe.s.dma_direction == DMA_WRITE) -+ { -+ vp = dmabe.s.dma_destVProc; -+ cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_srcEvent); -+ } -+ else -+ { -+ vp = dmabe.s.dma_srcVProc; -+ cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_destEvent); -+ -+ /* we MUST convert this into a DMA_READ_REQUEUE dma as if we don't the -+ * DMA descriptor will be read from the EP_RETRY_DMA rather than the -+ * original DMA - this can then get reused and an incorrect DMA -+ * descriptor sent -+ * eventp->ev_Type contains the dma address with type in the lower bits -+ */ -+ -+ dmabe.s.dma_source = (type & ~EV_TYPE_MASK2); -+ dmabe.s.dma_direction = (dmabe.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE; -+ } -+ -+ ASSERT (EP_VP_ISDATA(vp)); -+ -+ nodeRail = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)]; -+ -+ switch (nodeRail->State) -+ { -+ case EP_NODE_CONNECTED: -+ case EP_NODE_LEAVING_CONNECTED: -+ if (cp != NULL) -+ cp->Operations->DmaRetry (rail, cp->Arg, &dmabe, EAGAIN); -+ else -+ { -+ ASSERT (dmabe.s.dma_direction == DMA_WRITE && dmabe.s.dma_srcEvent == 0 && dmabe.s.dma_isRemote); -+ -+ QueueDmaForRetry (rail, &dmabe, EP_RETRY_ANONYMOUS); -+ } -+ break; -+ -+ case EP_NODE_LOCAL_PASSIVATE: -+ QueueDmaOnStalledList (rail, &dmabe); -+ break; -+ -+ default: -+ panic ("ep: neterr incorrect state for 
node\n"); -+ } -+ } -+ else if ((type & EV_TYPE_MASK_THREAD) == EV_TYPE_THREAD) -+ { -+ printk ("%s: event at %08x - thread waiting %x is invalid\n", rail->Generic.Name, retry->Dma.s.dma_srcEvent, type); -+ panic ("ep: neterr invalid event type\n"); -+ } -+ } -+ } -+ } -+ local_irq_restore(flags); -+ } -+ -+ /* add to free list */ -+ spin_lock_irqsave (&rail->DmaRetryLock, flags); -+ list_add (&retry->Link, &rail->DmaRetryFreeList); -+ spin_unlock_irqrestore (&rail->DmaRetryLock, flags); -+ } -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -+ -diff -urN clean/drivers/net/qsnet/ep/neterr_elan4.c linux-2.6.9/drivers/net/qsnet/ep/neterr_elan4.c ---- clean/drivers/net/qsnet/ep/neterr_elan4.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/neterr_elan4.c 2005-07-20 07:35:37.000000000 -0400 -@@ -0,0 +1,264 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: neterr_elan4.c,v 1.3.2.1 2005/07/20 11:35:37 mike Exp $" -+/* $Source: /cvs/master/quadrics/epmod/neterr_elan4.c,v $ */ -+ -+#include -+ -+#include -+ -+#include "kcomm_vp.h" -+#include "kcomm_elan4.h" -+#include "debug.h" -+ -+struct neterr_desc -+{ -+ EP4_RAIL *rail; -+ unsigned int nodeid; -+ EP_NETERR_COOKIE *cookies; -+ int done; -+} ; -+ -+static int -+dma_matches_cookie (EP4_RAIL *rail, E4_uint64 vproc, E4_uint64 cookie, unsigned int nodeId, EP_NETERR_COOKIE *cookies, const char *where) -+{ -+ if ((EP_VP_ISDATA (vproc) && EP_VP_TO_NODE (vproc) == nodeId) && (cookie == cookies[0] || cookie == cookies[1])) -+ { -+ EPRINTF3 (DBG_NETWORK_ERROR, "%s: match cookie %016llx on %s\n", rail->r_generic.Name, (long long)cookie, where); -+ -+ return 1; -+ } -+ return 0; -+} -+ -+static void -+ep4_neterr_dma_flushop (ELAN4_DEV *dev, void *arg, int qfull) -+{ -+ struct neterr_desc *desc = (struct neterr_desc *) arg; -+ EP4_RAIL *rail = desc->rail; -+ E4_uint64 qptrs = read_reg64 (dev, DProcHighPriPtrs); -+ E4_uint32 qsize = E4_QueueSize (E4_QueueSizeValue (qptrs)); -+ E4_uint32 qfptr = E4_QueueFrontPointer (qptrs); -+ E4_uint32 qbptr = E4_QueueBackPointer (qptrs); -+ E4_DProcQueueEntry qentry; -+ unsigned long flags; -+ -+ while ((qfptr != qbptr) || qfull) -+ { -+ E4_uint64 cookie = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_cookie)); -+ E4_uint64 vproc = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_vproc)); -+ -+ if (dma_matches_cookie (rail, vproc, cookie, desc->nodeid, desc->cookies, "runq ")) -+ { -+ elan4_sdram_copyq_from_sdram (dev, qfptr, &qentry, sizeof (E4_DProcQueueEntry)); -+ -+ ep4_queue_dma_stalled (rail, &qentry.Desc); -+ -+ /* Replace the dma with one which will "disappear" */ -+ qentry.Desc.dma_typeSize = DMA_ShMemWrite | dev->dev_ctxt.ctxt_num; -+ qentry.Desc.dma_cookie = 0; -+ qentry.Desc.dma_vproc = 0; -+ qentry.Desc.dma_srcAddr = 0; -+ qentry.Desc.dma_dstAddr = 0; -+ qentry.Desc.dma_srcEvent = 0; -+ qentry.Desc.dma_dstEvent = 0; -+ -+ elan4_sdram_copyq_to_sdram (dev, &qentry, qfptr, sizeof (E4_DProcQueueEntry)); -+ } -+ -+ qfptr = (qfptr & ~(qsize-1)) | ((qfptr + sizeof (E4_DProcQueueEntry)) & (qsize-1)); -+ qfull = 0; -+ } -+ -+ spin_lock_irqsave (&rail->r_haltop_lock, flags); -+ desc->done = 1; -+ kcondvar_wakeupall (&rail->r_haltop_sleep, &rail->r_haltop_lock); -+ spin_unlock_irqrestore (&rail->r_haltop_lock, flags); -+} -+ -+static void -+ep4_neterr_dma_haltop (ELAN4_DEV *dev, void *arg) 
-+{ -+ struct neterr_desc *desc = (struct neterr_desc *) arg; -+ -+ elan4_queue_dma_flushop (dev, &desc->rail->r_flushop, 1); -+} -+ -+void -+ep4_neterr_fixup_dmas (EP4_RAIL *rail, unsigned int nodeId, EP_NETERR_COOKIE *cookies) -+{ -+ EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[nodeId]; -+ struct neterr_desc desc; -+ struct list_head matchedList; -+ struct list_head *el, *nel; -+ unsigned long flags; -+ register int i; -+ -+ desc.rail = rail; -+ desc.nodeid = nodeId; -+ desc.cookies = cookies; -+ desc.done = 0; -+ -+ INIT_LIST_HEAD (&matchedList); -+ -+ /* First - stall the retry thread, so that it will no longer restart -+ * any dma's from the retry list */ -+ ep_kthread_stall (&rail->r_retry_thread); -+ -+ /* Second - flush through all command queues targetted by events, thread etc */ -+ ep4_flush_ecqs (rail); -+ -+ /* Third - queue a halt operation to flush through all DMA's which are executing -+ * or on the run queues */ -+ kmutex_lock (&rail->r_haltop_mutex); -+ -+ rail->r_haltop.op_mask = INT_DProcHalted; -+ rail->r_haltop.op_function = ep4_neterr_dma_haltop; -+ rail->r_haltop.op_arg = &desc; -+ -+ rail->r_flushop.op_function = ep4_neterr_dma_flushop; -+ rail->r_flushop.op_arg = &desc; -+ -+ elan4_queue_haltop (rail->r_ctxt.ctxt_dev, &rail->r_haltop); -+ -+ spin_lock_irqsave (&rail->r_haltop_lock, flags); -+ while (! desc.done) -+ kcondvar_wait (&rail->r_haltop_sleep, &rail->r_haltop_lock, &flags); -+ spin_unlock_irqrestore (&rail->r_haltop_lock, flags); -+ kmutex_unlock (&rail->r_haltop_mutex); -+ -+ /* Fourth - run down the dma retry lists and move all entries to the cancelled -+ * list. Any dma's which were on the run queues have already been -+ * moved there */ -+ spin_lock_irqsave (&rail->r_dma_lock, flags); -+ for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++) -+ { -+ list_for_each_safe (el,nel, &rail->r_dma_retrylist[i]) { -+ EP4_DMA_RETRY *retry = list_entry (el, EP4_DMA_RETRY, retry_link); -+ -+ if (dma_matches_cookie (rail, retry->retry_dma.dma_vproc, retry->retry_dma.dma_cookie, nodeId, cookies, "retry")) -+ { -+ /* remove from retry list */ -+ list_del (&retry->retry_link); -+ -+ /* add to list of dmas which matched */ -+ list_add_tail (&retry->retry_link, &matchedList); -+ } -+ } -+ } -+ -+ list_for_each_safe (el, nel, &nodeRail->StalledDmas) { -+ EP4_DMA_RETRY *retry = list_entry (el, EP4_DMA_RETRY, retry_link); -+ -+ if (dma_matches_cookie (rail, retry->retry_dma.dma_vproc, retry->retry_dma.dma_cookie, nodeId, cookies, "stalled")) -+ { -+ /* remove from retry list */ -+ list_del (&retry->retry_link); -+ -+ /* add to list of dmas which matched */ -+ list_add_tail (&retry->retry_link, &matchedList); -+ } -+ } -+ spin_unlock_irqrestore (&rail->r_dma_lock, flags); -+ -+ /* Now "set" the source event of any put DMA#'s we can use the dma -+ * retry command queue as the retry thread is stalled */ -+ while (! 
list_empty (&matchedList)) -+ { -+ EP4_DMA_RETRY *retry = list_entry (matchedList.next, EP4_DMA_RETRY, retry_link); -+ -+ list_del (&retry->retry_link); -+ -+ elan4_set_event_cmd (rail->r_dma_ecq->ecq_cq, retry->retry_dma.dma_srcEvent); -+ -+ spin_lock_irqsave (&rail->r_dma_lock, flags); -+ list_add (&retry->retry_link, &rail->r_dma_freelist); -+ spin_unlock_irqrestore (&rail->r_dma_lock, flags); -+ } -+ -+ /* Flush through the command queues to ensure that all the setevents have executed */ -+ ep4_flush_ecqs (rail); -+ -+ /* Finally - allow the retry thread to run again */ -+ ep_kthread_resume (&rail->r_retry_thread); -+} -+ -+void -+ep4_add_neterr_ops (EP4_RAIL *rail, EP4_NETERR_OPS *ops) -+{ -+ /* we're called from the ManagerThread, so no need to stall it */ -+ list_add_tail (&ops->op_link, &rail->r_neterr_ops); -+} -+void -+ep4_remove_neterr_ops (EP4_RAIL *rail, EP4_NETERR_OPS *ops) -+{ -+ EP_SYS *sys = rail->r_generic.System; -+ -+ ep_kthread_stall (&sys->ManagerThread); -+ list_del (&ops->op_link); -+ ep_kthread_resume (&sys->ManagerThread); -+} -+ -+void -+ep4_neterr_fixup_sten (EP4_RAIL *rail, unsigned int nodeId, EP_NETERR_COOKIE *cookies) -+{ -+ struct list_head *el; -+ -+ /* First - stall the retry thread, so that it will no longer restart -+ * any sten packets from the retry list */ -+ ep_kthread_stall (&rail->r_retry_thread); -+ -+ /* Second - flush through all command queues targetted by events, thread etc */ -+ ep4_flush_ecqs (rail); -+ -+ list_for_each (el, &rail->r_neterr_ops) { -+ EP4_NETERR_OPS *op = list_entry (el, EP4_NETERR_OPS, op_link); -+ -+ (op->op_func) (rail, op->op_arg, nodeId, cookies); -+ } -+ -+ /* Flush through the command queues to ensure that all the setevents have executed */ -+ ep4_flush_ecqs (rail); -+ -+ /* Finally - allow the retry thread to run again */ -+ ep_kthread_resume (&rail->r_retry_thread); -+} -+ -+void -+ep4_neterr_fixup (EP_RAIL *r, unsigned int nodeId, EP_NETERR_COOKIE *cookies) -+{ -+ EP4_RAIL *rail = (EP4_RAIL *) r; -+ -+ /* network error cookies can come from the following : -+ * -+ * DMA engine -+ * if a DMA matches a network error cookie, then we just need to -+ * execute the local setevent *before* returning. -+ * -+ * STEN packet -+ * if the STEN packet was generated with as a WAIT_FOR_EOP -+ * and it's not present on the retry lists, then re-create -+ * it. -+ * -+ */ -+ EPRINTF4 (DBG_NETWORK_ERROR, "%s: ep4_neterr_fixup: node %d cookies <%lld%s%s%s%s> <%lld%s%s%s%s>\n", -+ rail->r_generic.Name, nodeId, EP4_COOKIE_STRING(cookies[0]), EP4_COOKIE_STRING(cookies[1])); -+ -+ if ((cookies[0] & EP4_COOKIE_DMA) || (cookies[1] & EP4_COOKIE_DMA)) -+ ep4_neterr_fixup_dmas (rail, nodeId, cookies); -+ -+ if ((cookies[0] & EP4_COOKIE_STEN) || (cookies[1] & EP4_COOKIE_STEN)) -+ ep4_neterr_fixup_sten (rail, nodeId, cookies); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -+ -diff -urN clean/drivers/net/qsnet/ep/nmh.c linux-2.6.9/drivers/net/qsnet/ep/nmh.c ---- clean/drivers/net/qsnet/ep/nmh.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/nmh.c 2004-01-05 08:48:08.000000000 -0500 -@@ -0,0 +1,181 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ *
-+ *    For licensing information please see the supplied COPYING file
-+ *
-+ */
-+#ident "@(#)$Id: nmh.c,v 1.6 2004/01/05 13:48:08 david Exp $"
-+/*      $Source: /cvs/master/quadrics/epmod/nmh.c,v $*/
-+
-+#include
-+
-+#include
-+
-+#define EP_NMD_SPANS(nmd, base, top)	((nmd)->nmd_addr <= (base) && \
-+					 ((nmd)->nmd_addr + (nmd)->nmd_len - 1) >= (top))
-+
-+#define EP_NMD_OVERLAPS(nmd, addr, len)	((nmd)->nmd_addr <= ((addr) + (len)) && \
-+					 ((nmd)->nmd_addr + (nmd)->nmd_len - 1) >= (addr))
-+
-+#define EP_NMH_HASH(tbl,idx,addr)	((addr) % (tbl)->tbl_size[idx])
-+
-+int
-+ep_nmh_init (EP_NMH_TABLE *tbl)
-+{
-+    int i, idx, hsize = 1;
-+
-+    for (idx = EP_NMH_NUMHASH-1; idx >= 0; idx--, hsize <<= 1)
-+    {
-+	tbl->tbl_size[idx] = (hsize < EP_NMH_HASHSIZE) ? hsize : EP_NMH_HASHSIZE;
-+
-+	KMEM_ZALLOC (tbl->tbl_hash[idx], struct list_head *, sizeof (struct list_head) * tbl->tbl_size[idx], 1);
-+
-+	if (tbl->tbl_hash[idx] == NULL)
-+	{
-+	    while (++idx < EP_NMH_NUMHASH)
-+		KMEM_FREE (tbl->tbl_hash[idx], sizeof (struct list_head) * tbl->tbl_size[idx]);
-+	    return (ENOMEM);
-+	}
-+
-+	for (i = 0; i < tbl->tbl_size[idx]; i++)
-+	    INIT_LIST_HEAD (&tbl->tbl_hash[idx][i]);
-+    }
-+
-+    return (0);
-+}
-+
-+void
-+ep_nmh_fini (EP_NMH_TABLE *tbl)
-+{
-+    int idx;
-+
-+    for (idx = 0; idx < EP_NMH_NUMHASH; idx++)
-+	if (tbl->tbl_hash[idx])
-+	    KMEM_FREE (tbl->tbl_hash[idx], sizeof (struct list_head) * tbl->tbl_size[idx]);
-+
-+    bzero (tbl, sizeof (EP_NMH_TABLE));
-+}
-+
-+void
-+ep_nmh_insert (EP_NMH_TABLE *tbl, EP_NMH *nmh)
-+{
-+    EP_ADDR base = nmh->nmh_nmd.nmd_addr;
-+    EP_ADDR top  = base + nmh->nmh_nmd.nmd_len - 1;
-+    int     idx;
-+
-+    for (idx = 0, base >>= 12, top >>= 12; base != top && idx < EP_NMH_NUMHASH; idx++, base >>= 1, top >>= 1)
-+	;
-+
-+    list_add_tail (&nmh->nmh_link, &tbl->tbl_hash[idx][EP_NMH_HASH(tbl, idx, base)]);
-+}
-+
-+void
-+ep_nmh_remove (EP_NMH_TABLE *tbl, EP_NMH *nmh)
-+{
-+    list_del (&nmh->nmh_link);
-+}
-+
-+EP_NMH *
-+ep_nmh_find (EP_NMH_TABLE *tbl, EP_NMD *nmd)
-+{
-+    EP_ADDR           base = nmd->nmd_addr;
-+    EP_ADDR           top  = base + nmd->nmd_len - 1;
-+    int               idx;
-+    struct list_head *le;
-+
-+    for (idx = 0, base >>= 12, top >>= 12; base != top && idx < EP_NMH_NUMHASH; idx++, base >>= 1, top >>= 1)
-+	;
-+
-+    for (; idx < EP_NMH_NUMHASH; idx++, base >>= 1, top >>= 1) {
-+
-+	list_for_each (le, &tbl->tbl_hash[idx][EP_NMH_HASH(tbl, idx, base)]) {
-+	    EP_NMH *nmh = list_entry (le, EP_NMH, nmh_link);
-+
-+	    if (EP_NMD_SPANS (&nmh->nmh_nmd, nmd->nmd_addr, nmd->nmd_addr + nmd->nmd_len - 1))
-+		return (nmh);
-+	}
-+    }
-+
-+    return (0);
-+}
-+
-+void
-+ep_nmd_subset (EP_NMD *subset, EP_NMD *nmd, unsigned off, unsigned len)
-+{
-+    ASSERT ((off + len - 1) <= nmd->nmd_len);
-+
-+    subset->nmd_addr = nmd->nmd_addr + off;
-+    subset->nmd_len  = len;
-+    subset->nmd_attr = nmd->nmd_attr;
-+}
-+
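-+/*
-+ * ep_nmd_merge() below only coalesces two mappings when they were generated
-+ * on the same node, share at least one rail and abut exactly in network
-+ * address space.  A worked example (illustrative values only):
-+ *
-+ *	a = { nmd_addr = 0x1000, nmd_len = 0x200, railmask = 0x3 }
-+ *	b = { nmd_addr = 0x1200, nmd_len = 0x100, railmask = 0x1 }
-+ *
-+ * merge to { nmd_addr = 0x1000, nmd_len = 0x300 } carrying the railmask
-+ * intersection 0x1, while any gap or overlap between the two leaves them
-+ * unmerged and the function returns 0.
-+ */
-+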
-+int
-+ep_nmd_merge (EP_NMD *merged, EP_NMD *a, EP_NMD *b)
-+{
-+    if (EP_NMD_NODEID (a) != EP_NMD_NODEID (b))			/* not generated on the same node */
-+	return 0;
-+
-+    if ((EP_NMD_RAILMASK (a) & EP_NMD_RAILMASK (b)) == 0)	/* no common rails */
-+	return 0;
-+
-+    if (b->nmd_addr == (a->nmd_addr + a->nmd_len))
-+    {
-+	if (merged != NULL)
-+	{
-+	    merged->nmd_addr = a->nmd_addr;
-+	    merged->nmd_len  = a->nmd_len + b->nmd_len;
-+	    merged->nmd_attr = EP_NMD_ATTR(EP_NMD_NODEID(a), EP_NMD_RAILMASK(a) & EP_NMD_RAILMASK(b));
-+	}
-+	return 1;
-+    }
-+
-+    if (a->nmd_addr == (b->nmd_addr + b->nmd_len))
-+    {
-+	if (merged != NULL)
-+	{
-+	    merged->nmd_addr = b->nmd_addr;
-+	    merged->nmd_len  = b->nmd_len + a->nmd_len;
-+	    merged->nmd_attr = EP_NMD_ATTR(EP_NMD_NODEID(b), EP_NMD_RAILMASK(a) & EP_NMD_RAILMASK(b));
-+	}
-+
-+	return 1;
-+    }
-+
-+    return 0;
-+}
-+
-+int
-+ep_nmd_map_rails (EP_SYS *sys, EP_NMD *nmd, unsigned railmask)
-+{
-+    EP_NMH *nmh = ep_nmh_find (&sys->MappingTable, nmd);
-+
-+    if (nmh == NULL)
-+    {
-+	printk ("ep_nmd_map_rails: nmd=%08x.%08x.%08x cannot be found\n",
-+		nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr);
-+	return (-1);
-+    }
-+
-+    return (nmh->nmh_ops->op_map_rails (sys, nmh, nmd, railmask));
-+}
-+
-+EP_RAILMASK
-+ep_nmd2railmask (EP_NMD *frags, int nFrags)
-+{
-+    EP_RAILMASK mask;
-+
-+    if (nFrags == 0)
-+	return ((EP_RAILMASK)-1);
-+
-+    for (mask = EP_NMD_RAILMASK(frags); --nFrags; )
-+	mask &= EP_NMD_RAILMASK(++frags);
-+
-+    return (mask);
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "stroustrup"
-+ * End:
-+ */
-diff -urN clean/drivers/net/qsnet/ep/probenetwork.c linux-2.6.9/drivers/net/qsnet/ep/probenetwork.c
---- clean/drivers/net/qsnet/ep/probenetwork.c	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/ep/probenetwork.c	2004-04-19 11:43:15.000000000 -0400
-@@ -0,0 +1,446 @@
-+/*
-+ *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ *    Copyright (c) 2002-2003 by Quadrics Ltd.
-+ *
-+ *    For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ident "@(#)$Id: probenetwork.c,v 1.43 2004/04/19 15:43:15 david Exp $"
-+/*      $Source: /cvs/master/quadrics/epmod/probenetwork.c,v $ */
-+
-+#include
-+
-+#include
-+#include "debug.h"
-+
-+int PositionCheck = 1;
-+
-+#define NUM_DOWN_FROM_VAL(NumDownLinksVal, level)	(((NumDownLinksVal) >> ((level) << 2)) & 0xF)
-+
-+int
-+ProbeNetwork (EP_RAIL *rail, ELAN_POSITION *pos)
-+{
-+    int        lvl, i;
-+    int        level;
-+    int        nodeid;
-+    int        numnodes;
-+    int        randomRoutingDisabled;
-+    int        sw;
-+    int        nacks;
-+    int        nowayup;
-+    int        nalias;
-+    int        upmask;
-+    int        partial;
-+    int        link;
-+    int        invalid;
-+    int        linkdown[ELAN_MAX_LEVELS];
-+    int        linkup[ELAN_MAX_LEVELS];
-+    EP_SWITCH *switches[ELAN_MAX_LEVELS];
-+    int        switchCount[ELAN_MAX_LEVELS+1];
-+    int        lowestBcast;
-+    int        numUpLinks[ELAN_MAX_LEVELS];
-+    int        routedown [ELAN_MAX_LEVELS];
-+
-+    EPRINTF1 (DBG_PROBE, "%s: ProbeNetwork started\n", rail->Name);
-+
-+    switchCount[0] = 1;
-+    numUpLinks [0] = 4;
-+
-+    for (level = 0; level < ELAN_MAX_LEVELS; level++)
-+    {
-+	int ndown = NUM_DOWN_FROM_VAL (rail->Devinfo.dev_num_down_links_value, level);
-+
-+	KMEM_ZALLOC (switches[level], EP_SWITCH *, sizeof (EP_SWITCH) * switchCount[level], 1);
-+
-+	for (sw = 0, nacks = 0, nowayup = 0, lowestBcast=7; sw < switchCount[level]; sw++)
-+	{
-+	    EP_SWITCH *lsw  = &switches[level][sw];
-+	    int        good = 1;
-+	    int        tsw;
-+
-+	    for (nodeid = 0,tsw = sw, lvl = level-1 ; lvl >= 0 ; lvl--)
-+	    {
-+		EP_SWITCH *lsw;
-+		int        link = (8-numUpLinks[lvl]) + (tsw % numUpLinks[lvl]);
-+
-+		tsw = tsw / numUpLinks[lvl];
-+		lsw = &switches[lvl][tsw];
-+
-+		if (lsw->present == 0 || (lsw->lnr & (1 << link)))
-+		{
-+		    EPRINTF4 (DBG_PROBE, "lvl %d sw %d present=%d lnr=%x\n", lvl, sw, lsw->present, lsw->lnr);
-+		    good = 0;
-+		}
-+
-+		linkup[lvl]   = link;
-+		linkdown[lvl] = lsw->link;
-+
-+		if ( lvl ) nodeid = ((nodeid + linkdown[lvl]) * (8-numUpLinks[lvl-1]));
-+		else       nodeid += linkdown[0];
-+	    }
-+
-+	    /*
-+	     * don't bother probing routes which we've already seen are unreachable
-+	     * because a link upwards was in reset or the switch previously nacked us.
-+	     */
-+	    if (! good)
-+	    {
-+		lsw->present = 0;
-+
-+		nacks++;
-+		nowayup++;
-+
-+		continue;
-+	    }
-+
-+	    lsw->present = rail->Operations.ProbeRoute (rail, level, sw, nodeid, linkup, linkdown, 5, lsw);
-+
-+	    if (! lsw->present)
-+	    {
-+		EPRINTF3 (DBG_PROBE, "%s: level %d switch %d - unexpected nack\n", rail->Name, level, sw);
-+
-+		nacks++;
-+		nowayup++;
-+	    }
-+	    else
-+	    {
-+		EPRINTF5 (DBG_PROBE, "%s: level %d switch %d - link %d bcast %d\n", rail->Name, level, sw, lsw->link, lsw->bcast);
-+
-+		if (level == 2 && rail->Devinfo.dev_device_id == PCI_DEVICE_ID_ELAN3)
-+		{
-+		    /* If we see broadcast top as 7, and we came in on a low link, then we can't
-+		     * determine whether we're in a 128 way or an un-configured 64u64d switch, so
-+		     * we treat it as a 64u64d and detect the 128 way case by "going over the top"
-+		     * below.  Unless we've been told what it really is by NumDownLinksVal.
-+		     */
-+		    if (lsw->bcast == 7 && lsw->link < 4)
-+			lsw->bcast = ndown ? (ndown - 1) : 3;
-+		}
-+
-+		if ( lowestBcast > lsw->bcast )
-+		    lowestBcast = lsw->bcast;
-+
-+		if (lsw->link > (ndown ? (ndown-1) : (lowestBcast == 7 ? 3 : lowestBcast)))
-+		{
-+		    /* We've arrived on a "up-link" - this could be either
-+		     * we're in the top half of a x8 top-switch - or we're
-+		     * in the bottom half and have gone "over the top".  We
-+		     * differentiate these cases since the switches below
-+		     * a x8 top-switch will have broadcast top set to 3,
-+		     * and the x8 topswitch have broadcast top set to 7.
-+		     */
-+		    if (lsw->bcast == 7)
-+			nowayup++;
-+		    else
-+		    {
-+			EPRINTF2 (DBG_PROBE, "%s: level %d - gone over the top\n",
-+				  rail->Name, level);
-+
-+			if (level > 0)
-+			{
-+			    KMEM_FREE (switches[level], sizeof (EP_SWITCH) * switchCount[level] );
-+			    level--;
-+			}
-+
-+			numUpLinks[level] = 0;
-+			goto finished;
-+		    }
-+		}
-+	    }
-+	}
-+
-+	numUpLinks[level]    = ndown ? (8 - ndown) : (7 - lowestBcast);
-+	switchCount[level+1] = switchCount[level] * numUpLinks[level];
-+
-+	/* Now we know which links are uplinks, we can see whether there are
-+	 * any possible ways up */
-+	upmask = (ndown ? (0xFF << ndown) & 0xFF : (0xFF << (8 - numUpLinks[level])) & 0xFF);
-+
-+	for (sw = 0; sw < switchCount[level]; sw++)
-+	{
-+	    EP_SWITCH *lsw = &switches[level][sw];
-+
-+	    if (lsw->present && lsw->link <= (ndown ? (ndown-1) : (lowestBcast == 7 ? 3 : lowestBcast)) && (switches[level][sw].lnr & upmask) == upmask)
-+		nowayup++;
-+	}
-+
-+	EPRINTF7 (DBG_PROBE, "%s: level %d - sw=%d nacks=%d nowayup=%d bcast=%d numup=%d\n",
-+		  rail->Name, level, sw, nacks, nowayup, lowestBcast, numUpLinks[level]);
-+
-+	if (nacks == sw)
-+	{
-+	    static bitmap_t printed[BT_BITOUL(EP_MAX_RAILS)];
-+
-+	    if (! BT_TEST (printed, rail->Number))
-+		printk ("%s: cannot determine network position\n", rail->Name);
-+	    BT_SET (printed, rail->Number);
-+	    goto failed;
-+	}
-+
-+	if (nowayup == sw)
-+	    goto finished;
-+    }
-+
-+    printk ("%s: exceeded number of levels\n", rail->Name);
-+    level = ELAN_MAX_LEVELS - 1;
-+
-+ failed:
-+
-+    for (lvl = 0; lvl <= level; lvl++)
-+	KMEM_FREE (switches[lvl], sizeof (EP_SWITCH) * switchCount[lvl] );
-+
-+    return -EAGAIN;
-+
-+ finished:
-+    /* we've successfully probed the network - now calculate our node
-+     * position and what level of random routing is possible */
-+    nalias = 1;
-+    for (lvl = 0, invalid = 0, partial = 0, randomRoutingDisabled = 0; lvl <= level; lvl++)
-+    {
-+	int ndown  = NUM_DOWN_FROM_VAL (rail->Devinfo.dev_num_down_links_value, lvl);
-+	int upmask = ndown ?
(0xFF << ndown) & 0xFF : 0xF0; -+ -+ for (sw = 0, nalias = 0; sw < switchCount[lvl]; sw++) -+ { -+ EP_SWITCH *lsw = &switches[lvl][sw]; -+ -+ /* You can only use adaptive routing if links 4-7 are uplinks, and at least one of them is -+ * not in reset. Otherwise you can randomly select an "uplink" if all the uplinks are not -+ * in reset. */ -+ if (lsw->present && ((upmask == 0xF0) ? (lsw->lnr & upmask) == upmask : (lsw->lnr & upmask) != 0)) -+ randomRoutingDisabled |= (1 << lvl); -+ -+ if (!lsw->present) -+ partial++; -+ else -+ { -+ if (lsw->invalid) -+ { -+ printk ("%s: invalid switch detected (level %d switch %d)\n", rail->Name, lvl, sw); -+ invalid++; -+ } -+ -+ for (i = 0; i < nalias; i++) -+ if (linkdown[i] == lsw->link) -+ break; -+ if (i == nalias) -+ linkdown[nalias++] = lsw->link; -+ } -+ } -+ -+ link = linkdown[0]; -+ for (i = 1; i < nalias; i++) -+ if (linkdown[i] < link) -+ link = linkdown[i]; -+ -+ if (nalias > 1 && lvl != level) -+ { -+ printk ("%s: switch aliased below top level (level %d)\n", rail->Name, lvl); -+ invalid++; -+ } -+ -+ routedown[lvl] = link; -+ } -+ -+ for (lvl = 0; lvl <= level; lvl++) -+ KMEM_FREE (switches[lvl], sizeof (EP_SWITCH) * switchCount[lvl] ); -+ -+ if (invalid) -+ { -+ printk ("%s: invalid switch configuration\n", rail->Name); -+ return (EINVAL); -+ } -+ -+ /* Handle the aliasing case where a 16 way is used as multiple smaller switches */ -+ if (nalias == 1) -+ level++; -+ else if (nalias == 2) /* a 16 way as 2x8 ways */ -+ numUpLinks[level++] = 6; /* only 2 down links */ -+ else if (nalias > 4) /* a 16 way as 8x2 ways */ -+ numUpLinks[level-1] = 6; -+ -+ /* -+ * Compute my nodeid and number of nodes in the machine -+ * from the routedown and the number of downlinks at each level. -+ */ -+ for(nodeid=0, lvl = level - 1; lvl >= 0; lvl--) -+ { -+ if (lvl) nodeid = ((nodeid + routedown[lvl]) * (8-numUpLinks[lvl-1])); -+ else nodeid += routedown[0]; -+ } -+ -+ for (numnodes = 1, lvl = 0; lvl < level; lvl++) -+ numnodes *= (8 - numUpLinks[lvl]); -+ -+ sprintf (rail->Name, "ep%d[%d]", rail->Number, nodeid); -+ -+ if (randomRoutingDisabled & ((1 << (level-1))-1)) -+ printk ("%s: nodeid=%d level=%d numnodes=%d (random routing disabled 0x%x)\n", -+ rail->Name, nodeid, level, numnodes, randomRoutingDisabled); -+ else if (partial) -+ printk ("%s: nodeid=%d level=%d numnodes=%d (random routing ok)\n", -+ rail->Name, nodeid, level, numnodes); -+ else -+ printk ("%s: nodeid=%d level=%d numnodes=%d\n", -+ rail->Name, nodeid, level, numnodes); -+ -+ pos->pos_mode = ELAN_POS_MODE_SWITCHED; -+ pos->pos_nodeid = nodeid; -+ pos->pos_levels = level; -+ pos->pos_nodes = numnodes; -+ pos->pos_random_disabled = randomRoutingDisabled; -+ -+ for(lvl = 0; lvl < level; lvl++) -+ pos->pos_arity[level -lvl - 1] = (8-numUpLinks[lvl]); -+ pos->pos_arity[level] = 1; /* XXXX why does this need to be 1 ? */ -+ -+ return 0; -+} -+ -+/* -+ * broadcast top is invalid if it is not set to the number of downlinks-1, -+ * or at the topmost level it is less than ndown-1. -+ */ -+#define BCAST_TOP_INVALID(lvl, bcast, ndown) ((lvl) == 0 ? (bcast) < ((ndown)-1) : (bcast) != ((ndown) - 1)) -+ -+void -+CheckPosition (EP_RAIL *rail) -+{ -+ ELAN_POSITION *pos = &rail->Position; -+ unsigned int nodeid = pos->pos_nodeid; -+ unsigned int invalid = 0; -+ unsigned int changed = 0; -+ int lvl, slvl; -+ -+ if (! 
PositionCheck) -+ return; -+ -+ if (rail->Operations.CheckPosition(rail)) /* is update ready for this rail */ -+ { -+ EPRINTF2 (DBG_ROUTETABLE, "%s: check position: SwitchProbeLevel=%d\n", rail->Name, rail->SwitchProbeLevel); -+ -+ for (lvl = 0, slvl = pos->pos_levels-1; lvl <= rail->SwitchProbeLevel; lvl++, slvl--) -+ { -+ EP_SWITCHSTATE *state = &rail->SwitchState[lvl]; -+ EP_SWITCHSTATE *lstate = &rail->SwitchLast[lvl]; -+ unsigned int ndown = pos->pos_arity[slvl]; -+ unsigned int upmask = (0xFF << ndown) & 0xFF; -+ unsigned int mylink = nodeid % ndown; -+ unsigned int error = 0; -+ unsigned int binval = 0; -+ -+ nodeid /= ndown; -+ -+ /* -+ * broadcast top is invalid if it is not set to the number of downlinks-1, -+ * or at the topmost level it is less than ndown-1. -+ */ -+ if (BCAST_TOP_INVALID(lvl, state->bcast, ndown) || (state->LNR & upmask) == upmask) -+ { -+ /* no way up from here - we'd better be at the top */ -+ if (lvl != (pos->pos_levels-1)) -+ { -+ if (state->bcast != (ndown-1)) -+ printk ("%s: invalid broadcast top %d at level %d\n", rail->Name, state->bcast, lvl); -+ else if ((state->LNR & upmask) == upmask && (lstate->LNR & upmask) == upmask) -+ printk ("%s: no way up to switch at level %d (turned off ?)\n", rail->Name, lvl+1); -+ } -+ else -+ { -+ if (state->linkid != mylink) -+ printk ("%s: moved at top level was connected to link %d now connected to %d\n", rail->Name, mylink, state->linkid); -+ } -+ -+ if (state->linkid != mylink) -+ error++; -+ -+ if (BCAST_TOP_INVALID (lvl, state->bcast, ndown)) -+ binval++; -+ } -+ else -+ { -+ if (state->linkid != mylink) -+ { -+ if (state->linkid != rail->SwitchLast[lvl].linkid) -+ printk ("%s: moved at lvl %d was connected to link %d now connected to %d\n", rail->Name, lvl, mylink, state->linkid); -+ -+ error++; -+ } -+ } -+ -+ if (error == 0 && invalid == 0) -+ rail->SwitchProbeTick[lvl] = lbolt; -+ -+ EPRINTF10 (DBG_ROUTETABLE, "%s: lvl=%d (slvl=%d) linkid=%d bcast=%d lnr=%02x uplink=%d : error=%d binval=%d invalid=%d\n", -+ rail->Name, lvl, slvl, state->linkid, state->bcast, state->LNR, state->uplink, error, binval, invalid); -+ -+ invalid |= (error | binval); -+ } -+ -+ for (lvl = 0; lvl < rail->SwitchProbeLevel; lvl++) -+ if (rail->SwitchState[lvl].uplink != rail->SwitchLast[lvl].uplink) -+ changed++; -+ -+ if (changed) -+ { -+ printk ("%s: broadcast tree has changed from", rail->Name); -+ for (lvl = 0; lvl < rail->SwitchProbeLevel; lvl++) -+ printk ("%c%d", lvl == 0 ? ' ' : ',', rail->SwitchLast[lvl].uplink); -+ -+ for (lvl = 0; lvl < rail->SwitchProbeLevel; lvl++) -+ printk ("%s%d", lvl == 0 ? 
" to " : ",", rail->SwitchState[lvl].uplink); -+ printk ("\n"); -+ } -+ -+ if (rail->SwitchProbeLevel > 0) -+ bcopy (rail->SwitchState, rail->SwitchLast, rail->SwitchProbeLevel * sizeof (EP_SWITCHSTATE)); -+ } -+ -+ for (lvl = 0; lvl < pos->pos_levels; lvl++) -+ { -+ EPRINTF4 (DBG_ROUTETABLE, "%s: level %d lbolt=%lx ProbeLevelTick=%lx\n", -+ rail->Name, lvl, lbolt, rail->SwitchProbeTick[lvl]); -+ -+ if (AFTER (lbolt, rail->SwitchProbeTick[lvl] + EP_POSITION_TIMEOUT)) -+ { -+ if (lvl < rail->SwitchBroadcastLevel+1) -+ { -+ if (lvl == 0) -+ printk ("%s: cable disconnected\n", rail->Name); -+ else -+ printk ("%s: broadcast level has dropped to %d (should be %d)\n", -+ rail->Name, lvl, rail->Position.pos_levels); -+ } -+ break; -+ } -+ } -+ -+ if (lvl > rail->SwitchBroadcastLevel+1) -+ { -+ if (rail->SwitchBroadcastLevel < 0) -+ printk ("%s: cable reconnected\n", rail->Name); -+ if (lvl == rail->Position.pos_levels) -+ printk ("%s: broadcast level has recovered\n", rail->Name); -+ else -+ printk ("%s: broadcast level has recovered to %d (should be %d)\n", -+ rail->Name, lvl, rail->Position.pos_levels); -+ } -+ -+ if (rail->SwitchBroadcastLevel != (lvl - 1)) -+ { -+ EPRINTF2 (DBG_ROUTETABLE, "%s: setting SwitchBroadcastLevel to %d\n", rail->Name, lvl-1); -+ -+ rail->SwitchBroadcastLevel = lvl - 1; -+ rail->SwitchBroadcastLevelTick = lbolt; -+ } -+} -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/probenetwork_elan3.c linux-2.6.9/drivers/net/qsnet/ep/probenetwork_elan3.c ---- clean/drivers/net/qsnet/ep/probenetwork_elan3.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/probenetwork_elan3.c 2005-04-26 05:36:19.000000000 -0400 -@@ -0,0 +1,302 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: probenetwork_elan3.c,v 1.41 2005/04/26 09:36:19 mike Exp $" -+/* $Source: /cvs/master/quadrics/epmod/probenetwork_elan3.c,v $ */ -+ -+#include -+ -+#include -+ -+#include "kcomm_vp.h" -+#include "kcomm_elan3.h" -+#include "debug.h" -+ -+#include -+ -+static void ep3_probe_event (EP3_RAIL *rail, void *arg); -+static EP3_COOKIE_OPS ep3_probe_ops = -+{ -+ ep3_probe_event -+} ; -+ -+int -+ep3_init_probenetwork (EP3_RAIL *rail) -+{ -+ sdramaddr_t stack; -+ E3_Addr sp; -+ E3_BlockCopyEvent event; -+ int i; -+ -+ if (! 
(stack = ep_alloc_elan (&rail->Generic, EP3_STACK_SIZE, 0, &rail->ProbeStack))) -+ return -ENOMEM; -+ -+ spin_lock_init (&rail->ProbeLock); -+ kcondvar_init (&rail->ProbeWait); -+ -+ /* Initialise the probe command structure */ -+ for (i = 0; i < TR_TRACEROUTE_ENTRIES; i++) -+ elan3_sdram_writew (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeSource0[i]), 0); -+ for (i = 0; i < TR_TRACEROUTE_ENTRIES; i++) -+ elan3_sdram_writew (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeSource1[i]), 1); -+ -+ RegisterCookie (&rail->CookieTable, &rail->ProbeCookie, rail->RailElanAddr + offsetof (EP3_RAIL_ELAN, ProbeDone), &ep3_probe_ops, rail); -+ -+ elan3_sdram_writel (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeStart.ev_Type), 0); -+ elan3_sdram_writel (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeStart.ev_Count), 0); -+ -+ EP3_INIT_COPY_EVENT (event, rail->ProbeCookie, rail->RailMainAddr + offsetof (EP3_RAIL_MAIN, ProbeDone), 1); -+ elan3_sdram_copyl_to_sdram (rail->Device, &event, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeDone), sizeof (E3_BlockCopyEvent)); -+ -+ rail->RailMain->ProbeDone = EP3_EVENT_FREE; -+ -+ sp = ep3_init_thread (rail->Device, ep_symbol (&rail->ThreadCode, "kcomm_probe"), -+ rail->ProbeStack, stack, EP3_STACK_SIZE, -+ 3, rail->CommandPortAddr, rail->RailElanAddr, rail->RailMainAddr); -+ -+ IssueRunThread (rail, sp); -+ -+ return 0; -+} -+ -+void -+ep3_destroy_probenetwork (EP3_RAIL *rail) -+{ -+ if (rail->ProbeStack == (sdramaddr_t) 0) -+ return; -+ -+ /* XXXX: ensure that the network probe thread is stopped */ -+ -+ DeregisterCookie (&rail->CookieTable, &rail->ProbeCookie); -+ -+ kcondvar_destroy (&rail->ProbeWait); -+ spin_lock_destroy (&rail->ProbeLock); -+ -+ ep_free_elan (&rail->Generic, rail->ProbeStack, EP3_STACK_SIZE); -+} -+ -+static void -+ep3_probe_event (EP3_RAIL *rail, void *arg) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&rail->ProbeLock, flags); -+ rail->ProbeDone = 1; -+ kcondvar_wakeupone (&rail->ProbeWait, &rail->ProbeLock); -+ spin_unlock_irqrestore (&rail->ProbeLock, flags); -+} -+ -+int -+ep3_probe_route (EP_RAIL *r, int level, int sw, int nodeid, int *linkup, int *linkdown, int attempts, EP_SWITCH *lsw) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) r; -+ EP3_RAIL_MAIN *railMain = rail->RailMain; -+ sdramaddr_t railElan = rail->RailElan; -+ E3_uint16 flits[MAX_FLITS]; -+ E3_uint32 result; -+ int nflits; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&rail->ProbeLock, flags); -+ -+ nflits = GenerateProbeRoute ( flits, nodeid, level, linkup, linkdown, 0); -+ -+ if (LoadRoute (rail->Device, rail->RouteTable, EP_VP_PROBE(level), ELAN3_MRF_CONTEXT_NUM|SYS_CONTEXT_BIT, nflits, flits) != 0) -+ { -+ EPRINTF0 (DBG_ROUTETABLE, "ProbeRoute: cannot load route entry\n"); -+ spin_unlock_irqrestore (&rail->ProbeLock, flags); -+ return (EINVAL); -+ } -+ -+ do { -+ /* Initialise the probe source to include our partially computed nodeid */ -+ elan3_sdram_writew (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeSource0[TR_TRACEROUTE_ENTRIES-1]), nodeid); -+ elan3_sdram_writew (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeSource1[TR_TRACEROUTE_ENTRIES-1]), nodeid); -+ -+ /* Initialise the count result etc */ -+ elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeType), PROBE_SINGLE); -+ elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeLevel), level); -+ -+ railMain->ProbeResult = -1; -+ -+ /* Clear the receive area */ -+ bzero 
(railMain->ProbeDest0, sizeof (railMain->ProbeDest0));
-+	bzero (railMain->ProbeDest1, sizeof (railMain->ProbeDest1));
-+
-+	/* Re-arm the completion event */
-+	elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeDone.ev_Count), 1);
-+	railMain->ProbeDone = EP3_EVENT_ACTIVE;
-+	rail->ProbeDone     = 0;
-+
-+	/* And wakeup the thread to do the probe */
-+	IssueSetevent (rail, rail->RailElanAddr + offsetof (EP3_RAIL_ELAN, ProbeStart));
-+
-+	/* Now wait for it to complete */
-+	while (! rail->ProbeDone)
-+	    kcondvar_wait (&rail->ProbeWait, &rail->ProbeLock, &flags);
-+
-+	/* wait for block copy event to flush write buffers */
-+	while (! EP3_EVENT_FIRED (rail->ProbeCookie, railMain->ProbeDone))
-+	    if (! EP3_EVENT_FIRING(rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeDone), rail->ProbeCookie, railMain->ProbeDone))
-+		panic ("ProbeRoute: network probe event failure\n");
-+
-+	result = railMain->ProbeResult;
-+
-+	if (result == C_ACK_ERROR)
-+	    kcondvar_timedwait (&rail->ProbeWait, &rail->ProbeLock, &flags, lbolt + (hz/8));
-+
-+	railMain->ProbeDone = EP3_EVENT_FREE;
-+
-+    } while (result != C_ACK_OK && --attempts);
-+
-+    if (result == C_ACK_OK)
-+    {
-+	if (railMain->ProbeDest0[TR_TRACEROUTE_ENTRIES - ((2*level)+1) - 1] != nodeid ||
-+	    railMain->ProbeDest1[TR_TRACEROUTE_ENTRIES - ((2*level)+1) - 1] != nodeid)
-+	{
-+	    static unsigned long printed = 0;
-+	    if ((lbolt - printed) > (HZ*10))
-+	    {
-+		printk ("%s: lost nodeid at level %d switch %d - %d != %x\n", rail->Generic.Name, level, sw,
-+			railMain->ProbeDest0[TR_TRACEROUTE_ENTRIES - ((2*level)+1) - 1], nodeid);
-+		printed = lbolt;
-+	    }
-+	    result = C_ACK_ERROR;
-+	}
-+	else
-+	{
-+	    E3_uint16 val0 = railMain->ProbeDest0[TR_TRACEROUTE_ENTRIES - level - 1];
-+	    E3_uint16 val1 = railMain->ProbeDest1[TR_TRACEROUTE_ENTRIES - level - 1];
-+
-+	    EPRINTF7 (DBG_PROBE, "%s: level %d switch %d - linkid=%d bcast=%d LNR=%02x%s\n",
-+		      rail->Generic.Name, level, sw, TR_TRACEROUTE0_LINKID(val0),
-+		      TR_TRACEROUTE1_BCAST_TOP(val1), TR_TRACEROUTE0_LNR(val0),
-+		      TR_TRACEROUTE0_REVID(val0) ? "" : " RevA Part");
-+
-+	    lsw->lnr     = TR_TRACEROUTE0_LNR(val0);
-+	    lsw->link    = TR_TRACEROUTE0_LINKID(val0);
-+	    lsw->bcast   = TR_TRACEROUTE1_BCAST_TOP(val1);
-+	    lsw->invalid = (TR_TRACEROUTE0_REVID(val0) == 0);
-+	}
-+    }
-+    spin_unlock_irqrestore (&rail->ProbeLock, flags);
-+
-+    return (result == C_ACK_OK);
-+}
-+
-+void
-+ep3_probe_position_found (EP3_RAIL *rail, ELAN_POSITION *pos)
-+{
-+    E3_uint16 flits[MAX_FLITS];
-+    int       lvl, nflits;
-+
-+    for (lvl = 0; lvl < pos->pos_levels; lvl++)
-+    {
-+	nflits = GenerateCheckRoute (pos, flits, pos->pos_levels - lvl - 1, 0);
-+
-+	if (LoadRoute (rail->Device, rail->Ctxt->RouteTable, EP_VP_PROBE(lvl), ELAN3_MRF_CONTEXT_NUM|SYS_CONTEXT_BIT, nflits, flits) != 0)
-+	    panic ("ep3_probe_position_found: cannot load probe route entry\n");
-+    }
-+
-+    /* Initialise the traceroute source data with our nodeid */
-+    elan3_sdram_writew (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeSource0[TR_TRACEROUTE_ENTRIES-1]), pos->pos_nodeid);
-+    elan3_sdram_writew (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeSource1[TR_TRACEROUTE_ENTRIES-1]), pos->pos_nodeid);
-+}
-+
-+int
-+ep3_check_position (EP_RAIL *r)
-+{
-+    EP3_RAIL      *rail     = (EP3_RAIL *) r;
-+    EP3_RAIL_MAIN *railMain = rail->RailMain;
-+    sdramaddr_t    railElan = rail->RailElan;
-+    ELAN_POSITION *pos      = &rail->Generic.Position;
-+    unsigned int   level    = rail->RailMain->ProbeLevel;
-+    unsigned int   updated  = EP3_EVENT_FIRED (rail->ProbeCookie, railMain->ProbeDone);
-+    unsigned int   lvl;
-+
-+    if (updated)
-+    {
-+	if (railMain->ProbeResult != C_ACK_OK)
-+	{
-+	    EPRINTF2 (DBG_PROBE, "%s: CheckNetworkPosition: packet nacked result=%d\n", rail->Generic.Name, railMain->ProbeResult);
-+
-+	    rail->Generic.SwitchProbeLevel = -1;
-+	}
-+	else
-+	{
-+	    E3_uint16 val0 = railMain->ProbeDest0[TR_TRACEROUTE_ENTRIES - 2*(level+1)];
-+	    E3_uint16 val1 = railMain->ProbeDest1[TR_TRACEROUTE_ENTRIES - 2*(level+1)];
-+
-+	    if (val0 != pos->pos_nodeid || val1 != pos->pos_nodeid)
-+	    {
-+		static unsigned long printed = 0;
-+
-+		/* We've received a packet from another node - this probably means
-+		 * that we've moved */
-+		if ((lbolt - printed) > (HZ*10))
-+		{
-+		    printk ("%s: ep3_check_position - level %d lost nodeid\n", rail->Generic.Name, level);
-+		    printed = lbolt;
-+		}
-+
-+		rail->Generic.SwitchProbeLevel = -1;
-+	    }
-+	    else
-+	    {
-+		for (lvl = 0; lvl <= level; lvl++)
-+		{
-+		    E3_uint16 val0 = railMain->ProbeDest0[TR_TRACEROUTE_ENTRIES - ((2*level) - lvl + 1)];
-+		    E3_uint16 val1 = railMain->ProbeDest1[TR_TRACEROUTE_ENTRIES - ((2*level) - lvl + 1)];
-+
-+		    rail->Generic.SwitchState[lvl].linkid = TR_TRACEROUTE0_LINKID(val0);
-+		    rail->Generic.SwitchState[lvl].LNR    = TR_TRACEROUTE0_LNR(val0);
-+		    rail->Generic.SwitchState[lvl].bcast  = TR_TRACEROUTE1_BCAST_TOP(val1);
-+		    rail->Generic.SwitchState[lvl].uplink = 4;
-+
-+		    EPRINTF5 (DBG_PROBE, " --- lvl %d: linkid=%d LNR=%x bcast=%d uplink=%d\n", lvl, rail->Generic.SwitchState[lvl].linkid,
-+			      rail->Generic.SwitchState[lvl].LNR, rail->Generic.SwitchState[lvl].bcast ,rail->Generic.SwitchState[lvl].uplink);
-+		}
-+		rail->Generic.SwitchProbeLevel = level;
-+	    }
-+	}
-+
-+	railMain->ProbeDone = EP3_EVENT_FREE;
-+    }
-+
-+    if (railMain->ProbeDone == EP3_EVENT_FREE)
-+    {
-+	if (rail->Generic.SwitchBroadcastLevel == rail->Generic.Position.pos_levels-1)
-+	    level = rail->Generic.Position.pos_levels - 1;
-+	else
-+	    level = rail->Generic.SwitchBroadcastLevel + 1;
-+
-+	EPRINTF2 (DBG_PROBE, "%s: ep3_check_position: level %d\n", rail->Generic.Name, level);
-+
-+	/* Initialise the count result etc */
elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeType), PROBE_MULTIPLE); -+ elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeLevel), level); -+ -+ railMain->ProbeResult = -1; -+ railMain->ProbeLevel = -1; -+ -+ /* Clear the receive area */ -+ bzero (railMain->ProbeDest0, sizeof (railMain->ProbeDest0)); -+ bzero (railMain->ProbeDest1, sizeof (railMain->ProbeDest1)); -+ -+ /* Re-arm the completion event */ -+ elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeDone.ev_Type), EV_TYPE_BCOPY); -+ elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeDone.ev_Count), 1); -+ -+ railMain->ProbeDone = EP3_EVENT_ACTIVE; -+ -+ IssueSetevent (rail, rail->RailElanAddr + offsetof (EP3_RAIL_ELAN, ProbeStart)); -+ } -+ -+ return updated; -+} -+ -diff -urN clean/drivers/net/qsnet/ep/probenetwork_elan3_thread.c linux-2.6.9/drivers/net/qsnet/ep/probenetwork_elan3_thread.c ---- clean/drivers/net/qsnet/ep/probenetwork_elan3_thread.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/probenetwork_elan3_thread.c 2004-03-24 06:32:56.000000000 -0500 -@@ -0,0 +1,98 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: probenetwork_elan3_thread.c,v 1.19 2004/03/24 11:32:56 david Exp $" -+/* $Source: /cvs/master/quadrics/epmod/probenetwork_elan3_thread.c,v $*/ -+ -+#include -+#include -+#include -+#include -+ -+#include "kcomm_vp.h" -+#include "kcomm_elan3.h" -+ -+static int -+kcomm_probe_vp (EP3_RAIL_ELAN *railElan, EP3_RAIL_MAIN *railMain, int vp, int attempts, int timeouts) -+{ -+ int rc; -+ -+ /* Since we use %g1 to hold the "rxd" so the trap handler can -+ * complete the envelope processing - we pass zero to indicate we're -+ * not a receiver thread */ -+ asm volatile ("mov %g0, %g1"); -+ -+ while (attempts && timeouts) -+ { -+ c_open (vp); -+ c_sendmem (TR_TRACEROUTE, &railMain->ProbeDest0, &railElan->ProbeSource0); -+ c_sendmem (TR_TRACEROUTE, &railMain->ProbeDest1, &railElan->ProbeSource1); -+ c_sendtrans0 (TR_SENDACK | TR_SETEVENT, (E3_Addr) 0); -+ -+ switch (rc = c_close()) -+ { -+ case C_ACK_OK: -+ return (C_ACK_OK); -+ -+ case C_ACK_DISCARD: -+ attempts--; -+ break; -+ -+ default: /* output timeout */ -+ timeouts--; -+ } -+ -+ c_break_busywait(); -+ } -+ -+ return (timeouts == 0 ? 
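-+		    /* timeouts exhausted -> the output timed out (link error);
-+		     * attempts exhausted -> the packet was persistently nacked */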
C_ACK_ERROR : C_ACK_DISCARD); -+} -+ -+void -+kcomm_probe (E3_CommandPort *cport, EP3_RAIL_ELAN *railElan, EP3_RAIL_MAIN *railMain) -+{ -+ int level; -+ -+ for (;;) -+ { -+ c_waitevent (&railElan->ProbeStart, 1); -+ -+ switch (railElan->ProbeType) -+ { -+ case PROBE_SINGLE: -+ railMain->ProbeResult = kcomm_probe_vp (railElan, railMain, EP_VP_PROBE(railElan->ProbeLevel), -+ PROBE_SINGLE_ATTEMPTS, PROBE_SINGLE_TIMEOUTS); -+ -+ cport->SetEvent = (E3_Addr) &railElan->ProbeDone; -+ break; -+ -+ case PROBE_MULTIPLE: -+ for (level = railElan->ProbeLevel; level >= 0; level--) -+ { -+ if (kcomm_probe_vp (railElan, railMain, EP_VP_PROBE(level), -+ PROBE_MULTIPLE_ATTEMPTS, PROBE_MULTIPLE_TIMEOUTS) == C_ACK_OK) -+ { -+ railMain->ProbeLevel = level; -+ railMain->ProbeResult = C_ACK_OK; -+ break; -+ } -+ -+ c_break_busywait(); -+ } -+ cport->SetEvent = (E3_Addr) &railElan->ProbeDone; -+ break; -+ } -+ -+ } -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/probenetwork_elan4.c linux-2.6.9/drivers/net/qsnet/ep/probenetwork_elan4.c ---- clean/drivers/net/qsnet/ep/probenetwork_elan4.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/probenetwork_elan4.c 2005-07-20 07:35:37.000000000 -0400 -@@ -0,0 +1,401 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: probenetwork_elan4.c,v 1.10.2.1 2005/07/20 11:35:37 mike Exp $ $Name: QSNETMODULES-5-11-3_20050907 $" -+/* $Source: /cvs/master/quadrics/epmod/probenetwork_elan4.c,v $*/ -+ -+#include -+ -+#include -+ -+#include "kcomm_vp.h" -+#include "kcomm_elan4.h" -+#include "debug.h" -+ -+#include -+#include -+ -+static void -+probe_interrupt (EP4_RAIL *rail, void *arg) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&rail->r_probe_lock, flags); -+ rail->r_probe_done = 1; -+ kcondvar_wakeupone (&rail->r_probe_wait, &rail->r_probe_lock); -+ spin_unlock_irqrestore (&rail->r_probe_lock, flags); -+} -+ -+int -+ep4_probe_init (EP4_RAIL *rail) -+{ -+ spin_lock_init (&rail->r_probe_lock); -+ kcondvar_init (&rail->r_probe_wait); -+ -+ rail->r_probe_cq = ep4_alloc_ecq (rail, CQ_Size1K); -+ -+ if (rail->r_probe_cq == NULL) -+ return -ENOMEM; -+ -+ ep4_register_intcookie (rail, &rail->r_probe_intcookie, rail->r_elan_addr, probe_interrupt, rail); -+ -+ return 0; -+} -+ -+void -+ep4_probe_destroy (EP4_RAIL *rail) -+{ -+ if (rail->r_probe_cq) -+ ep4_free_ecq (rail, rail->r_probe_cq); -+ -+ if (rail->r_probe_intcookie.int_arg == NULL) -+ return; -+ ep4_deregister_intcookie (rail, &rail->r_probe_intcookie); -+ -+ kcondvar_destroy (&rail->r_probe_wait); -+ spin_lock_destroy (&rail->r_probe_lock); -+} -+ -+#define LINKDOWN(nodeid, level) ((nodeid >> (level << 1)) & 3) -+#define PROBE_PATTERN0(nodeid) (0xaddebabe ^ nodeid) -+#define PROBE_PATTERN1(nodeid) (0xfeedbeef ^ nodeid) -+ -+#define EP4_PROBE_RETRIES 4 -+ -+int -+ep4_probe_route (EP_RAIL *r, int level, int sw, int nodeid, int *linkup, int *linkdown, int attempts, EP_SWITCH *lsw) -+{ -+ EP4_RAIL *rail = (EP4_RAIL *) r; -+ EP4_RAIL_MAIN *rmain = rail->r_main; -+ E4_uint16 first = 0; -+ int rb = 0; -+ -+ E4_uint8 packed[ROUTE_NUM_PACKED]; -+ E4_VirtualProcessEntry route; -+ unsigned long flags; -+ int i; -+ -+ for (i = 0; i < ROUTE_NUM_PACKED; i++) -+ packed[i] = 0; -+ -+ /* Generate "up" routes */ -+ for (i = 0; i < level; i++) -+ if (first == 0) -+ first = 
linkup ? FIRST_ROUTE(linkup[i]) : FIRST_ADAPTIVE; -+ else -+ packed[rb++] = linkup ? PACKED_ROUTE(linkup[i]) : PACKED_ADAPTIVE; -+ -+ /* Generate a "to-me" route down */ -+ if (first == 0) -+ first = FIRST_MYLINK; -+ else -+ packed[rb++] = PACKED_MYLINK; -+ -+ /* Generate the "down" routes */ -+ for (i = level-1; i >= 0; i--) -+ packed[rb++] = linkdown ? PACKED_ROUTE(linkdown[i]) : PACKED_ROUTE(LINKDOWN(nodeid, i)); -+ -+ /* Pack up the routes into the virtual process entry */ -+ route.Values[0] = first | FIRST_HIGH_PRI | FIRST_SYSTEM_PACKET | FIRST_TIMEOUT(3); -+ route.Values[1] = ROUTE_CTXT_VALUE(ELAN4_KCOMM_CONTEXT_NUM); -+ -+ for (i = 0; i < (ROUTE_NUM_PACKED >> 1); i++) -+ { -+ route.Values[0] |= ((E4_uint64) packed[i]) << ((i << 2) + ROUTE_PACKED_OFFSET); -+ route.Values[1] |= ((E4_uint64) packed[i+(ROUTE_NUM_PACKED >> 1)]) << ((i << 2)); -+ } -+ -+ elan4_write_route (rail->r_ctxt.ctxt_dev, rail->r_routetable, EP_VP_PROBE(level), &route); -+ -+ while (attempts--) -+ { -+ rail->r_probe_done = 0; -+ -+ /* generate the STEN packet - note we use a datatype of dword as we're copying to elan in dwords -+ * NB - no flow control is required, since the max packet size is less than the command queue -+ * size and it's dedicated for network probing. -+ */ -+ -+ elan4_guard (rail->r_probe_cq->ecq_cq, GUARD_CHANNEL(1) | GUARD_RESET(EP4_PROBE_RETRIES)); -+ elan4_nop_cmd (rail->r_probe_cq->ecq_cq, 0); -+ -+ elan4_open_packet (rail->r_probe_cq->ecq_cq, OPEN_STEN_PKT_CMD | OPEN_PACKET(0, PACK_OK | RESTART_COUNT_ZERO, EP_VP_PROBE(level))); -+ elan4_sendtransn (rail->r_probe_cq->ecq_cq, TR_TRACEROUTE(TRACEROUTE_NDWORDS), -+ rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_dest0), -+ 0x0000000000000000ull, 0x0000000000000000ull, 0x0000000000000000ull, 0x0000000000000000ull, -+ 0x0000000000000000ull, 0x0000000000000000ull, 0x0000000000000000ull, 0x0000000000000000ull | ((E4_uint64)PROBE_PATTERN0(nodeid) << 32)); -+ elan4_sendtransn (rail->r_probe_cq->ecq_cq, TR_TRACEROUTE(TRACEROUTE_NDWORDS), -+ rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_dest1), -+ 0x0000000100000001ull, 0x0000000100000001ull, 0x0000000100000001ull, 0x0000000100000001ull, -+ 0x0000000100000001ull, 0x0000000100000001ull, 0x0000000100000001ull, 0x0000000000000001ull | ((E4_uint64)PROBE_PATTERN1(nodeid) << 32)); -+ elan4_sendtrans0 (rail->r_probe_cq->ecq_cq, TR_NOP_TRANS | TR_LAST_AND_SEND_ACK, 0); -+ -+ elan4_guard (rail->r_probe_cq->ecq_cq, GUARD_CHANNEL(1) | GUARD_TEST(0, PACK_OK) | GUARD_RESET(EP4_PROBE_RETRIES)); -+ elan4_write_dword_cmd (rail->r_probe_cq->ecq_cq, rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_result), EP4_STATE_FINISHED); -+ -+ elan4_guard (rail->r_probe_cq->ecq_cq, GUARD_CHANNEL(1) | GUARD_TEST(0, RESTART_COUNT_ZERO) | GUARD_RESET(EP4_PROBE_RETRIES)); -+ elan4_write_dword_cmd (rail->r_probe_cq->ecq_cq, rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_result), EP4_STATE_FAILED); -+ -+ elan4_interrupt_cmd (rail->r_probe_cq->ecq_cq, rail->r_probe_intcookie.int_val); -+ -+ spin_lock_irqsave (&rail->r_probe_lock, flags); -+ while (! 
rail->r_probe_done)
-+	    kcondvar_wait (&rail->r_probe_wait, &rail->r_probe_lock, &flags);
-+	spin_unlock_irqrestore (&rail->r_probe_lock, flags);
-+
-+	if (rmain->r_probe_result == EP4_STATE_FINISHED)
-+	{
-+	    if (rmain->r_probe_dest0[TRACEROUTE_ENTRIES - ((2*level)+1) - 1] != PROBE_PATTERN0(nodeid) ||
-+		rmain->r_probe_dest1[TRACEROUTE_ENTRIES - ((2*level)+1) - 1] != PROBE_PATTERN1(nodeid))
-+	    {
-+		static unsigned long printed = 0;
-+		if ((lbolt - printed) > (HZ*10))
-+		{
-+		    printk ("%s: lost nodeid at level %d switch %d - %d != %x\n", rail->r_generic.Name, level, sw,
-+			    rmain->r_probe_dest0[TRACEROUTE_ENTRIES - ((2*level)+1) - 1], PROBE_PATTERN0(nodeid));
-+		    printed = lbolt;
-+		}
-+	    }
-+	    else
-+	    {
-+		E4_uint32 val0 = rmain->r_probe_dest0[TRACEROUTE_ENTRIES - level - 1];
-+		E4_uint32 val1 = rmain->r_probe_dest1[TRACEROUTE_ENTRIES - level - 1];
-+
-+		lsw->lnr     = TR_TRACEROUTE0_LNR(val0);
-+		lsw->link    = TR_TRACEROUTE0_LINKID(val0);
-+		lsw->bcast   = TR_TRACEROUTE1_BCAST_TOP(val1);
-+		lsw->invalid = 0;
-+
-+		return 1;
-+	    }
-+	}
-+
-+	rmain->r_probe_result = EP4_STATE_FREE;
-+    }
-+
-+    return 0;
-+}
-+
-+
-+void
-+ep4_probe_position_found (EP4_RAIL *rail, ELAN_POSITION *pos)
-+{
-+    ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev;
-+    int        lvl;
-+
-+    for (lvl = 0; lvl < pos->pos_levels; lvl++)
-+    {
-+	/* Initialise the "probe" route to use the broadcast tree */
-+	ELAN_POSITION *pos     = &rail->r_generic.Position;
-+	unsigned char *arityp  = &pos->pos_arity[pos->pos_levels - 1];
-+	unsigned int   spanned = *arityp;
-+	E4_uint16      first   = 0;
-+	int            rb      = 0;
-+
-+	E4_uint8               packed[ROUTE_NUM_PACKED];
-+	E4_VirtualProcessEntry route;
-+	int                    i;
-+
-+	for (i = 0; i < ROUTE_NUM_PACKED; i++)
-+	    packed[i] = 0;
-+
-+	/* Generate "up" routes */
-+	for (i = 0; i < lvl; i++, spanned *= *(--arityp))
-+	{
-+	    if (first == 0)
-+		first = FIRST_BCAST_TREE;
-+	    else
-+		packed[rb++] = PACKED_BCAST_TREE;
-+	}
-+
-+	/* Generate a "to-me" route down */
-+	if (first == 0)
-+	    first = FIRST_MYLINK;
-+	else
-+	    packed[rb++] = PACKED_MYLINK;
-+
-+	spanned /= *arityp++;
-+
-+	/* Generate the "down" routes */
-+	for (i = lvl-1; i >= 0; i--)
-+	{
-+	    spanned /= *arityp;
-+	    packed[rb++] = PACKED_ROUTE((pos->pos_nodeid / spanned) % *arityp);
-+	    arityp++;
-+	}
-+
-+	/* Pack up the routes into the virtual process entry */
-+	route.Values[0] = first | FIRST_HIGH_PRI | FIRST_SYSTEM_PACKET | FIRST_TIMEOUT(3);
-+	route.Values[1] = ROUTE_CTXT_VALUE(ELAN4_KCOMM_CONTEXT_NUM);
-+
-+	for (i = 0; i < (ROUTE_NUM_PACKED >> 1); i++)
-+	{
-+	    route.Values[0] |= ((E4_uint64) packed[i])                         << ((i << 2) + ROUTE_PACKED_OFFSET);
-+	    route.Values[1] |= ((E4_uint64) packed[i+(ROUTE_NUM_PACKED >> 1)]) << ((i << 2));
-+	}
-+
-+	elan4_write_route (rail->r_ctxt.ctxt_dev, rail->r_routetable, EP_VP_PROBE(lvl), &route);
-+
-+	/* Initialise "start" event for this level */
-+	elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_start[lvl].ev_CountAndType),
-+			    E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_CHECK_STEN_NDWORDS));
-+	elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_start[lvl].ev_CopySource),
-+			    rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl]));
-+	elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_start[lvl].ev_CopyDest),
-+			    rail->r_probe_cq->ecq_addr);
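-+
-+	/* Each level's r_check_start event copies the pre-built command
-+	 * stream below into the probe command queue, so a single setevent
-+	 * (issued at the end of this function and re-armed from
-+	 * ep4_check_position) runs the whole probe without main
-+	 * processor intervention. */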
-+
-+	/* Initialise command stream - reset the start event */
-+	elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_reset_event_cmd),
-+			    WRITE_DWORD_CMD | (rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_check_start[lvl])));
-+	elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_reset_event_value),
-+			    E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_CHECK_STEN_NDWORDS));
-+
-+	/* Initialise command stream - sten traceroute packet */
-+	elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_open),
-+			    OPEN_STEN_PKT_CMD | OPEN_PACKET (0, PACK_OK | RESTART_COUNT_ZERO, EP_VP_PROBE(lvl)));
-+
-+	/* Initialise command stream - traceroute 0 */
-+	elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_trans_traceroute0),
-+			    SEND_TRANS_CMD | (TR_TRACEROUTE(TRACEROUTE_NDWORDS) << 16));
-+	elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_addr_traceroute0),
-+			    rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_dest0));
-+	for (i = 0; i < (TRACEROUTE_NDWORDS-1); i++)
-+	    elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_data_traceroute0[i]),
-+				0x0000000000000000ull);
-+	elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_data_traceroute0[i]),
-+			    0x0000000000000000ull | ((E4_uint64) PROBE_PATTERN0(pos->pos_nodeid) << 32));
-+
-+	/* Initialise command stream - traceroute 1 */
-+	elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_trans_traceroute1),
-+			    SEND_TRANS_CMD | (TR_TRACEROUTE(TRACEROUTE_NDWORDS) << 16));
-+	elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_addr_traceroute1),
-+			    rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_dest1));
-+	for (i = 0; i < (TRACEROUTE_NDWORDS-1); i++)
-+	    elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_data_traceroute1[i]),
-+				0x0000000100000001ull);
-+	elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_data_traceroute1[i]),
-+			    0x0000000000000001ull | ((E4_uint64) PROBE_PATTERN1(pos->pos_nodeid) << 32));
-+
-+	/* Initialise command stream - null sendack */
-+	elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_trans_sendack),
-+			    SEND_TRANS_CMD | ((TR_NOP_TRANS | TR_LAST_AND_SEND_ACK) << 16));
-+	elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_addr_sendack),
-+			    0);
-+
-+	/* Initialise command stream - guard ok, write done */
-+	elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_guard_ok),
-+			    GUARD_CMD | GUARD_CHANNEL(1) | GUARD_TEST(0, PACK_OK) | GUARD_RESET(EP4_PROBE_RETRIES));
-+	elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_writedword_ok),
-+			    WRITE_DWORD_CMD | (rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_level)));
-+	elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_value_ok),
-+			    lvl);
-+
-+	/* Initialise command stream - guard fail, chain to next or write done */
-+	elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_guard_fail),
-+			    GUARD_CMD | GUARD_CHANNEL(1) | GUARD_TEST(0, RESTART_COUNT_ZERO) | GUARD_RESET(EP4_PROBE_RETRIES));
-+
-+	if (lvl > 0)
-+	{
-+	    elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_setevent_fail),
-+				SET_EVENT_CMD | (rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_check_start[lvl-1])));
-+	    elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_setevent_nop),
-+				NOP_CMD);
-+	}
-+	else
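-+	    /* at the bottom level there is nothing lower to chain to, so
-+	     * the fail path writes EP4_PROBE_FAILED back to r_probe_level
-+	     * in main memory instead */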
{ -+ elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_setevent_fail), -+ WRITE_DWORD_CMD | (rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_level))); -+ elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_setevent_nop), -+ EP4_PROBE_FAILED); -+ } -+ elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_nop_pad), -+ NOP_CMD); -+ } -+ -+ -+ rail->r_main->r_probe_level = EP4_PROBE_ACTIVE; -+ -+ mb(); -+ ep4_set_event_cmd (rail->r_probe_cq, rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_check_start[pos->pos_levels-1])); -+} -+ -+int -+ep4_check_position (EP_RAIL *r) -+{ -+ EP4_RAIL *rail = (EP4_RAIL *) r; -+ ELAN_POSITION *pos = &rail->r_generic.Position; -+ unsigned int level = rail->r_main->r_probe_level; -+ unsigned int lvl; -+ -+ EPRINTF2 (DBG_PROBE, "%s: ep4_check_position: level=%lld\n", rail->r_generic.Name, (long long)rail->r_main->r_probe_level); -+ -+ if (rail->r_main->r_probe_level != EP4_PROBE_ACTIVE) -+ { -+ if (rail->r_main->r_probe_level == EP4_PROBE_FAILED) -+ { -+ EPRINTF1 (DBG_PROBE, "%s: ep4_check_position: packets all nacked\n", rail->r_generic.Name); -+ -+ rail->r_generic.SwitchProbeLevel = -1; -+ } -+ else -+ { -+ E4_uint32 val0 = rail->r_main->r_probe_dest0[TRACEROUTE_ENTRIES - 2*(level+1)]; -+ E4_uint32 val1 = rail->r_main->r_probe_dest1[TRACEROUTE_ENTRIES - 2*(level+1)]; -+ -+ if (val0 != PROBE_PATTERN0 (pos->pos_nodeid) || val1 != PROBE_PATTERN1 (pos->pos_nodeid)) -+ { -+ static unsigned long printed = 0; -+ -+ /* We've received a packet from another node - this probably means -+ * that we've moved */ -+ if ((lbolt - printed) > (HZ*10)) -+ { -+ printk ("%s: ep4_check_position - level %d lost nodeid\n", rail->r_generic.Name, level); -+ printed = lbolt; -+ } -+ -+ rail->r_generic.SwitchProbeLevel = -1; -+ } -+ else -+ { -+ for (lvl = 0 ; lvl <= level; lvl++) -+ { -+ E4_uint32 uval0 = rail->r_main->r_probe_dest0[TRACEROUTE_ENTRIES - lvl - 1]; -+ E4_uint32 dval0 = rail->r_main->r_probe_dest0[TRACEROUTE_ENTRIES - ((2*level) - lvl + 1)]; -+ E4_uint32 dval1 = rail->r_main->r_probe_dest1[TRACEROUTE_ENTRIES - ((2*level) - lvl + 1)]; -+ -+ rail->r_generic.SwitchState[lvl].linkid = TR_TRACEROUTE0_LINKID (dval0); -+ rail->r_generic.SwitchState[lvl].LNR = TR_TRACEROUTE0_LNR(dval0); -+ rail->r_generic.SwitchState[lvl].bcast = TR_TRACEROUTE1_BCAST_TOP (dval1); -+ rail->r_generic.SwitchState[lvl].uplink = TR_TRACEROUTE0_LINKID (uval0); -+ -+ EPRINTF5 (DBG_PROBE, " --- lvl %d: linkid=%d LNR=%x bcast=%d uplink=%d\n", lvl, rail->r_generic.SwitchState[lvl].linkid, -+ rail->r_generic.SwitchState[lvl].LNR, rail->r_generic.SwitchState[lvl].bcast ,rail->r_generic.SwitchState[lvl].uplink); -+ -+ } -+ -+ rail->r_generic.SwitchProbeLevel = level; -+ } -+ } -+ -+ rail->r_main->r_probe_level = EP4_PROBE_ACTIVE; -+ mb(); -+ -+ if (rail->r_generic.SwitchBroadcastLevel == rail->r_generic.Position.pos_levels-1) -+ level = rail->r_generic.Position.pos_levels - 1; -+ else -+ level = rail->r_generic.SwitchBroadcastLevel + 1; -+ -+ ep4_set_event_cmd (rail->r_probe_cq, rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_check_start[level])); -+ -+ return 1; -+ } -+ -+ return 0; -+} -diff -urN clean/drivers/net/qsnet/ep/procfs_linux.c linux-2.6.9/drivers/net/qsnet/ep/procfs_linux.c ---- clean/drivers/net/qsnet/ep/procfs_linux.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/procfs_linux.c 2005-09-07 10:35:03.000000000 -0400 -@@ -0,0 +1,632 @@ -+/* -+ * Copyright (c) 1996-2002 by 
Quadrics Supercomputers World Ltd.
-+ *    Copyright (c) 2002-2003 by Quadrics Ltd.
-+ *
-+ *    For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ident "@(#)$Id: procfs_linux.c,v 1.60.2.3 2005/09/07 14:35:03 mike Exp $"
-+/*      $Source: /cvs/master/quadrics/epmod/procfs_linux.c,v $*/
-+
-+#include
-+#include
-+
-+#include
-+#include
-+#include
-+
-+#include "cm.h"
-+#include "debug.h"
-+#include "conf_linux.h"
-+#include
-+#include
-+#include
-+
-+#include
-+
-+struct proc_dir_entry *ep_procfs_root;
-+struct proc_dir_entry *ep_config_root;
-+
-+/*
-+ * We provide a slightly "special" interface for /proc/elan/device%d/nodeset,
-+ * so that it can be included in a "poll" system call.  On each "read" on the
-+ * file, we generate a new nodeset if a) the previous one has been completely
-+ * read and b) if it has changed since it was generated.
-+ *
-+ * Unfortunately ... this doesn't allow "tail -f" to work, since this uses
-+ * fstat() on the fd; as we only hold the last nodeset string, we could not
-+ * handle the case where two processes were reading at different rates.
-+ * We could maybe have implemented this as a "sliding window", so that we
-+ * add a new nodeset string when it has changed and someone reads past the
-+ * end of the last one.  Then if someone read from before our "window"
-+ * we would produce "padding" data.  The problem with this is that a
-+ * simple "cat" on /proc/elan/device%d/nodeset will read the whole "file",
-+ * which will be mostly padding !
-+ *
-+ * Just to note that the purpose of this interface is:
-+ *  1) to allow cat /proc/elan/device%d/nodeset to show the current
-+ *     nodeset.
-+ *  2) to allow rms (or similar) to poll() on the file, and when the
-+ *     nodeset changes read a new one.
-+ *
-+ * so ... we don't bother solving the troublesome "tail -f" problem.
-+ */
-+
-+typedef struct nodeset_private
-+{
-+    struct nodeset_private *pr_next;
-+    EP_RAIL                *pr_rail;
-+    unsigned                pr_changed;
-+    char                   *pr_page;
-+    unsigned                pr_off;
-+    unsigned                pr_len;
-+} NODESET_PRIVATE;
-+
-+NODESET_PRIVATE   *ep_nodeset_list;
-+wait_queue_head_t  ep_nodeset_wait;
-+spinlock_t         ep_nodeset_lock;
-+
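-+/*
-+ * A minimal sketch of the intended poll()/read() usage from userspace.
-+ * (The comment above keeps the historical /proc/elan/device%d/nodeset name;
-+ * the code below registers the file as rail%d/nodeset under the "ep" procfs
-+ * root, so the path shown here is an assumption.)
-+ *
-+ *	int fd = open ("/proc/qsnet/ep/rail0/nodeset", O_RDONLY);
-+ *	struct pollfd pfd = { fd, POLLIN, 0 };
-+ *	char buf[4096];
-+ *	int n;
-+ *
-+ *	while (poll (&pfd, 1, -1) > 0)
-+ *		while ((n = read (fd, buf, sizeof (buf))) > 0)
-+ *			fwrite (buf, 1, n, stdout);
-+ *
-+ * poll() returns when the nodeset has changed; each read() then returns the
-+ * newly generated nodeset string until it has been completely consumed.
-+ */
-+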
-+static int
-+proc_write_state(struct file *file, const char *buffer,
-+		 unsigned long count, void *data)
-+{
-+    EP_RAIL *rail = (EP_RAIL *) data;
-+    char     tmpbuf[128];
-+    int      res;
-+
-+    if (count > sizeof (tmpbuf)-1)
-+	return (-EINVAL);
-+
-+    MOD_INC_USE_COUNT;
-+
-+    if (copy_from_user (tmpbuf, buffer, count))
-+	res = -EFAULT;
-+    else
-+    {
-+	tmpbuf[count] = '\0';
-+
-+	if (tmpbuf[count-1] == '\n')
-+	    tmpbuf[count-1] = '\0';
-+
-+	if (! strcmp (tmpbuf, "start") && rail->State == EP_RAIL_STATE_UNINITIALISED)
-+	    ep_start_rail (rail);
-+
-+	if (! strcmp (tmpbuf, "stop") && rail->State > EP_RAIL_STATE_UNINITIALISED)
-+	    ep_stop_rail (rail);
-+
-+	if (! strcmp (tmpbuf, "offline") && rail->State > EP_RAIL_STATE_UNINITIALISED)
-+	    cm_force_offline (rail, 1, CM_OFFLINE_PROCFS);
-+
-+	if (! strcmp (tmpbuf, "online") && rail->State > EP_RAIL_STATE_UNINITIALISED)
-+	    cm_force_offline (rail, 0, CM_OFFLINE_PROCFS);
-+
-+	if (! strncmp (tmpbuf, "restart=", 8) && rail->State == EP_RAIL_STATE_RUNNING)
-+	    cm_restart_node (rail, simple_strtol (tmpbuf + 8, NULL, 0));
-+
-+	if (! strncmp (tmpbuf, "panic=", 6))
-+	    ep_panic_node (rail->System, simple_strtol(tmpbuf + 6, NULL, 0),
-+			   strchr (tmpbuf, ',') ? strchr(tmpbuf, ',') + 1 : "remote panic request");
-+
-+	if (! strncmp (tmpbuf, "raise=", 6) && rail->State > EP_RAIL_STATE_UNINITIALISED)
-+	    rail->Operations.RaiseFilter (rail, simple_strtol (tmpbuf + 6, NULL, 0));
-+
-+	if (! strncmp (tmpbuf, "lower=", 6) && rail->State > EP_RAIL_STATE_UNINITIALISED)
-+	    rail->Operations.LowerFilter (rail, simple_strtol (tmpbuf + 6, NULL, 0));
-+
-+	res = count;
-+    }
-+
-+    MOD_DEC_USE_COUNT;
-+
-+    return (res);
-+}
-+
-+static int
-+proc_read_state(char *page, char **start, off_t off,
-+		int count, int *eof, void *data)
-+{
-+    EP_RAIL *rail = (EP_RAIL *) data;
-+    int      len;
-+
-+    switch (rail->State)
-+    {
-+    case EP_RAIL_STATE_UNINITIALISED:
-+	len = sprintf (page, "uninitialised\n");
-+	break;
-+    case EP_RAIL_STATE_STARTED:
-+	len = sprintf (page, "started\n");
-+	break;
-+    case EP_RAIL_STATE_RUNNING:
-+	len = sprintf (page, "running NodeId=%d NumNodes=%d\n", rail->Position.pos_nodeid, rail->Position.pos_nodes);
-+	break;
-+    case EP_RAIL_STATE_INCOMPATIBLE:
-+	len = sprintf (page, "incompatible NodeId=%d NumNodes=%d\n", rail->Position.pos_nodeid, rail->Position.pos_nodes);
-+	break;
-+    default:
-+	len = sprintf (page, "\n");
-+	break;
-+    }
-+
-+    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
-+}
-+
-+static int
-+proc_write_display(struct file *file, const char *buffer,
-+		   unsigned long count, void *data)
-+{
-+    EP_RAIL *rail = (EP_RAIL *) data;
-+    char     tmpbuf[128];
-+    int      res;
-+
-+    if (count > sizeof (tmpbuf)-1)
-+	return (-EINVAL);
-+
-+    MOD_INC_USE_COUNT;
-+
-+    if (copy_from_user (tmpbuf, buffer, count))
-+	res = -EFAULT;
-+    else
-+    {
-+	tmpbuf[count] = '\0';
-+
-+	if (tmpbuf[count-1] == '\n')
-+	    tmpbuf[count-1] = '\0';
-+
-+	if (! strcmp (tmpbuf, "rail"))
-+	    DisplayRail (rail);
-+	if (! strcmp (tmpbuf, "segs"))
-+	    DisplaySegs (rail);
-+	if (! strcmp (tmpbuf, "nodes"))
-+	    DisplayNodes (rail);
-+	if (! strcmp (tmpbuf, "status"))
-+	    DisplayStatus (rail);
-+	if (! strcmp (tmpbuf, "debug") && rail->Operations.Debug)
-+	    rail->Operations.Debug (rail);
-+	if (! strncmp (tmpbuf, "epcomms", 7))
-+	    ep_comms_display (rail->System, tmpbuf[7] == '=' ?
tmpbuf + 8 : NULL); -+ res = count; -+ } -+ -+ MOD_DEC_USE_COUNT; -+ -+ return (res); -+} -+ -+static int -+proc_read_display(char *page, char **start, off_t off, -+ int count, int *eof, void *data) -+{ -+ int len = sprintf (page, "\n"); -+ -+ return (qsnet_proc_calc_metrics (page, start, off, count, eof, len)); -+} -+ -+ -+static int -+proc_read_stats(char *page, char **start, off_t off, -+ int count, int *eof, void *data) -+{ -+ EP_RAIL *rail = (EP_RAIL *) data; -+ -+ if ( rail == NULL ) { -+ strcpy(page,"proc_read_stats rail=NULL\n"); -+ } else { -+ page[0] = 0; -+ ep_fillout_stats(rail, page); -+ rail->Operations.FillOutStats (rail, page); -+ } -+ return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page))); -+} -+ -+static int -+proc_read_devinfo(char *page, char **start, off_t off, -+ int count, int *eof, void *data) -+{ -+ EP_RAIL *rail = (EP_RAIL *) data; -+ ELAN_DEVINFO *devinfo = &rail->Devinfo; -+ ELAN_POSITION *pos = &rail->Position; -+ char *p = page; -+ -+ switch (devinfo->dev_device_id) -+ { -+ case PCI_DEVICE_ID_ELAN3: -+ p += sprintf (p, "ep%d is elan3 %d rev %c\n", rail->Number, -+ devinfo->dev_instance, 'a' + devinfo->dev_revision_id); -+ break; -+ -+ case PCI_DEVICE_ID_ELAN4: -+ p += sprintf (p, "ep%d is elan4 %d rev %c\n", rail->Number, -+ devinfo->dev_instance, 'a' + devinfo->dev_revision_id); -+ break; -+ default: -+ p += sprintf (p, "ep%d is unknown %x/%x\n", rail->Number, devinfo->dev_vendor_id, devinfo->dev_device_id); -+ break; -+ } -+ -+ if (rail->State == EP_RAIL_STATE_RUNNING) -+ p += sprintf (p, "ep%d nodeid %d numnodes %d\n", rail->Number, pos->pos_nodeid, pos->pos_nodes); -+ -+ return (qsnet_proc_calc_metrics (page, start, off, count, eof, p - page)); -+} -+ -+static struct rail_info -+{ -+ char *name; -+ int (*read_func) (char *page, char **start, off_t off, int count, int *eof, void *data); -+ int (*write_func) (struct file *file, const char *buf, unsigned long count, void *data); -+} rail_info[] = { -+ {"state", proc_read_state, proc_write_state}, -+ {"display", proc_read_display, proc_write_display}, -+ {"stats", proc_read_stats, NULL}, -+ {"devinfo", proc_read_devinfo, NULL}, -+}; -+ -+static int -+nodeset_open (struct inode *inode, struct file *file) -+{ -+ NODESET_PRIVATE *pr; -+ -+ if ((pr = kmalloc (sizeof (NODESET_PRIVATE), GFP_KERNEL)) == NULL) -+ return (-ENOMEM); -+ -+ pr->pr_changed = 1; -+ pr->pr_off = 0; -+ pr->pr_len = 0; -+ pr->pr_page = NULL; -+ pr->pr_rail = (EP_RAIL *)( PDE(inode)->data ); -+ -+ spin_lock (&ep_nodeset_lock); -+ pr->pr_next = ep_nodeset_list; -+ ep_nodeset_list = pr; -+ spin_unlock (&ep_nodeset_lock); -+ -+ file->private_data = (void *) pr; -+ -+ MOD_INC_USE_COUNT; -+ return (0); -+} -+ -+static int -+nodeset_release (struct inode *inode, struct file *file) -+{ -+ NODESET_PRIVATE *pr = (NODESET_PRIVATE *) file->private_data; -+ NODESET_PRIVATE **ppr; -+ -+ spin_lock (&ep_nodeset_lock); -+ for (ppr = &ep_nodeset_list; (*ppr) != pr; ppr = &(*ppr)->pr_next) -+ ; -+ (*ppr) = pr->pr_next; -+ spin_unlock (&ep_nodeset_lock); -+ -+ if (pr->pr_page) -+ free_page ((unsigned long) pr->pr_page); -+ kfree (pr); -+ -+ MOD_DEC_USE_COUNT; -+ return (0); -+} -+ -+static ssize_t -+nodeset_read (struct file *file, char *buf, size_t count, loff_t *ppos) -+{ -+ NODESET_PRIVATE *pr = (NODESET_PRIVATE *) file->private_data; -+ EP_RAIL *rail = pr->pr_rail; -+ int error; -+ unsigned long flags; -+ -+ if (!pr->pr_changed && pr->pr_off >= pr->pr_len) -+ return (0); -+ -+ if ((error = verify_area (VERIFY_WRITE, buf, count)) != 0) 
-+ return (error); -+ -+ if (pr->pr_page == NULL && (pr->pr_page = (char *) __get_free_page (GFP_KERNEL)) == NULL) -+ return (-ENOMEM); -+ -+ if (pr->pr_off >= pr->pr_len) -+ { -+ kmutex_lock (&rail->CallbackLock); -+ if (rail->State == EP_RAIL_STATE_RUNNING) -+ { -+ spin_lock_irqsave (&rail->System->NodeLock, flags); -+ ep_sprintf_bitmap (pr->pr_page, PAGESIZE, statemap_tobitmap(rail->NodeSet), 0, 0, rail->Position.pos_nodes); -+ spin_unlock_irqrestore (&rail->System->NodeLock, flags); -+ -+ if (rail->SwitchBroadcastLevel == -1) -+ strcat (pr->pr_page, ""); -+ else if (rail->SwitchBroadcastLevel < (rail->Position.pos_levels-1)) -+ sprintf (pr->pr_page + strlen (pr->pr_page), "<%d>", rail->SwitchBroadcastLevel); -+ strcat (pr->pr_page, "\n"); -+ } -+ else -+ strcpy (pr->pr_page, "\n"); -+ kmutex_unlock (&rail->CallbackLock); -+ -+ pr->pr_len = strlen (pr->pr_page); -+ pr->pr_off = 0; -+ pr->pr_changed = 0; -+ } -+ -+ if (count >= (pr->pr_len - pr->pr_off)) -+ count = pr->pr_len - pr->pr_off; -+ -+ copy_to_user (buf, pr->pr_page + pr->pr_off, count); -+ -+ pr->pr_off += count; -+ *ppos += count; -+ -+ if (pr->pr_off >= pr->pr_len) -+ { -+ free_page ((unsigned long) pr->pr_page); -+ pr->pr_page = NULL; -+ } -+ -+ return (count); -+} -+ -+static unsigned int -+nodeset_poll (struct file *file, poll_table *wait) -+{ -+ NODESET_PRIVATE *pr = (NODESET_PRIVATE *) file->private_data; -+ -+ poll_wait (file, &ep_nodeset_wait, wait); -+ if (pr->pr_changed || pr->pr_off < pr->pr_len) -+ return (POLLIN | POLLRDNORM); -+ return (0); -+} -+ -+static void -+nodeset_callback (void *arg, statemap_t *map) -+{ -+ EP_RAIL *rail = (EP_RAIL *) arg; -+ NODESET_PRIVATE *pr; -+ -+ ep_display_bitmap (rail->Name, "Nodeset", statemap_tobitmap(map), 0, ep_numnodes(rail->System)); -+ -+ spin_lock (&ep_nodeset_lock); -+ for (pr = ep_nodeset_list; pr; pr = pr->pr_next) -+ if (pr->pr_rail == rail) -+ pr->pr_changed = 1; -+ spin_unlock (&ep_nodeset_lock); -+ -+ wake_up_interruptible (&ep_nodeset_wait); -+} -+ -+static int -+proc_open (struct inode *inode, struct file *file) -+{ -+ QSNET_PROC_PRIVATE *pr; -+ CM_RAIL *cmRail; -+ EP_RAIL *epRail; -+ int pages = 4; -+ unsigned long flags; -+ -+ if ((pr = kmalloc (sizeof (QSNET_PROC_PRIVATE), GFP_KERNEL)) == NULL) -+ return (-ENOMEM); -+ -+ epRail = (EP_RAIL *)(PDE(inode)->data); -+ -+ do { -+ pr->pr_data_len = PAGESIZE * pages; -+ -+ KMEM_ZALLOC (pr->pr_data, char *, pr->pr_data_len, 1); -+ if (pr->pr_data == NULL) -+ { -+ pr->pr_len = sprintf (pr->pr_data, "Out of Memory\n"); -+ break; -+ } -+ -+ pr->pr_off = 0; -+ pr->pr_len = 0; -+ pr->pr_data[0] = 0; -+ -+ if (epRail->State != EP_RAIL_STATE_RUNNING) -+ { -+ pr->pr_len = sprintf (pr->pr_data, "Rail not Running\n"); -+ break; -+ } -+ else -+ { -+ pr->pr_di.func = qsnet_proc_character_fill; -+ pr->pr_di.arg = (long)pr; -+ -+ if (!strcmp("maps", file->f_dentry->d_iname)) -+ { -+ cmRail = epRail->ClusterRail; -+ -+ spin_lock_irqsave (&cmRail->Lock, flags); -+ DisplayNodeMaps (&pr->pr_di, cmRail); -+ spin_unlock_irqrestore (&cmRail->Lock, flags); -+ } -+ -+ if (!strcmp("segs", file->f_dentry->d_iname)) -+ { -+ cmRail = epRail->ClusterRail; -+ -+ spin_lock_irqsave (&cmRail->Lock, flags); -+ DisplayNodeSgmts (&pr->pr_di, cmRail); -+ spin_unlock_irqrestore (&cmRail->Lock, flags); -+ } -+ -+ if (!strcmp("tree", file->f_dentry->d_iname)) -+ DisplayRailDo (&pr->pr_di, epRail); -+ } -+ -+ if ( pr->pr_len < pr->pr_data_len) -+ break; /* we managed to get all the output into the buffer */ -+ -+ pages++; -+ KMEM_FREE ( pr->pr_data, 
pr->pr_data_len); -+ } while (1); -+ -+ -+ file->private_data = (void *) pr; -+ -+ MOD_INC_USE_COUNT; -+ return (0); -+} -+ -+struct file_operations proc_nodeset_operations = -+{ -+ read: nodeset_read, -+ poll: nodeset_poll, -+ open: nodeset_open, -+ release: nodeset_release, -+}; -+ -+struct file_operations proc_operations = -+{ -+ read: qsnet_proc_read, -+ open: proc_open, -+ release: qsnet_proc_release, -+}; -+ -+void -+ep_procfs_rail_init (EP_RAIL *rail) -+{ -+ struct proc_dir_entry *dir; -+ struct proc_dir_entry *p; -+ char name[10]; -+ int i; -+ -+ sprintf (name, "rail%d", rail->Number); -+ -+ if ((dir = rail->ProcDir = proc_mkdir (name, ep_procfs_root)) == NULL) -+ return; -+ -+ for (i = 0; i < sizeof (rail_info)/sizeof (rail_info[0]); i++) -+ { -+ if ((p = create_proc_entry (rail_info[i].name, 0, dir)) != NULL) -+ { -+ p->read_proc = rail_info[i].read_func; -+ p->write_proc = rail_info[i].write_func; -+ p->data = rail; -+ p->owner = THIS_MODULE; -+ } -+ } -+ -+ if ((p = create_proc_entry ("nodeset", 0, dir)) != NULL) -+ { -+ p->proc_fops = &proc_nodeset_operations; -+ p->owner = THIS_MODULE; -+ p->data = rail; -+ -+ rail->CallbackRegistered = 1; -+ ep_register_callback (rail, EP_CB_NODESET, nodeset_callback, rail); -+ } -+ -+ if ((p = create_proc_entry ("maps", 0, dir)) != NULL) -+ { -+ p->proc_fops = &proc_operations; -+ p->owner = THIS_MODULE; -+ p->data = rail; -+ } -+ -+ if ((p = create_proc_entry ("segs", 0, dir)) != NULL) -+ { -+ p->proc_fops = &proc_operations; -+ p->owner = THIS_MODULE; -+ p->data = rail; -+ } -+ -+ if ((p = create_proc_entry ("tree", 0, dir)) != NULL) -+ { -+ p->proc_fops = &proc_operations; -+ p->owner = THIS_MODULE; -+ p->data = rail; -+ } -+ -+} -+ -+void -+ep_procfs_rail_fini (EP_RAIL *rail) -+{ -+ struct proc_dir_entry *dir = rail->ProcDir; -+ char name[10]; -+ int i; -+ -+ if (dir == NULL) -+ return; -+ -+ if (rail->CallbackRegistered) -+ { -+ ep_remove_callback (rail, EP_CB_NODESET, nodeset_callback, rail); -+ -+ remove_proc_entry ("nodeset", dir); -+ } -+ -+ remove_proc_entry ("maps", dir); -+ remove_proc_entry ("segs", dir); -+ remove_proc_entry ("tree", dir); -+ -+ for (i = 0; i < sizeof (rail_info)/sizeof (rail_info[0]); i++) -+ remove_proc_entry (rail_info[i].name, dir); -+ -+ sprintf (name, "rail%d", rail->Number); -+ remove_proc_entry (name, ep_procfs_root); -+} -+ -+#include "quadrics_version.h" -+static char quadrics_version[] = QUADRICS_VERSION; -+ -+void -+ep_procfs_init() -+{ -+ extern int txd_stabilise; -+ extern int MaxSwitchLevels; -+ -+ printk ("ep Module (version %s)\n", quadrics_version); -+ -+ spin_lock_init (&ep_nodeset_lock); -+ init_waitqueue_head (&ep_nodeset_wait); -+ -+ ep_procfs_root = proc_mkdir ("ep", qsnet_procfs_root); -+ ep_config_root = proc_mkdir ("config", ep_procfs_root); -+ -+ qsnet_proc_register_str (ep_procfs_root, "version", quadrics_version, 1); -+ -+ qsnet_proc_register_hex (ep_config_root, "epdebug", &epdebug, 0); -+ qsnet_proc_register_hex (ep_config_root, "epdebug_console", &epdebug_console, 0); -+ qsnet_proc_register_hex (ep_config_root, "epdebug_cmlevel", &epdebug_cmlevel, 0); -+#if ! 
defined(CONFIG_EP_NO_CHECK_SUM) -+    qsnet_proc_register_hex (ep_config_root, "epdebug_check_sum",     &epdebug_check_sum,     0); -+#endif -+    qsnet_proc_register_hex (ep_config_root, "epcomms_forward_limit", &epcomms_forward_limit, 0); -+    qsnet_proc_register_int (ep_config_root, "txd_stabilise",         &txd_stabilise,         0); -+    qsnet_proc_register_int (ep_config_root, "assfail_mode",          &assfail_mode,          0); -+    qsnet_proc_register_int (ep_config_root, "max_switch_levels",     &MaxSwitchLevels,       1); -+ -+    ep_procfs_rcvr_xmtr_init(); -+} -+ -+void -+ep_procfs_fini(void) -+{ -+    ep_procfs_rcvr_xmtr_fini(); -+ -+    remove_proc_entry ("max_switch_levels",     ep_config_root); -+    remove_proc_entry ("assfail_mode",          ep_config_root); -+    remove_proc_entry ("txd_stabilise",         ep_config_root); -+    remove_proc_entry ("epcomms_forward_limit", ep_config_root); -+ -+#if ! defined(CONFIG_EP_NO_CHECK_SUM) -+    remove_proc_entry ("epdebug_check_sum",     ep_config_root); -+#endif -+    remove_proc_entry ("epdebug_cmlevel",       ep_config_root); -+    remove_proc_entry ("epdebug_console",       ep_config_root); -+    remove_proc_entry ("epdebug",               ep_config_root); -+ -+    remove_proc_entry ("version", ep_procfs_root); -+ -+    remove_proc_entry ("config", ep_procfs_root); -+    remove_proc_entry ("ep",     qsnet_procfs_root); -+ -+    spin_lock_destroy (&ep_nodeset_lock); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/quadrics_version.h linux-2.6.9/drivers/net/qsnet/ep/quadrics_version.h ---- clean/drivers/net/qsnet/ep/quadrics_version.h	1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/quadrics_version.h	2005-09-07 10:39:49.000000000 -0400 -@@ -0,0 +1 @@ -+#define QUADRICS_VERSION "5.11.3qsnet" -diff -urN clean/drivers/net/qsnet/ep/railhints.c linux-2.6.9/drivers/net/qsnet/ep/railhints.c ---- clean/drivers/net/qsnet/ep/railhints.c	1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/railhints.c	2004-02-06 17:37:06.000000000 -0500 -@@ -0,0 +1,103 @@ -+/* -+ *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ *    Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ *    For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: railhints.c,v 1.5 2004/02/06 22:37:06 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $" -+/*      $Source: /cvs/master/quadrics/epmod/railhints.c,v $*/ -+ -+#include -+ -+#include -+#include -+#include -+ -+#include "debug.h" -+ -+int -+ep_pickRail(EP_RAILMASK railmask) -+{ -+    static volatile int lastGlobal; -+    int i, rnum, last = lastGlobal; -+ -+    /* Pick a single rail out of the railmask */ -+    for (i = 0; i < EP_MAX_RAILS; i++) -+	if (railmask & (1 << ((last + i) % EP_MAX_RAILS))) -+	    break; -+ -+    if (i == EP_MAX_RAILS) -+	return (-1); -+ -+    rnum = (last + i) % EP_MAX_RAILS; -+ -+    lastGlobal = (rnum + 1) % EP_MAX_RAILS; -+ -+    ASSERT (railmask & (1 << rnum)); -+ -+    return (rnum); -+} -+ -+int -+ep_xmtr_bcastrail (EP_XMTR *xmtr, EP_RAILMASK allowedRails) -+{ -+    /* Return a single rail out of allowed mask with the best connectivity for broadcast.
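 In practice this simply round-robins over the allowed rails via ep_pickRail.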
*/ -+    return (ep_pickRail (allowedRails & xmtr->RailMask)); -+} -+ -+int -+ep_xmtr_prefrail (EP_XMTR *xmtr, EP_RAILMASK allowedRails, unsigned nodeId) -+{ -+    EP_NODE *node = &xmtr->Subsys->Subsys.Sys->Nodes[nodeId]; -+ -+    EPRINTF5 (DBG_XMTR, "ep_xmtr_prefrail: xmtr=%p allowedRails=%x nodeId=%d xmtr->RailMask=%x Connected=%x\n",  -+	      xmtr, allowedRails, nodeId, xmtr->RailMask, node->ConnectedRails); -+ -+    /* Return a single rail which is currently connected to nodeId (limited to rails -+     * in allowedmask) - if more than one rail is possible, then round-robin between  -+     * them */ -+    return (ep_pickRail (allowedRails & xmtr->RailMask & node->ConnectedRails)); -+} -+ -+EP_RAILMASK -+ep_xmtr_availrails (EP_XMTR *xmtr) -+{ -+    /* Return which rails can be used to transmit on. */ -+ -+    return (xmtr->RailMask); -+} -+ -+EP_RAILMASK -+ep_xmtr_noderails (EP_XMTR *xmtr, unsigned nodeId) -+{ -+    EP_NODE *node = &xmtr->Subsys->Subsys.Sys->Nodes[nodeId]; -+ -+    /* Return which rails can be used to transmit to this node. */ -+ -+    return (xmtr->RailMask & node->ConnectedRails); -+} -+ -+int -+ep_rcvr_prefrail (EP_RCVR *rcvr, EP_RAILMASK allowedRails) -+{ -+    /* Return the "best" rail for queueing a receive buffer out on - this will be a -+     * rail with ThreadWaiting set or the rail with the least descriptors queued -+     * on it. */ -+ -+    return (ep_pickRail (allowedRails & rcvr->RailMask)); -+} -+ -+EP_RAILMASK -+ep_rcvr_availrails (EP_RCVR *rcvr) -+{ -+    /* Return which rails can be used to queue receive buffers. */ -+    return (rcvr->RailMask); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/rmap.c linux-2.6.9/drivers/net/qsnet/ep/rmap.c ---- clean/drivers/net/qsnet/ep/rmap.c	1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/rmap.c	2004-05-19 06:24:38.000000000 -0400 -@@ -0,0 +1,365 @@ -+/* -+ *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ *    Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ *    For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: rmap.c,v 1.15 2004/05/19 10:24:38 david Exp $" -+/*      $Source: /cvs/master/quadrics/epmod/rmap.c,v $ */ -+ -+#include -+#include -+ -+#include "debug.h" -+ -+void -+ep_display_rmap (EP_RMAP *mp) -+{ -+    EP_RMAP_ENTRY *bp; -+    unsigned long flags; -+ -+    spin_lock_irqsave (&mp->m_lock, flags); -+    ep_debugf (DBG_DEBUG, "map: %s size %d free %d\n", mp->m_name, mp->m_size, mp->m_free); -+    for (bp = &mp->m_map[0]; bp->m_size; bp++) -+	ep_debugf (DBG_DEBUG, "  [%lx - %lx]\n", bp->m_addr, bp->m_addr+bp->m_size-1); -+    spin_unlock_irqrestore (&mp->m_lock, flags); -+} -+ -+void -+ep_mapinit (EP_RMAP *mp, char *name, u_int mapsize) -+{ -+    spin_lock_init (&mp->m_lock); -+    kcondvar_init (&mp->m_wait); -+ -+    /* The final segment in the array has size 0 and acts as a delimiter - -+     * we ensure that we never use segments past the end of the array by -+     * maintaining a free segment count in m_free.
When excess segments -+     * occur we discard some resources */ -+ -+    mp->m_size = mapsize; -+    mp->m_free = mapsize; -+    mp->m_name = name; -+ -+    bzero (mp->m_map, sizeof (EP_RMAP_ENTRY) * (mapsize+1)); -+} -+ -+EP_RMAP * -+ep_rmallocmap (size_t mapsize, char *name, int cansleep) -+{ -+    EP_RMAP *mp; -+ -+    KMEM_ZALLOC (mp, EP_RMAP *, sizeof (EP_RMAP) + mapsize*sizeof (EP_RMAP_ENTRY), cansleep); -+ -+    if (mp != NULL) -+	ep_mapinit (mp, name, mapsize); -+ -+    return (mp); -+} -+ -+void -+ep_rmfreemap (EP_RMAP *mp) -+{ -+    spin_lock_destroy (&mp->m_lock); -+    kcondvar_destroy (&mp->m_wait); -+ -+    KMEM_FREE (mp, sizeof (EP_RMAP) + mp->m_size * sizeof (EP_RMAP_ENTRY)); -+} -+ -+static u_long -+ep_rmalloc_locked (EP_RMAP *mp, size_t size) -+{ -+    EP_RMAP_ENTRY *bp; -+    u_long         addr; -+ -+    ASSERT (size > 0); -+    ASSERT (SPINLOCK_HELD (&mp->m_lock)); -+ -+    for (bp = &mp->m_map[0]; bp->m_size; bp++) -+    { -+	if (bp->m_size >= size) -+	{ -+	    addr = bp->m_addr; -+	    bp->m_addr += size; -+ -+	    if ((bp->m_size -= size) == 0) -+	    { -+		/* taken all of this slot - so shift the map down */ -+		do { -+		    bp++; -+		    (bp-1)->m_addr = bp->m_addr; -+		} while (((bp-1)->m_size = bp->m_size) != 0); -+ -+		mp->m_free++; -+	    } -+	    return (addr); -+	} -+    } -+ -+    return (0); -+} -+ -+u_long -+ep_rmalloc (EP_RMAP *mp, size_t size, int cansleep) -+{ -+    unsigned long addr; -+    unsigned long flags; -+ -+    spin_lock_irqsave (&mp->m_lock, flags); -+    while ((addr = ep_rmalloc_locked (mp, size)) == 0 && cansleep) -+    { -+	mp->m_want = 1; -+	kcondvar_wait (&mp->m_wait, &mp->m_lock, &flags); -+    } -+ -+    spin_unlock_irqrestore (&mp->m_lock, flags); -+ -+    return (addr); -+} -+ -+ -+ -+u_long -+ep_rmalloc_constrained (EP_RMAP *mp, size_t size, u_long alo, u_long ahi, u_long align, int cansleep) -+{ -+    EP_RMAP_ENTRY *bp, *bp2, *lbp; -+    unsigned long  addr=0; -+    size_t         delta; -+    int            ok; -+    unsigned long  flags; -+ -+    spin_lock_irqsave (&mp->m_lock, flags); -+ again: -+    for (bp = &mp->m_map[0]; bp->m_size; bp++) -+    { -+	delta = 0; -+ -+	if (alo < bp->m_addr) -+	{ -+	    addr = bp->m_addr; -+ -+	    if (addr & (align-1)) -+		addr = (addr + (align-1)) & ~(align-1); -+ -+	    delta = addr - bp->m_addr; -+ -+	    if (ahi >= bp->m_addr + bp->m_size) -+		ok = (bp->m_size >= (size + delta)); -+	    else -+		ok = ((bp->m_addr + size + delta) <= ahi); -+	} -+	else -+	{ -+	    addr = alo; -+	    if (addr & (align-1)) -+		addr = (addr + (align-1)) & ~(align-1); -+	    delta = addr - bp->m_addr; -+ -+	    if (ahi >= bp->m_addr + bp->m_size) -+		ok = ((alo + size + delta) <= (bp->m_addr + bp->m_size)); -+	    else -+		ok = ((alo + size + delta) <= ahi); -+	} -+ -+	if (ok) -+	    break; -+    } -+ -+    if (bp->m_size == 0) -+    { -+	if (cansleep) -+	{ -+	    mp->m_want = 1; -+	    kcondvar_wait (&mp->m_wait, &mp->m_lock, &flags); -+	    goto again; -+	} -+	spin_unlock_irqrestore (&mp->m_lock, flags); -+	return (0); -+    } -+ -+    /* found an appropriate map entry - so take the bit out which we want */ -+    if (bp->m_addr == addr) -+    { -+	if (bp->m_size == size) -+	{ -+	    /* allocate entire segment and compress map */ -+	    bp2 = bp; -+	    while (bp2->m_size) -+	    { -+		bp2++; -+		(bp2-1)->m_addr = bp2->m_addr; -+		(bp2-1)->m_size = bp2->m_size; -+	    } -+	    mp->m_free++; -+	} -+	else -+	{ -+	    /* take from start of segment */ -+	    bp->m_addr += size; -+	    bp->m_size -= size; -+	} -+    } -+    else -+    { -+	if (bp->m_addr + bp->m_size == addr + size) -+	{ -+	    /* take from end of segment */ -+	    bp->m_size -= size; -+	} -+	else  -+	{ -+	    /* split the segment losing the last entry if there's no space */ -+	    if (mp->m_free == 0) -+	    { -+		/* find last map entry */ -+		for (lbp = bp; lbp->m_size
!= 0; lbp++) -+		    ; -+		lbp--; -+ -+		if (lbp->m_size > (lbp-1)->m_size) -+		    lbp--; -+ -+		printk ("%s: lost resource map entry [%lx, %lx]\n", -+			mp->m_name, lbp->m_addr, lbp->m_addr + lbp->m_size); -+		 -+		*lbp = *(lbp+1); -+		(lbp+1)->m_size = 0; -+ -+		mp->m_free++; -+	    } -+	     -+	    for (bp2 = bp; bp2->m_size != 0; bp2++) -+		continue; -+	     -+	    for (bp2--; bp2 > bp; bp2--) -+	    { -+		(bp2+1)->m_addr = bp2->m_addr; -+		(bp2+1)->m_size = bp2->m_size; -+	    } -+	     -+	    mp->m_free--; -+	     -+	    (bp+1)->m_addr = addr + size; -+	    (bp+1)->m_size = bp->m_addr + bp->m_size - (addr + size); -+	    bp->m_size = addr - bp->m_addr; -+	} -+    } -+     -+    spin_unlock_irqrestore (&mp->m_lock, flags); -+    return (addr); -+} -+ -+void -+ep_rmfree (EP_RMAP *mp, size_t size, u_long addr) -+{ -+    EP_RMAP_ENTRY *bp; -+    unsigned long  t; -+    unsigned long  flags; -+ -+    spin_lock_irqsave (&mp->m_lock, flags); -+ -+    ASSERT (addr != 0 && size > 0); -+ -+again: -+    /* find the piece of the map which starts after the returned space -+     * or the end of the map */ -+    for (bp = &mp->m_map[0]; bp->m_addr <= addr && bp->m_size != 0; bp++) -+	; -+ -+    /* bp points to the piece to the right of where we want to go */ -+     -+    if (bp > &mp->m_map[0] && (bp-1)->m_addr + (bp-1)->m_size >= addr) -+    { -+	/* merge with piece on the left */ -+	 -+	ASSERT ((bp-1)->m_addr + (bp-1)->m_size <= addr); -+ -+	(bp-1)->m_size += size; -+	 -+	ASSERT (bp->m_size == 0 || addr+size <= bp->m_addr); -+	 -+	if (bp->m_size && (addr + size) == bp->m_addr) -+	{ -+	    /* merge with the piece on the right by -+	     * growing the piece on the left and shifting -+	     * the map down */ -+	     -+	    ASSERT ((addr + size) <= bp->m_addr); -+	     -+	    (bp-1)->m_size += bp->m_size; -+	    while (bp->m_size) -+	    { -+		bp++; -+		(bp-1)->m_addr = bp->m_addr; -+		(bp-1)->m_size = bp->m_size; -+	    } -+	     -+	    mp->m_free++; -+	} -+    } -+    else if (addr + size >= bp->m_addr && bp->m_size) -+    { -+	/* merge with piece to the right */ -+	 -+	ASSERT ((addr + size) <= bp->m_addr); -+	 -+	bp->m_addr -= size; -+	bp->m_size += size; -+    } -+    else -+    { -+	/* doesn't join with left or right - check for map -+	   overflow and discard the smallest of the last or -+	   next to last entries */ -+ -+	if (mp->m_free == 0) -+	{ -+	    EP_RMAP_ENTRY *lbp; -+ -+	    /* find last map entry */ -+	    for (lbp = bp; lbp->m_size != 0; lbp++) -+		; -+	    lbp--; -+ -+	    if (lbp->m_size > (lbp-1)->m_size) -+		lbp--; -+ -+	    printk ("%s: lost resource map entry [%lx, %lx]\n", -+		    mp->m_name, lbp->m_addr, lbp->m_addr + lbp->m_size); -+	     -+	    *lbp = *(lbp+1); -+	    (lbp+1)->m_size = 0; -+ -+	    mp->m_free++; -+	    goto again; -+	} -+ -+	/* make a new entry and push the remaining ones up */ -+	do { -+	    t = bp->m_addr; -+	    bp->m_addr = addr; -+	    addr = t; -+	    t = bp->m_size; -+	    bp->m_size = size; -+	    bp++; -+	} while ((size = t) != 0); -+ -+	mp->m_free--; -+    } -+ -+    /* if anyone blocked on rmalloc failure, wake 'em up */ -+    if (mp->m_want) -+    { -+	mp->m_want = 0; -+	kcondvar_wakeupall (&mp->m_wait, &mp->m_lock); -+    } -+ -+    spin_unlock_irqrestore (&mp->m_lock, flags); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/spinlock_elan3_thread.c linux-2.6.9/drivers/net/qsnet/ep/spinlock_elan3_thread.c ---- clean/drivers/net/qsnet/ep/spinlock_elan3_thread.c	1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/spinlock_elan3_thread.c	2003-10-07 09:22:38.000000000 -0400 -@@ -0,0 +1,44 @@ -+/* -+ *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ *    Copyright (c) 2002-2003 by Quadrics Ltd.
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: spinlock_elan3_thread.c,v 1.9 2003/10/07 13:22:38 david Exp $" -+/* $Source: /cvs/master/quadrics/epmod/spinlock_elan3_thread.c,v $ */ -+ -+#include -+ -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+#include "kcomm_elan3.h" -+#include "epcomms_elan3.h" -+ -+void -+ep3_spinblock (EP3_SPINLOCK_ELAN *sle, EP3_SPINLOCK_MAIN *sl) -+{ -+ do { -+ sl->sl_seq = sle->sl_seq; /* Release my lock */ -+ -+ while (sle->sl_lock) /* Wait until the main */ -+ c_break(); /* releases the lock */ -+ -+ sle->sl_seq++; /* and try and relock */ -+ } while (sle->sl_lock); -+} -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/statemap.c linux-2.6.9/drivers/net/qsnet/ep/statemap.c ---- clean/drivers/net/qsnet/ep/statemap.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/statemap.c 2004-09-01 12:13:43.000000000 -0400 -@@ -0,0 +1,385 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: statemap.c,v 1.12 2004/09/01 16:13:43 david Exp $" -+/* $Source: /cvs/master/quadrics/epmod/statemap.c,v $ */ -+ -+#include -+#include -+ -+/******************************** global state bitmap stuff **********************************/ -+static int -+statemap_setmapbit (bitmap_t *map, int offset, int bit) -+{ -+ bitmap_t *e = &map[offset >> BT_ULSHIFT]; -+ bitmap_t mask = ((bitmap_t)1) << (offset & BT_ULMASK); -+ int rc = ((*e) & mask) != 0; -+ -+ if (bit) -+ { -+ *e |= mask; -+ return (!rc); -+ } -+ -+ *e &= ~mask; -+ return (rc); -+} -+ -+static int -+statemap_firstsegbit (bitmap_t seg) -+{ -+ int bit = 0; -+ -+ if (seg == 0) -+ return (-1); -+ -+#if (BT_ULSHIFT == 6) -+ if ((seg & 0xffffffffL) == 0) -+ { -+ seg >>= 32; -+ bit += 32; -+ } -+#elif (BT_ULSHIFT != 5) -+# error "Unexpected value of BT_ULSHIFT" -+#endif -+ -+ if ((seg & 0xffff) == 0) -+ { -+ seg >>= 16; -+ bit += 16; -+ } -+ -+ if ((seg & 0xff) == 0) -+ { -+ seg >>= 8; -+ bit += 8; -+ } -+ -+ if ((seg & 0xf) == 0) -+ { -+ seg >>= 4; -+ bit += 4; -+ } -+ -+ if ((seg & 0x3) == 0) -+ { -+ seg >>= 2; -+ bit += 2; -+ } -+ -+ return (((seg & 0x1) == 0) ? bit + 1 : bit); -+} -+ -+bitmap_t -+statemap_getseg (statemap_t *map, unsigned int offset) -+{ -+ ASSERT (offset < map->size); -+ ASSERT ((offset & BT_ULMASK) == 0); -+ -+ return (map->bitmap[offset >> BT_ULSHIFT]); -+} -+ -+void -+statemap_setseg (statemap_t *map, unsigned int offset, bitmap_t seg) -+{ -+ ASSERT (offset < map->size); -+ ASSERT ((offset & BT_ULMASK) == 0); -+ -+ offset >>= BT_ULSHIFT; -+ if (map->bitmap[offset] == seg) -+ return; -+ -+ map->bitmap[offset] = seg; -+ -+ if (statemap_setmapbit (map->changemap2, offset, 1) && -+ statemap_setmapbit (map->changemap1, offset >>= BT_ULSHIFT, 1)) -+ statemap_setmapbit (map->changemap0, offset >>= BT_ULSHIFT, 1); -+} -+ -+bitmap_t -+statemap_getbits (statemap_t *map, unsigned int offset, int nbits) -+{ -+ int index = offset >> BT_ULSHIFT; -+ bitmap_t mask = (nbits == BT_NBIPUL) ? 
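-+	/* a shift by the full word width (nbits == BT_NBIPUL) would be undefined C, hence the all-ones special case: */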
(bitmap_t) -1 : (((bitmap_t)1) << nbits) - 1; -+ -+ ASSERT (nbits <= BT_NBIPUL); -+ ASSERT (offset + nbits <= map->size); -+ -+ offset &= BT_ULMASK; -+ if (offset + nbits <= BT_NBIPUL) -+ return ((map->bitmap[index] >> offset) & mask); -+ -+ return (((map->bitmap[index] >> offset) | -+ (map->bitmap[index + 1] << (BT_NBIPUL - offset))) & mask); -+} -+ -+void -+statemap_setbits (statemap_t *map, unsigned int offset, bitmap_t bits, int nbits) -+{ -+ int index = offset >> BT_ULSHIFT; -+ bitmap_t mask; -+ bitmap_t seg; -+ bitmap_t newseg; -+ -+ ASSERT (nbits <= BT_NBIPUL); -+ ASSERT (offset + nbits <= map->size); -+ -+ offset &= BT_ULMASK; -+ if (offset + nbits <= BT_NBIPUL) -+ { -+ mask = ((nbits == BT_NBIPUL) ? -1 : ((((bitmap_t)1) << nbits) - 1)) << offset; -+ seg = map->bitmap[index]; -+ newseg = ((bits << offset) & mask) | (seg & ~mask); -+ -+ if (seg == newseg) -+ return; -+ -+ map->bitmap[index] = newseg; -+ -+ if (statemap_setmapbit (map->changemap2, index, 1) && -+ statemap_setmapbit (map->changemap1, index >>= BT_ULSHIFT, 1)) -+ statemap_setmapbit (map->changemap0, index >>= BT_ULSHIFT, 1); -+ return; -+ } -+ -+ mask = ((bitmap_t)-1) << offset; -+ seg = map->bitmap[index]; -+ newseg = ((bits << offset) & mask) | (seg & ~mask); -+ -+ if (seg != newseg) -+ { -+ map->bitmap[index] = newseg; -+ -+ if (statemap_setmapbit (map->changemap2, index, 1) && -+ statemap_setmapbit (map->changemap1, index >> BT_ULSHIFT, 1)) -+ statemap_setmapbit (map->changemap0, index >> (2 * BT_ULSHIFT), 1); -+ } -+ -+ index++; -+ offset = BT_NBIPUL - offset; -+ mask = (((bitmap_t)1) << (nbits - offset)) - 1; -+ seg = map->bitmap[index]; -+ newseg = ((bits >> offset) & mask) | (seg & ~mask); -+ -+ if (seg == newseg) -+ return; -+ -+ map->bitmap[index] = newseg; -+ -+ if (statemap_setmapbit (map->changemap2, index, 1) && -+ statemap_setmapbit (map->changemap1, index >>= BT_ULSHIFT, 1)) -+ statemap_setmapbit (map->changemap0, index >>= BT_ULSHIFT, 1); -+} -+ -+void -+statemap_zero (statemap_t *dst) -+{ -+ int size = dst->size; -+ int offset = 0; -+ bitmap_t *changemap0 = dst->changemap0; -+ bitmap_t *changemap1 = dst->changemap1; -+ bitmap_t *changemap2 = dst->changemap2; -+ bitmap_t *dstmap = dst->bitmap; -+ bitmap_t bit0; -+ bitmap_t bit1; -+ bitmap_t bit2; -+ -+ for (bit0 = 1; offset < size; bit0 <<= 1, changemap1++) -+ { -+ for (bit1 = 1; bit1 != 0 && offset < size; bit1 <<= 1, changemap2++) -+ { -+ for (bit2 = 1; bit2 != 0 && offset < size; bit2 <<= 1, dstmap++, offset += BT_NBIPUL) -+ { -+ *dstmap = 0; -+ *changemap2 |= bit2; -+ } -+ *changemap1 |= bit1; -+ } -+ *changemap0 |= bit0; -+ } -+} -+ -+void -+statemap_setmap (statemap_t *dst, statemap_t *src) -+{ -+ int size = dst->size; -+ int offset = 0; -+ bitmap_t *changemap0 = dst->changemap0; -+ bitmap_t *changemap1 = dst->changemap1; -+ bitmap_t *changemap2 = dst->changemap2; -+ bitmap_t *dstmap = dst->bitmap; -+ bitmap_t *srcmap = src->bitmap; -+ bitmap_t bit0; -+ bitmap_t bit1; -+ bitmap_t bit2; -+ -+ ASSERT (src->size == size); -+ -+ for (bit0 = 1; offset < size; bit0 <<= 1, changemap1++) -+ { -+ for (bit1 = 1; bit1 != 0 && offset < size; bit1 <<= 1, changemap2++) -+ { -+ for (bit2 = 1; bit2 != 0 && offset < size; bit2 <<= 1, dstmap++, srcmap++, offset += BT_NBIPUL) -+ if (*dstmap != *srcmap) -+ { -+ *dstmap = *srcmap; -+ *changemap2 |= bit2; -+ } -+ if (*changemap2 != 0) -+ *changemap1 |= bit1; -+ } -+ if (*changemap1 != 0) -+ *changemap0 |= bit0; -+ } -+} -+ -+void -+statemap_ormap (statemap_t *dst, statemap_t *src) -+{ -+ int size = dst->size; -+ int 
offset = 0; -+ bitmap_t *changemap0 = dst->changemap0; -+ bitmap_t *changemap1 = dst->changemap1; -+ bitmap_t *changemap2 = dst->changemap2; -+ bitmap_t *dstmap = dst->bitmap; -+ bitmap_t *srcmap = src->bitmap; -+ bitmap_t bit0; -+ bitmap_t bit1; -+ bitmap_t bit2; -+ bitmap_t seg; -+ -+ ASSERT (src->size == size); -+ -+ for (bit0 = 1; offset < size; bit0 <<= 1, changemap1++) -+ { -+ for (bit1 = 1; bit1 != 0 && offset < size; bit1 <<= 1, changemap2++) -+ { -+ for (bit2 = 1; bit2 != 0 && offset < size; bit2 <<= 1, dstmap++, srcmap++, offset += BT_NBIPUL) -+ { -+ seg = *dstmap | *srcmap; -+ if (*dstmap != seg) -+ { -+ *dstmap = seg; -+ *changemap2 |= bit2; -+ } -+ } -+ if (*changemap2 != 0) -+ *changemap1 |= bit1; -+ } -+ if (*changemap1 != 0) -+ *changemap0 |= bit0; -+ } -+} -+ -+int -+statemap_findchange (statemap_t *map, bitmap_t *newseg, int clearchange) -+{ -+ int bit0; -+ bitmap_t *cm1; -+ int bit1; -+ bitmap_t *cm2; -+ int bit2; -+ unsigned int offset; -+ -+ bit0 = statemap_firstsegbit (*(map->changemap0)); -+ if (bit0 < 0) -+ return (-1); -+ -+ offset = bit0; -+ cm1 = map->changemap1 + offset; -+ bit1 = statemap_firstsegbit (*cm1); -+ ASSERT (bit1 >= 0); -+ -+ offset = (offset << BT_ULSHIFT) + bit1; -+ cm2 = map->changemap2 + offset; -+ bit2 = statemap_firstsegbit (*cm2); -+ ASSERT (bit2 >= 0); -+ -+ offset = (offset << BT_ULSHIFT) + bit2; -+ *newseg = map->bitmap[offset]; -+ -+ if (clearchange && -+ (*cm2 &= ~(((bitmap_t)1) << bit2)) == 0 && -+ (*cm1 &= ~(((bitmap_t)1) << bit1)) == 0) -+ map->changemap0[0] &= ~(((bitmap_t)1) << bit0); -+ -+ return (offset << BT_ULSHIFT); -+} -+ -+int -+statemap_changed (statemap_t *map) -+{ -+ return ((*(map->changemap0) != 0)); -+} -+ -+void -+statemap_reset (statemap_t *map) -+{ -+ bzero (map->changemap0, map->changemap_nob + map->bitmap_nob); -+} -+ -+void -+statemap_copy (statemap_t *dst, statemap_t *src) -+{ -+ ASSERT (dst->size == src->size); -+ bcopy (src->changemap0, dst->changemap0, src->changemap_nob + src->bitmap_nob); -+} -+ -+void -+statemap_clearchanges (statemap_t *map) -+{ -+ if (statemap_changed (map)) -+ bzero (map->changemap0, map->changemap_nob); -+} -+ -+bitmap_t * -+statemap_tobitmap (statemap_t *map) -+{ -+ return (map->bitmap); -+} -+ -+statemap_t * -+statemap_create (int size) -+{ -+ int struct_entries = (sizeof (statemap_t) * 8 + (BT_NBIPUL-1)) >> BT_ULSHIFT; -+ int bitmap_entries = (size + (BT_NBIPUL-1)) >> BT_ULSHIFT; -+ int changemap2_entries = (bitmap_entries + (BT_NBIPUL-1)) >> BT_ULSHIFT; -+ int changemap1_entries = (changemap2_entries + (BT_NBIPUL-1)) >> BT_ULSHIFT; -+ int changemap0_entries = (changemap1_entries + (BT_NBIPUL-1)) >> BT_ULSHIFT; -+ int changemap_entries = changemap0_entries + changemap1_entries + changemap2_entries; -+ int nob = (struct_entries + bitmap_entries + changemap_entries) * sizeof (bitmap_t); -+ statemap_t *map; -+ -+ ASSERT ((1 << BT_ULSHIFT) == BT_NBIPUL); -+ ASSERT (changemap0_entries == 1); -+ -+ KMEM_ZALLOC (map, statemap_t *, nob, 1); -+ -+ map->size = size; -+ map->nob = nob; -+ map->changemap_nob = changemap_entries * sizeof (bitmap_t); -+ map->bitmap_nob = bitmap_entries * sizeof (bitmap_t); -+ map->changemap0 = ((bitmap_t *)map) + struct_entries; -+ map->changemap1 = map->changemap0 + changemap0_entries; -+ map->changemap2 = map->changemap1 + changemap1_entries; -+ map->bitmap = map->changemap2 + changemap2_entries; -+ -+ return (map); -+} -+ -+void -+statemap_destroy (statemap_t *map) -+{ -+ KMEM_FREE (map, map->nob); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: 
"stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/statusmon.h linux-2.6.9/drivers/net/qsnet/ep/statusmon.h ---- clean/drivers/net/qsnet/ep/statusmon.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/statusmon.h 2003-10-07 09:22:38.000000000 -0400 -@@ -0,0 +1,44 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: statusmon.h,v 1.6 2003/10/07 13:22:38 david Exp $" -+/* $Source: /cvs/master/quadrics/epmod/statusmon.h,v $*/ -+ -+#ifndef __ELAN3_STATUSMON_H -+#define __ELAN3_STATUSMON_H -+ -+typedef struct statusmon_node -+{ -+ u_int NodeId; -+ u_int State; -+} STATUSMON_SGMT; -+ -+typedef struct statusmon_level -+{ -+ unsigned Width; -+ STATUSMON_SGMT Nodes[CM_SGMTS_PER_LEVEL]; -+} STATUSMON_LEVEL; -+ -+typedef struct statusmon_msg -+{ -+ unsigned Type; -+ unsigned NodeId; -+ unsigned NumLevels; -+ unsigned TopLevel; -+ unsigned Role; -+ STATUSMON_LEVEL Levels[CM_MAX_LEVELS]; -+} STATUSMON_MSG; -+ -+ -+#endif /* __ELAN3_STATUSMON_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/support.c linux-2.6.9/drivers/net/qsnet/ep/support.c ---- clean/drivers/net/qsnet/ep/support.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/support.c 2004-09-30 10:59:15.000000000 -0400 -@@ -0,0 +1,109 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: support.c,v 1.39 2004/09/30 14:59:15 david Exp $" -+/* $Source: /cvs/master/quadrics/epmod/support.c,v $ */ -+ -+#include -+#include -+ -+/****************************************************************************************/ -+/* -+ * Nodeset/flush callbacks. 
-+ */ -+int -+ep_register_callback (EP_RAIL *rail, unsigned idx, void (*routine)(void *, statemap_t *), void *arg) -+{ -+ EP_CALLBACK *cb; -+ -+ KMEM_ALLOC (cb, EP_CALLBACK *, sizeof (EP_CALLBACK), 1); -+ -+ cb->Routine = routine; -+ cb->Arg = arg; -+ -+ kmutex_lock (&rail->CallbackLock); -+ cb->Next = rail->CallbackList[idx]; -+ rail->CallbackList[idx] = cb; -+ kmutex_unlock (&rail->CallbackLock); -+ -+ return (ESUCCESS); -+} -+ -+void -+ep_remove_callback (EP_RAIL *rail, unsigned idx, void (*routine)(void *, statemap_t *), void *arg) -+{ -+ EP_CALLBACK *cb; -+ EP_CALLBACK **predp; -+ -+ kmutex_lock (&rail->CallbackLock); -+ for (predp = &rail->CallbackList[idx]; (cb = *predp); predp = &cb->Next) -+ if (cb->Routine == routine && cb->Arg == arg) -+ break; -+ -+ if (cb == NULL) -+ panic ("ep_remove_member_callback"); -+ -+ *predp = cb->Next; -+ kmutex_unlock (&rail->CallbackLock); -+ -+ KMEM_FREE (cb, sizeof (EP_CALLBACK)); -+} -+ -+void -+ep_call_callbacks (EP_RAIL *rail, unsigned idx, statemap_t *map) -+{ -+ EP_CALLBACK *cb; -+ -+ kmutex_lock (&rail->CallbackLock); -+ -+ rail->CallbackStep = idx; -+ -+ for (cb = rail->CallbackList[idx]; cb; cb = cb->Next) { -+ (cb->Routine) (cb->Arg, map); -+ } -+ kmutex_unlock (&rail->CallbackLock); -+} -+ -+unsigned int -+ep_backoff (EP_BACKOFF *backoff, int type) -+{ -+ static int bcount[EP_NUM_BACKOFF] = {1, 16, 32, 64, 128, 256, 512, 1024}; -+ -+ if (backoff->type != type) -+ { -+ backoff->type = type; -+ backoff->indx = 0; -+ backoff->count = 0; -+ } -+ -+ if (++backoff->count > bcount[backoff->indx] && backoff->indx < (EP_NUM_BACKOFF-1)) -+ { -+ backoff->indx++; -+ backoff->count = 0; -+ } -+ -+ return (backoff->indx); -+} -+ -+/* Generic checksum algorithm */ -+uint16_t -+CheckSum (char *msg, int nob) -+{ -+ uint16_t sum = 0; -+ -+ while (nob-- > 0) -+ sum = sum * 13 + *msg++; -+ -+ return (sum); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/support_elan3.c linux-2.6.9/drivers/net/qsnet/ep/support_elan3.c ---- clean/drivers/net/qsnet/ep/support_elan3.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/support_elan3.c 2005-07-20 07:35:37.000000000 -0400 -@@ -0,0 +1,2123 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: support_elan3.c,v 1.47.2.1 2005/07/20 11:35:37 mike Exp $" -+/* $Source: /cvs/master/quadrics/epmod/support_elan3.c,v $ */ -+ -+#include -+#include -+ -+#include -+#include -+#include -+ -+#include "kcomm_vp.h" -+#include "kcomm_elan3.h" -+#include "epcomms_elan3.h" -+#include "debug.h" -+ -+#include -+#include -+ -+/****************************************************************************************/ -+#define DMA_RING_NEXT_POS(ring) ((ring)->Position+1 == ring->Entries ? 0 : ((ring)->Position+1)) -+#define DMA_RING_PREV_POS(ring,pos) ((pos) == 0 ? (ring)->Entries-1 : (pos) - 1) -+ -+static int -+DmaRingCreate (EP3_RAIL *rail, EP3_DMA_RING *ring, int ctxnum, int entries) -+{ -+ unsigned long pgnum = (ctxnum * sizeof (E3_CommandPort)) / PAGE_SIZE; -+ unsigned long pgoff = (ctxnum * sizeof (E3_CommandPort)) & (PAGE_SIZE-1); -+ int s; -+ -+ /* set up the initial position */ -+ ring->Entries = entries; -+ ring->Position = 0; -+ -+ if (! 
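-+	/* each ring slot is backed by an elan block-copy event, an elan DMA descriptor and a main-memory "done" word: */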
(ring->pEvent = ep_alloc_elan (&rail->Generic, entries * sizeof (E3_BlockCopyEvent), 0, &ring->epEvent))) -+ { -+ ring->CommandPort = (ioaddr_t) NULL; -+ return (ENOMEM); -+ } -+ -+ if (! (ring->pDma = ep_alloc_elan (&rail->Generic, entries * sizeof (E3_DMA), 0, &ring->epDma))) -+ { -+ ep_free_elan (&rail->Generic, ring->epEvent, entries * sizeof (E3_BlockCopyEvent)); -+ -+ ring->CommandPort = (ioaddr_t) NULL; -+ return (ENOMEM); -+ } -+ -+ if (! (ring->pDoneBlk = ep_alloc_main (&rail->Generic, entries * sizeof (E3_uint32), 0, &ring->epDoneBlk))) -+ { -+ ep_free_elan (&rail->Generic, ring->epEvent, entries * sizeof (E3_BlockCopyEvent)); -+ ep_free_elan (&rail->Generic, ring->epDma, entries * sizeof (E3_DMA)); -+ -+ ring->CommandPort = (ioaddr_t) NULL; -+ return (ENOMEM); -+ } -+ -+ if (MapDeviceRegister (rail->Device, ELAN3_BAR_COMMAND_PORT, &ring->CommandPage, pgnum * PAGE_SIZE, PAGE_SIZE, &ring->CommandPageHandle) != ESUCCESS) -+ { -+ ep_free_elan (&rail->Generic, ring->epEvent, entries * sizeof (E3_BlockCopyEvent)); -+ ep_free_elan (&rail->Generic, ring->epDma, entries * sizeof (E3_DMA)); -+ ep_free_main (&rail->Generic, ring->epDoneBlk, entries * sizeof (E3_uint32)); -+ -+ ring->CommandPort = (ioaddr_t) NULL; -+ return (ENOMEM); -+ } -+ ring->CommandPort = ring->CommandPage + pgoff; -+ -+ for (s = 0; s < entries; s++) -+ { -+ /* setup the event */ -+ elan3_sdram_writel(rail->Device, DMA_RING_EVENT(ring,s) + offsetof(E3_BlockCopyEvent,ev_Type), -+ EV_TYPE_BCOPY | EV_TYPE_DMA | DMA_RING_DMA_ELAN(ring, s)); -+ elan3_sdram_writel(rail->Device, DMA_RING_EVENT(ring,s) + offsetof(E3_BlockCopyEvent,ev_Source), DMA_RING_DMA_ELAN(ring,s) | EV_WCOPY); -+ elan3_sdram_writel(rail->Device, DMA_RING_EVENT(ring,s) + offsetof(E3_BlockCopyEvent,ev_Dest), DMA_RING_DONE_ELAN(ring,s) | EV_TYPE_BCOPY_WORD ); -+ -+ /* need to set all the doneBlks to appear that they have completed */ -+ ring->pDoneBlk[s] = DMA_RING_DMA_ELAN(ring,s) | EV_WCOPY; -+ } -+ -+ return 0; /* success */ -+} -+ -+static void -+DmaRingRelease(EP3_RAIL *rail, EP3_DMA_RING *ring) -+{ -+ if (ring->CommandPage != (ioaddr_t) 0) -+ { -+ UnmapDeviceRegister(rail->Device, &ring->CommandPageHandle); -+ -+ ep_free_elan (&rail->Generic, ring->epEvent, ring->Entries * sizeof (E3_BlockCopyEvent)); -+ ep_free_elan (&rail->Generic, ring->epDma, ring->Entries * sizeof (E3_DMA)); -+ ep_free_main (&rail->Generic, ring->epDoneBlk, ring->Entries * sizeof (E3_uint32)); -+ } -+ ring->CommandPage = (ioaddr_t) 0; -+} -+ -+void -+DmaRingsRelease (EP3_RAIL *rail) -+{ -+ DmaRingRelease (rail, &rail->DmaRings[EP3_RING_CRITICAL]); -+ DmaRingRelease (rail, &rail->DmaRings[EP3_RING_HIGH_PRI]); -+ DmaRingRelease (rail, &rail->DmaRings[EP3_RING_LOW_PRI]); -+} -+ -+int -+DmaRingsCreate (EP3_RAIL *rail) -+{ -+ if (DmaRingCreate (rail, &rail->DmaRings[EP3_RING_CRITICAL], ELAN3_DMARING_BASE_CONTEXT_NUM + EP3_RING_CRITICAL, EP3_RING_CRITICAL_LEN) || -+ DmaRingCreate (rail, &rail->DmaRings[EP3_RING_HIGH_PRI], ELAN3_DMARING_BASE_CONTEXT_NUM + EP3_RING_HIGH_PRI, EP3_RING_HIGH_PRI_LEN) || -+ DmaRingCreate (rail, &rail->DmaRings[EP3_RING_LOW_PRI], ELAN3_DMARING_BASE_CONTEXT_NUM + EP3_RING_LOW_PRI, EP3_RING_LOW_PRI_LEN)) -+ { -+ DmaRingsRelease (rail); -+ return (ENOMEM); -+ } -+ -+ return 0; -+} -+ -+static int -+DmaRingNextSlot (EP3_DMA_RING *ring) -+{ -+ int pos = ring->Position; -+ int npos = DMA_RING_NEXT_POS(ring); -+ -+ if (ring->pDoneBlk[npos] == EP3_EVENT_ACTIVE) -+ return (-1); -+ -+ ring->pDoneBlk[pos] = EP3_EVENT_ACTIVE; -+ -+ ring->Position = npos; /* move on one */ 
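-+    /* the slot is reclaimed when its done word is rewritten by the block-copy event, i.e. once the elan has taken the DMA */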
-+ -+    return (pos); -+} -+ -+ -+/****************************************************************************************/ -+/* -+ * Dma/event command issuing - these handle cproc queue overflow traps. -+ */ -+static int -+DmaRunQueueSizeCheck (EP3_RAIL *rail, E3_uint32 len) -+{ -+    E3_uint64 FandBPtr = read_reg64 (rail->Device, DProc_SysCntx_FPtr); -+    E3_uint32 FPtr, BPtr; -+    E3_uint32 qlen; -+ -+#if (BYTE_ORDER == LITTLE_ENDIAN) || defined(__LITTLE_ENDIAN__) -+    FPtr = (FandBPtr & 0xFFFFFFFFull); -+    BPtr = (FandBPtr >> 32); -+#else -+    FPtr = (FandBPtr >> 32); -+    BPtr = (FandBPtr & 0xFFFFFFFFull); -+#endif -+ -+    qlen = (((BPtr - FPtr)/sizeof (E3_DMA)) & (E3_SysCntxQueueSize-1)); -+ -+    if      (qlen < 4)   IncrStat (rail, DmaQueueLength[0]); -+    else if (qlen < 8)   IncrStat (rail, DmaQueueLength[1]); -+    else if (qlen < 16)  IncrStat (rail, DmaQueueLength[2]); -+    else if (qlen < 32)  IncrStat (rail, DmaQueueLength[3]); -+    else if (qlen < 64)  IncrStat (rail, DmaQueueLength[4]); -+    else if (qlen < 128) IncrStat (rail, DmaQueueLength[5]); -+    else if (qlen < 240) IncrStat (rail, DmaQueueLength[6]); -+    else                 IncrStat (rail, DmaQueueLength[7]); -+ -+    return (qlen < len); -+} -+ -+int -+IssueDma (EP3_RAIL *rail, E3_DMA_BE * dmabe, int type, int retryThread) -+{ -+    ELAN3_DEV     *dev = rail->Device; -+    EP3_RETRY_DMA *retry; -+    EP3_DMA_RING  *ring; -+    int            slot; -+    int            i, res; -+    unsigned long  flags; -+ -+    ASSERT (dmabe->s.dma_direction == DMA_WRITE || dmabe->s.dma_direction == DMA_READ_REQUEUE); -+ -+    ASSERT (! EP_VP_ISDATA(dmabe->s.dma_destVProc) || -+	    (dmabe->s.dma_direction == DMA_WRITE ?  -+	     EP_VP_TO_NODE(dmabe->s.dma_srcVProc) == rail->Generic.Position.pos_nodeid : -+	     EP_VP_TO_NODE(dmabe->s.dma_destVProc) == rail->Generic.Position.pos_nodeid)); -+ -+    /* -+     * If we're not the retry thread - then don't issue this DMA -+     * if there are any already queued on the retry lists with -+     * higher or equal priority than this one that are ready to -+     * retry. -+     */ -+    if (! retryThread) -+    { -+	for (i = EP_RETRY_BASE; i < type; i++) -+	{ -+	    if (list_empty (&rail->DmaRetries[i])) -+		continue; -+ -+	    retry = list_entry (rail->DmaRetries[i].next, EP3_RETRY_DMA, Link); -+	     -+	    if (AFTER (lbolt, retry->RetryTime)) -+	    { -+		IncrStat (rail, IssueDmaFail[type]); -+		return (ISSUE_COMMAND_RETRY); -+	    } -+	} -+    } -+ -+    /* -+     * Depending on the type of DMA we're issuing - throttle back -+     * issuing of it if the DMA run queue is too full.  This then -+     * prioritises the "special" messages and completing data -+     * transfers which have matched a receive buffer. -+     */ -+ -+    if (type >= EP_RETRY_LOW_PRI_RETRY) -+    { -+	if (! DmaRunQueueSizeCheck (rail, E3_SysCntxQueueSize / 2)) -+	{ -+	    IncrStat (rail, IssueDmaFail[type]); -+	    return (ISSUE_COMMAND_RETRY); -+	} -+	ring = &rail->DmaRings[EP3_RING_LOW_PRI]; -+    } -+    else if (type == EP_RETRY_LOW_PRI) -+    { -+	if (! DmaRunQueueSizeCheck (rail, E3_SysCntxQueueSize / 3)) -+	{ -+	    IncrStat (rail, IssueDmaFail[type]); -+	    return (ISSUE_COMMAND_RETRY); -+	} -+	ring = &rail->DmaRings[EP3_RING_LOW_PRI]; -+    } -+    else if (type >= EP_RETRY_HIGH_PRI) -+	ring = &rail->DmaRings[EP3_RING_HIGH_PRI]; -+    else -+	ring = &rail->DmaRings[EP3_RING_CRITICAL]; -+ -+    local_irq_save (flags); -+    if (!
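-+	/* trylock - a failed acquire is reported as ISSUE_COMMAND_RETRY rather than spinning here with local interrupts disabled */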
spin_trylock (&dev->CProcLock)) -+ { -+ IncrStat (rail, IssueDmaFail[type]); -+ -+ res = ISSUE_COMMAND_RETRY; -+ } -+ else -+ { -+ if ((slot = DmaRingNextSlot (ring)) == -1) -+ { -+ IncrStat (rail, IssueDmaFail[type]); -+ -+ res = ISSUE_COMMAND_RETRY; -+ } -+ else -+ { -+ EPRINTF4 (DBG_COMMAND, "IssueDma: type %08x size %08x Elan source %08x Elan dest %08x\n", -+ dmabe->s.dma_type, dmabe->s.dma_size, dmabe->s.dma_source, dmabe->s.dma_dest); -+ EPRINTF2 (DBG_COMMAND, " dst event %08x cookie/proc %08x\n", -+ dmabe->s.dma_destEvent, dmabe->s.dma_destCookieVProc); -+ EPRINTF2 (DBG_COMMAND, " src event %08x cookie/proc %08x\n", -+ dmabe->s.dma_srcEvent, dmabe->s.dma_srcCookieVProc); -+ -+ elan3_sdram_copyq_to_sdram (dev, dmabe, DMA_RING_DMA(ring, slot), sizeof (E3_DMA)); /* PCI write block */ -+ elan3_sdram_writel (dev, DMA_RING_EVENT(ring, slot) + offsetof (E3_BlockCopyEvent, ev_Count), 1); /* PCI write */ -+ -+ mb(); /* ensure writes to main memory completed */ -+ writel (DMA_RING_EVENT_ELAN(ring,slot), (void *)(ring->CommandPort + offsetof (E3_CommandPort, SetEvent))); -+ mmiob(); /* and flush through IO writes */ -+ -+ res = ISSUE_COMMAND_OK; -+ } -+ spin_unlock (&dev->CProcLock); -+ } -+ local_irq_restore (flags); -+ -+ return (res); -+} -+ -+int -+IssueWaitevent (EP3_RAIL *rail, E3_Addr value) -+{ -+ ELAN3_DEV *dev = rail->Device; -+ int res; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ -+ ASSERT (rail->CommandPortEventTrap == FALSE); -+ -+ /* -+ * Disable the command processor interrupts, so that we don't see -+ * spurious interrupts appearing. -+ */ -+ DISABLE_INT_MASK (dev, INT_CProc | INT_ComQueue); -+ -+ EPRINTF1 (DBG_COMMAND, "IssueWaitevent: %08x\n", value); -+ -+ mb(); /* ensure writes to main memory completed */ -+ writel (value, (void *)(rail->CommandPort + offsetof (E3_CommandPort, WaitEvent0))); -+ mmiob(); /* and flush through IO writes */ -+ -+ do { -+ res = CheckCommandQueueFlushed (rail->Ctxt, EventComQueueNotEmpty, ISSUE_COMMAND_CANT_WAIT, &flags); -+ -+ EPRINTF1 (DBG_COMMAND, "IssueWaitevent: CheckCommandQueueFlushed -> %d\n", res); -+ -+ if (res == ISSUE_COMMAND_WAIT) -+ HandleCProcTrap (dev, 0, NULL); -+ } while (res != ISSUE_COMMAND_OK); -+ -+ if (! rail->CommandPortEventTrap) -+ res = ISSUE_COMMAND_OK; -+ else -+ { -+ rail->CommandPortEventTrap = FALSE; -+ res = ISSUE_COMMAND_TRAPPED; -+ } -+ -+ EPRINTF1 (DBG_COMMAND, "IssueWaitevent: -> %d\n", res); -+ -+ /* -+ * Re-enable the command processor interrupt as we've finished -+ * polling it. 
-+ */ -+ ENABLE_INT_MASK (dev, INT_CProc | INT_ComQueue); -+ -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+ -+ return (res); -+} -+ -+void -+IssueSetevent (EP3_RAIL *rail, E3_Addr value) -+{ -+ EPRINTF1 (DBG_COMMAND, "IssueSetevent: %08x\n", value); -+ -+ mb(); /* ensure writes to main memory completed */ -+ writel (value, (void *)(rail->CommandPort + offsetof (E3_CommandPort, SetEvent))); -+ mmiob(); /* and flush through IO writes */ -+} -+ -+void -+IssueRunThread (EP3_RAIL *rail, E3_Addr value) -+{ -+ EPRINTF1 (DBG_COMMAND, "IssueRunThread: %08x\n", value); -+ -+ mb(); /* ensure writes to main memory completed */ -+ writel (value, (void *)(rail->CommandPort + offsetof (E3_CommandPort, RunThread))); -+ mmiob(); /* and flush through IO writes */ -+} -+ -+/****************************************************************************************/ -+/* -+ * DMA retry list management -+ */ -+static unsigned DmaRetryTimes[EP_NUM_RETRIES]; -+ -+static void -+ep3_dma_retry (EP3_RAIL *rail) -+{ -+ EP3_COOKIE *cp; -+ int res; -+ int vp; -+ unsigned long flags; -+ int i; -+ -+ kernel_thread_init("ep3_dma_retry"); -+ -+ spin_lock_irqsave (&rail->DmaRetryLock, flags); -+ -+ for (;;) -+ { -+ long yieldAt = lbolt + (hz/10); -+ long retryTime = 0; -+ -+ if (rail->DmaRetryThreadShouldStop) -+ break; -+ -+ for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++) -+ { -+ while (! list_empty (&rail->DmaRetries[i])) -+ { -+ EP3_RETRY_DMA *retry = list_entry (rail->DmaRetries[i].next, EP3_RETRY_DMA, Link); -+ -+ if (! AFTER (lbolt, retry->RetryTime)) -+ break; -+ -+ if (rail->DmaRetryThreadShouldStall || AFTER (lbolt, yieldAt)) -+ goto cant_do_more; -+ -+ EPRINTF2 (DBG_RETRY, "%s: DmaRetryThread: retry %p\n", rail->Generic.Name, retry); -+ EPRINTF5 (DBG_RETRY, "%s: %08x %08x %08x %08x\n", -+ rail->Generic.Name, retry->Dma.s.dma_type, retry->Dma.s.dma_size, retry->Dma.s.dma_source, retry->Dma.s.dma_dest); -+ EPRINTF5 (DBG_RETRY, "%s: %08x %08x %08x %08x\n", -+ rail->Generic.Name, retry->Dma.s.dma_destEvent, retry->Dma.s.dma_destCookieVProc, -+ retry->Dma.s.dma_srcEvent, retry->Dma.s.dma_srcCookieVProc); -+#if defined(DEBUG) -+ if (retry->Dma.s.dma_direction == DMA_WRITE) -+ cp = LookupEventCookie (rail, &rail->CookieTable, retry->Dma.s.dma_srcEvent); -+ else -+ cp = LookupEventCookie (rail, &rail->CookieTable, retry->Dma.s.dma_destEvent); -+ -+ ASSERT (cp != NULL || (retry->Dma.s.dma_srcEvent == 0 && retry->Dma.s.dma_direction == DMA_WRITE && retry->Dma.s.dma_isRemote)); -+ -+ if (cp && cp->Operations->DmaVerify) -+ cp->Operations->DmaVerify (rail, cp->Arg, &retry->Dma); -+#endif -+ -+#if defined(DEBUG_ASSERT) -+ if (retry->Dma.s.dma_direction == DMA_WRITE) -+ vp = retry->Dma.s.dma_destVProc; -+ else -+ vp = retry->Dma.s.dma_srcVProc; -+ -+ ASSERT (!EP_VP_ISDATA(vp) || -+ (rail->Generic.Nodes[EP_VP_TO_NODE(vp)].State >= EP_NODE_CONNECTED && -+ rail->Generic.Nodes[EP_VP_TO_NODE(vp)].State <= EP_NODE_LOCAL_PASSIVATE)); -+#endif -+ spin_unlock_irqrestore (&rail->DmaRetryLock, flags); -+ res = IssueDma (rail, &(retry->Dma), i, TRUE); -+ spin_lock_irqsave (&rail->DmaRetryLock, flags); -+ -+ if (res != ISSUE_COMMAND_OK) -+ goto cant_do_more; -+ -+ /* Command issued, so remove from list, and add to free list */ -+ list_del (&retry->Link); -+ list_add (&retry->Link, &rail->DmaRetryFreeList); -+ } -+ } -+ cant_do_more: -+ -+ for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++) -+ { -+ if (!list_empty (&rail->DmaRetries[i])) -+ { -+ EP3_RETRY_DMA *retry = list_entry (rail->DmaRetries[i].next, EP3_RETRY_DMA, Link); -+ -+ 
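-+		/* track the earliest RetryTime across all non-empty retry lists */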
retryTime = retryTime ? MIN(retryTime, retry->RetryTime) : retry->RetryTime; -+ } -+ } -+ -+ if (retryTime && !AFTER (retryTime, lbolt)) -+ retryTime = lbolt + 1; -+ -+ do { -+ EPRINTF3 (DBG_RETRY, "%s: ep_cm_retry: %s %lx\n", rail->Generic.Name, rail->DmaRetryThreadShouldStall ? "stalled" : "sleeping", retryTime); -+ -+ if (rail->DmaRetryTime == 0 || (retryTime != 0 && retryTime < rail->DmaRetryTime)) -+ rail->DmaRetryTime = retryTime; -+ -+ rail->DmaRetrySleeping = TRUE; -+ -+ if (rail->DmaRetryThreadShouldStall) /* wakeup threads waiting in StallDmaRetryThread */ -+ kcondvar_wakeupall (&rail->DmaRetryWait, &rail->DmaRetryLock); /* for us to really go to sleep for good. */ -+ -+ if (rail->DmaRetryTime == 0 || rail->DmaRetryThreadShouldStall) -+ kcondvar_wait (&rail->DmaRetryWait, &rail->DmaRetryLock, &flags); -+ else -+ kcondvar_timedwait (&rail->DmaRetryWait, &rail->DmaRetryLock, &flags, rail->DmaRetryTime); -+ -+ rail->DmaRetrySleeping = FALSE; -+ -+ } while (rail->DmaRetryThreadShouldStall); -+ -+ rail->DmaRetryTime = 0; -+ } -+ -+ rail->DmaRetryThreadStopped = 1; -+ kcondvar_wakeupall (&rail->DmaRetryWait, &rail->DmaRetryLock); -+ spin_unlock_irqrestore (&rail->DmaRetryLock, flags); -+ -+ kernel_thread_exit(); -+} -+ -+void -+StallDmaRetryThread (EP3_RAIL *rail) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&rail->DmaRetryLock, flags); -+ rail->DmaRetryThreadShouldStall++; -+ -+ while (! rail->DmaRetrySleeping) -+ kcondvar_wait (&rail->DmaRetryWait, &rail->DmaRetryLock, &flags); -+ spin_unlock_irqrestore (&rail->DmaRetryLock, flags); -+} -+ -+void -+ResumeDmaRetryThread (EP3_RAIL *rail) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&rail->DmaRetryLock, flags); -+ -+ ASSERT (rail->DmaRetrySleeping); -+ -+ if (--rail->DmaRetryThreadShouldStall == 0) -+ { -+ rail->DmaRetrySleeping = 0; -+ kcondvar_wakeupone (&rail->DmaRetryWait, &rail->DmaRetryLock); -+ } -+ spin_unlock_irqrestore (&rail->DmaRetryLock, flags); -+} -+ -+int -+InitialiseDmaRetries (EP3_RAIL *rail) -+{ -+ int i; -+ -+ spin_lock_init (&rail->DmaRetryLock); -+ kcondvar_init (&rail->DmaRetryWait); -+ -+ for (i = 0; i < EP_NUM_RETRIES; i++) -+ INIT_LIST_HEAD (&rail->DmaRetries[i]); -+ -+ INIT_LIST_HEAD (&rail->DmaRetryFreeList); -+ -+ DmaRetryTimes[EP_RETRY_HIGH_PRI] = EP_RETRY_HIGH_PRI_TIME; -+ -+ for (i =0 ; i < EP_NUM_BACKOFF; i++) -+ DmaRetryTimes[EP_RETRY_HIGH_PRI_RETRY+i] = EP_RETRY_HIGH_PRI_TIME << i; -+ -+ DmaRetryTimes[EP_RETRY_LOW_PRI] = EP_RETRY_LOW_PRI_TIME; -+ -+ for (i =0 ; i < EP_NUM_BACKOFF; i++) -+ DmaRetryTimes[EP_RETRY_LOW_PRI_RETRY+i] = EP_RETRY_LOW_PRI_TIME << i; -+ -+ DmaRetryTimes[EP_RETRY_ANONYMOUS] = EP_RETRY_ANONYMOUS_TIME; -+ DmaRetryTimes[EP_RETRY_NETERR] = EP_RETRY_NETERR_TIME; -+ -+ rail->DmaRetryInitialised = 1; -+ -+ if (kernel_thread_create (ep3_dma_retry, (void *) rail) == 0) -+ { -+ spin_lock_destroy (&rail->DmaRetryLock); -+ return (ENOMEM); -+ } -+ -+ rail->DmaRetryThreadStarted = 1; -+ -+ return (ESUCCESS); -+} -+ -+void -+DestroyDmaRetries (EP3_RAIL *rail) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave (&rail->DmaRetryLock, flags); -+ rail->DmaRetryThreadShouldStop = 1; -+ while (rail->DmaRetryThreadStarted && !rail->DmaRetryThreadStopped) -+ { -+ kcondvar_wakeupall (&rail->DmaRetryWait, &rail->DmaRetryLock); -+ kcondvar_wait (&rail->DmaRetryWait, &rail->DmaRetryLock, &flags); -+ } -+ rail->DmaRetryThreadStarted = 0; -+ rail->DmaRetryThreadStopped = 0; -+ rail->DmaRetryThreadShouldStop = 0; -+ rail->DmaRetryInitialised = 0; -+ -+ spin_unlock_irqrestore 
(&rail->DmaRetryLock, flags); -+ -+    /* Everyone should have given back their retry dma's by now */ -+    ASSERT (rail->DmaRetryReserved == 0); -+ -+    while (! list_empty (&rail->DmaRetryFreeList)) -+    { -+	EP3_RETRY_DMA *retry = list_entry (rail->DmaRetryFreeList.next, EP3_RETRY_DMA, Link); -+	 -+	list_del (&retry->Link); -+	 -+	KMEM_FREE (retry, sizeof (EP3_RETRY_DMA)); -+    } -+ -+    kcondvar_destroy (&rail->DmaRetryWait); -+    spin_lock_destroy (&rail->DmaRetryLock); -+} -+ -+int -+ReserveDmaRetries (EP3_RAIL *rail, int count, EP_ATTRIBUTE attr) -+{ -+    EP3_RETRY_DMA *retry; -+    int            remaining = count; -+    unsigned long  flags; -+ -+    spin_lock_irqsave (&rail->DmaRetryLock, flags); -+ -+    if (remaining <= (rail->DmaRetryCount - rail->DmaRetryReserved)) -+    { -+	rail->DmaRetryReserved += remaining; -+	 -+	spin_unlock_irqrestore (&rail->DmaRetryLock, flags); -+	return (ESUCCESS); -+    } -+ -+    remaining -= (rail->DmaRetryCount - rail->DmaRetryReserved); -+ -+    rail->DmaRetryReserved = rail->DmaRetryCount; -+ -+    spin_unlock_irqrestore (&rail->DmaRetryLock, flags); -+     -+    while (remaining) -+    { -+	KMEM_ALLOC (retry, EP3_RETRY_DMA *, sizeof (EP3_RETRY_DMA), !(attr & EP_NO_SLEEP)); -+	 -+	if (retry == NULL) -+	    goto failed; -+	 -+	/* clear E3_DMA */ -+	bzero((char *)(&(retry->Dma.s)), sizeof(E3_DMA)); -+ -+	remaining--; -+ -+	spin_lock_irqsave (&rail->DmaRetryLock, flags); -+	 -+	list_add (&retry->Link, &rail->DmaRetryFreeList); -+	 -+	rail->DmaRetryCount++; -+	rail->DmaRetryReserved++; -+	 -+	spin_unlock_irqrestore (&rail->DmaRetryLock, flags); -+    } -+    return (ESUCCESS); -+ -+ failed: -+    spin_lock_irqsave (&rail->DmaRetryLock, flags); -+    rail->DmaRetryReserved -= (count - remaining); -+    spin_unlock_irqrestore (&rail->DmaRetryLock, flags); -+    return (ENOMEM); -+} -+ -+void -+ReleaseDmaRetries (EP3_RAIL *rail, int count) -+{ -+    unsigned long flags; -+ -+    spin_lock_irqsave (&rail->DmaRetryLock, flags); -+    rail->DmaRetryReserved -= count; -+    spin_unlock_irqrestore (&rail->DmaRetryLock, flags); -+} -+ -+void -+QueueDmaForRetry (EP3_RAIL *rail, E3_DMA_BE *dma, int interval) -+{ -+    EP3_RETRY_DMA *retry; -+    unsigned long flags; -+ -+    /* -+     * When requeueing DMAs they must never be "READ" dma's since -+     * these would fetch the DMA descriptor from the retry descriptor -+     */ -+    ASSERT (dma->s.dma_direction == DMA_WRITE || dma->s.dma_direction == DMA_READ_REQUEUE); -+    ASSERT (dma->s.dma_direction == DMA_WRITE ?
-+	    EP_VP_TO_NODE(dma->s.dma_srcVProc) == rail->Generic.Position.pos_nodeid : -+	    EP_VP_TO_NODE(dma->s.dma_destVProc) == rail->Generic.Position.pos_nodeid); -+ -+    spin_lock_irqsave (&rail->DmaRetryLock, flags); -+ -+    EP_ASSERT (&rail->Generic, !list_empty (&rail->DmaRetryFreeList)); -+ -+    /* take an item off the free list */ -+    retry = list_entry (rail->DmaRetryFreeList.next, EP3_RETRY_DMA, Link); -+ -+    list_del (&retry->Link); -+ -+    EPRINTF5 (DBG_RETRY, "%s: QueueDmaForRetry: %08x %08x %08x %08x\n", rail->Generic.Name, -+	      dma->s.dma_type, dma->s.dma_size, dma->s.dma_source, dma->s.dma_dest); -+    EPRINTF5 (DBG_RETRY, "%s:                   %08x %08x %08x %08x\n",rail->Generic.Name, -+	      dma->s.dma_destEvent, dma->s.dma_destCookieVProc, -+	      dma->s.dma_srcEvent, dma->s.dma_srcCookieVProc); -+	     -+    /* copy the DMA into the retry descriptor */ -+    retry->Dma.s.dma_type            = dma->s.dma_type; -+    retry->Dma.s.dma_size            = dma->s.dma_size; -+    retry->Dma.s.dma_source          = dma->s.dma_source; -+    retry->Dma.s.dma_dest            = dma->s.dma_dest; -+    retry->Dma.s.dma_destEvent       = dma->s.dma_destEvent; -+    retry->Dma.s.dma_destCookieVProc = dma->s.dma_destCookieVProc; -+    retry->Dma.s.dma_srcEvent        = dma->s.dma_srcEvent; -+    retry->Dma.s.dma_srcCookieVProc  = dma->s.dma_srcCookieVProc; -+     -+    retry->RetryTime = lbolt + DmaRetryTimes[interval]; -+     -+    /* chain onto the end of the appropriate retry list */ -+    list_add_tail (&retry->Link, &rail->DmaRetries[interval]); -+ -+    /* now wakeup the retry thread */ -+    if (rail->DmaRetryTime == 0 || retry->RetryTime < rail->DmaRetryTime) -+	rail->DmaRetryTime = retry->RetryTime; -+ -+    if (rail->DmaRetrySleeping && !rail->DmaRetryThreadShouldStall) -+    { -+	rail->DmaRetrySleeping = 0; -+	kcondvar_wakeupone (&rail->DmaRetryWait, &rail->DmaRetryLock); -+    } -+ -+    spin_unlock_irqrestore (&rail->DmaRetryLock, flags); -+} -+ -+void -+QueueDmaOnStalledList (EP3_RAIL *rail, E3_DMA_BE *dma) -+{ -+    EP_NODE_RAIL  *nodeRail = &rail->Generic.Nodes[dma->s.dma_direction == DMA_WRITE ? -+						   EP_VP_TO_NODE(dma->s.dma_srcVProc) : -+						   EP_VP_TO_NODE(dma->s.dma_destVProc)]; -+    EP3_RETRY_DMA *retry; -+    unsigned long  flags; -+ -+    /* -+     * When requeueing DMAs they must never be "READ" dma's since -+     * these would fetch the DMA descriptor from the retry descriptor -+     */ -+    ASSERT (dma->s.dma_direction == DMA_WRITE || dma->s.dma_direction == DMA_READ_REQUEUE); -+    ASSERT (dma->s.dma_direction == DMA_WRITE ?
-+	    EP_VP_TO_NODE(dma->s.dma_srcVProc) == rail->Generic.Position.pos_nodeid : -+	    EP_VP_TO_NODE(dma->s.dma_destVProc) == rail->Generic.Position.pos_nodeid); -+ -+    spin_lock_irqsave (&rail->DmaRetryLock, flags); -+ -+    EP_ASSERT (&rail->Generic, !list_empty (&rail->DmaRetryFreeList)); -+ -+    /* take an item off the free list */ -+    retry = list_entry (rail->DmaRetryFreeList.next, EP3_RETRY_DMA, Link); -+ -+    list_del (&retry->Link); -+ -+    EPRINTF5 (DBG_RETRY, "%s: QueueDmaOnStalledList: %08x %08x %08x %08x\n", rail->Generic.Name, -+	      dma->s.dma_type, dma->s.dma_size, dma->s.dma_source, dma->s.dma_dest); -+    EPRINTF5 (DBG_RETRY, "%s:                        %08x %08x %08x %08x\n", rail->Generic.Name, -+	      dma->s.dma_destEvent, dma->s.dma_destCookieVProc, -+	      dma->s.dma_srcEvent, dma->s.dma_srcCookieVProc); -+	     -+    /* copy the DMA into the retry descriptor */ -+    retry->Dma.s.dma_type            = dma->s.dma_type; -+    retry->Dma.s.dma_size            = dma->s.dma_size; -+    retry->Dma.s.dma_source          = dma->s.dma_source; -+    retry->Dma.s.dma_dest            = dma->s.dma_dest; -+    retry->Dma.s.dma_destEvent       = dma->s.dma_destEvent; -+    retry->Dma.s.dma_destCookieVProc = dma->s.dma_destCookieVProc; -+    retry->Dma.s.dma_srcEvent        = dma->s.dma_srcEvent; -+    retry->Dma.s.dma_srcCookieVProc  = dma->s.dma_srcCookieVProc; -+ -+    /* chain onto the node cancelled dma list */ -+    list_add_tail (&retry->Link, &nodeRail->StalledDmas); -+ -+    spin_unlock_irqrestore (&rail->DmaRetryLock, flags); -+} -+ -+void -+FreeStalledDmas (EP3_RAIL *rail, unsigned int nodeId) -+{ -+    EP_NODE_RAIL     *nodeRail = &rail->Generic.Nodes[nodeId]; -+    struct list_head *el, *nel; -+    unsigned long     flags; -+ -+    spin_lock_irqsave (&rail->DmaRetryLock, flags); -+    list_for_each_safe (el, nel, &nodeRail->StalledDmas) { -+	list_del (el); -+	list_add (el, &rail->DmaRetryFreeList); -+    } -+    spin_unlock_irqrestore (&rail->DmaRetryLock, flags); -+} -+ -+/****************************************************************************************/ -+/* -+ * Connection management.
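-+ * -+ * These routines quiesce the elan processors with QueueHaltOperation() -+ * and then walk (or patch) the run queues while they are halted.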
-+ */ -+static void -+DiscardingHaltOperation (ELAN3_DEV *dev, void *arg) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) arg; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ rail->HaltOpCompleted = 1; -+ kcondvar_wakeupall (&rail->HaltOpSleep, &dev->IntrLock); -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+} -+ -+typedef struct { -+ EP3_RAIL *rail; -+ sdramaddr_t qaddr; -+} SetQueueFullData; -+ -+static void -+SetQueueLockedOperation (ELAN3_DEV *dev, void *arg) -+{ -+ SetQueueFullData *data = (SetQueueFullData *) arg; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->IntrLock, flags); -+ -+ elan3_sdram_writel (dev, data->qaddr, E3_QUEUE_LOCKED | elan3_sdram_readl(dev, data->qaddr)); -+ -+ data->rail->HaltOpCompleted = 1; -+ kcondvar_wakeupall (&data->rail->HaltOpSleep, &dev->IntrLock); -+ -+ spin_unlock_irqrestore (&dev->IntrLock, flags); -+} -+ -+static void -+FlushDmaQueuesHaltOperation (ELAN3_DEV *dev, void *arg) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) arg; -+ sdramaddr_t FPtr, BPtr; -+ sdramaddr_t Base, Top; -+ E3_DMA_BE dma; -+ EP_NODE_RAIL *node; -+ int vp; -+ unsigned long flags; -+ -+ ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc.s.FSR)) == 0); -+ ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0.s.FSR.Status)) == 0); -+ ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData1.s.FSR.Status)) == 0); -+ ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData2.s.FSR.Status)) == 0); -+ ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData3.s.FSR.Status)) == 0); -+ -+ FPtr = read_reg32 (dev, DProc_SysCntx_FPtr); -+ BPtr = read_reg32 (dev, DProc_SysCntx_BPtr); -+ Base = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0]); -+ Top = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[E3_SysCntxQueueSize-1]); -+ -+ while (FPtr != BPtr) -+ { -+ elan3_sdram_copyq_from_sdram (dev, FPtr, &dma, sizeof (E3_DMA_BE)); -+ -+ EPRINTF5 (DBG_DISCON, "%s: FlushDmaQueuesHaltOperation: %08x %08x %08x %08x\n", rail->Generic.Name, -+ dma.s.dma_type, dma.s.dma_size, dma.s.dma_source, dma.s.dma_dest); -+ EPRINTF5 (DBG_DISCON, "%s: %08x %08x %08x %08x\n", rail->Generic.Name, -+ dma.s.dma_destEvent, dma.s.dma_destCookieVProc, -+ dma.s.dma_srcEvent, dma.s.dma_srcCookieVProc); -+ -+ ASSERT ((dma.s.dma_u.s.Context & SYS_CONTEXT_BIT) != 0); -+ -+ if (dma.s.dma_direction == DMA_WRITE) -+ vp = dma.s.dma_destVProc; -+ else -+ vp = dma.s.dma_srcVProc; -+ -+ node = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)]; -+ -+ ASSERT (!EP_VP_ISDATA(vp) || (node->State >= EP_NODE_CONNECTED && node->State <= EP_NODE_LOCAL_PASSIVATE)); -+ -+ if (EP_VP_ISDATA(vp) && node->State == EP_NODE_LOCAL_PASSIVATE) -+ { -+ /* -+ * This is a DMA going to the node which is being removed, -+ * so move it onto the node dma list where it will get -+ * handled later. -+ */ -+ EPRINTF1 (DBG_DISCON, "%s: FlushDmaQueuesHaltOperation: move dma to cancelled list\n", rail->Generic.Name); -+ -+ if (dma.s.dma_direction != DMA_WRITE) -+ { -+ /* for read dma's set the DMA_READ_REQUEUE bits as the dma_source has been -+ * modified by the elan to point at the dma in the rxd where it was issued -+ * from */ -+ dma.s.dma_direction = (dma.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE; -+ } -+ -+ QueueDmaOnStalledList (rail, &dma); -+ -+ /* -+ * Remove the DMA from the queue by replacing it with one with -+ * zero size and no events. 
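-+	     * The elan still consumes the queue slot, but a zero-sized DMA -+	     * with no events is effectively a no-op.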
-+ *
-+ * NOTE: we must preserve the SYS_CONTEXT_BIT since the Elan uses this
-+ * to mark the appropriate run queue as empty.
-+ */
-+ dma.s.dma_type = (SYS_CONTEXT_BIT << 16);
-+ dma.s.dma_size = 0;
-+ dma.s.dma_source = (E3_Addr) 0;
-+ dma.s.dma_dest = (E3_Addr) 0;
-+ dma.s.dma_destEvent = (E3_Addr) 0;
-+ dma.s.dma_destCookieVProc = 0;
-+ dma.s.dma_srcEvent = (E3_Addr) 0;
-+ dma.s.dma_srcCookieVProc = 0;
-+
-+ elan3_sdram_copyq_to_sdram (dev, &dma, FPtr, sizeof (E3_DMA_BE));
-+ }
-+
-+ FPtr = (FPtr == Top) ? Base : FPtr + sizeof (E3_DMA);
-+ }
-+
-+ spin_lock_irqsave (&dev->IntrLock, flags);
-+ rail->HaltOpCompleted = 1;
-+ kcondvar_wakeupall (&rail->HaltOpSleep, &dev->IntrLock);
-+ spin_unlock_irqrestore (&dev->IntrLock, flags);
-+}
-+
-+void
-+SetQueueLocked (EP3_RAIL *rail, sdramaddr_t qaddr)
-+{
-+ ELAN3_DEV *dev = rail->Device;
-+ SetQueueFullData data;
-+ unsigned long flags;
-+
-+ /* Ensure that the context filter changes have been seen by halting
-+ * then restarting the inputters - this also ensures that any setevent
-+ * commands used to issue dma's have completed and any trap has been
-+ * handled. */
-+ data.rail = rail;
-+ data.qaddr = qaddr;
-+
-+ kmutex_lock (&rail->HaltOpMutex);
-+ spin_lock_irqsave (&dev->IntrLock, flags);
-+ QueueHaltOperation (dev, 0, NULL, INT_DiscardingSysCntx | INT_TProcHalted, SetQueueLockedOperation, &data);
-+
-+ while (! rail->HaltOpCompleted)
-+ kcondvar_wait (&rail->HaltOpSleep, &dev->IntrLock, &flags);
-+ rail->HaltOpCompleted = 0;
-+
-+ spin_unlock_irqrestore (&dev->IntrLock, flags);
-+ kmutex_unlock (&rail->HaltOpMutex);
-+}
-+
-+void
-+ep3_flush_filters (EP_RAIL *r)
-+{
-+ EP3_RAIL *rail = (EP3_RAIL *) r;
-+ ELAN3_DEV *dev = rail->Device;
-+ unsigned long flags;
-+
-+ /* Ensure that the context filter changes have been seen by halting
-+ * then restarting the inputters - this also ensures that any setevent
-+ * commands used to issue dma's have completed and any trap has been
-+ * handled. */
-+ kmutex_lock (&rail->HaltOpMutex);
-+ spin_lock_irqsave (&dev->IntrLock, flags);
-+ QueueHaltOperation (dev, 0, NULL, INT_DiscardingSysCntx, DiscardingHaltOperation, rail);
-+
-+ while (! rail->HaltOpCompleted)
-+ kcondvar_wait (&rail->HaltOpSleep, &dev->IntrLock, &flags);
-+ rail->HaltOpCompleted = 0;
-+ spin_unlock_irqrestore (&dev->IntrLock, flags);
-+ kmutex_unlock (&rail->HaltOpMutex);
-+}
-+
-+void
-+ep3_flush_queues (EP_RAIL *r)
-+{
-+ EP3_RAIL *rail = (EP3_RAIL *) r;
-+ ELAN3_DEV *dev = rail->Device;
-+ struct list_head *el;
-+ struct list_head *nel;
-+ EP_NODE_RAIL *node;
-+ unsigned long flags;
-+ int vp, i;
-+
-+ ASSERT (NO_LOCKS_HELD);
-+
-+ /* First - stall the dma retry thread, so that it will no longer
-+ * restart any dma's from the retry lists. */
-+ StallDmaRetryThread (rail);
-+
-+ /* Second - queue a halt operation to flush through all DMA's which are executing
-+ * or on the run queue. */
-+ kmutex_lock (&rail->HaltOpMutex);
-+ spin_lock_irqsave (&dev->IntrLock, flags);
-+ QueueHaltOperation (dev, 0, NULL, INT_DProcHalted | INT_TProcHalted, FlushDmaQueuesHaltOperation, rail);
-+ while (! rail->HaltOpCompleted)
-+ kcondvar_wait (&rail->HaltOpSleep, &dev->IntrLock, &flags);
-+ rail->HaltOpCompleted = 0;
-+ spin_unlock_irqrestore (&dev->IntrLock, flags);
-+ kmutex_unlock (&rail->HaltOpMutex);
-+
-+ /* Third - run down the dma retry lists and move all entries to the cancelled
-+ * list.
Any dma's which were on the run queues have already been
-+ * moved there */
-+ spin_lock_irqsave (&rail->DmaRetryLock, flags);
-+ for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
-+ {
-+ list_for_each_safe (el, nel, &rail->DmaRetries[i]) {
-+ EP3_RETRY_DMA *retry = list_entry (el, EP3_RETRY_DMA, Link);
-+
-+ if (retry->Dma.s.dma_direction == DMA_WRITE)
-+ vp = retry->Dma.s.dma_destVProc;
-+ else
-+ vp = retry->Dma.s.dma_srcVProc;
-+
-+ node = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)];
-+
-+ ASSERT (!EP_VP_ISDATA(vp) || (node->State >= EP_NODE_CONNECTED && node->State <= EP_NODE_LOCAL_PASSIVATE));
-+
-+ if (EP_VP_ISDATA(vp) && node->State == EP_NODE_LOCAL_PASSIVATE)
-+ {
-+ EPRINTF5 (DBG_DISCON, "%s: FlushDmaQueues: %08x %08x %08x %08x\n",rail->Generic.Name,
-+ retry->Dma.s.dma_type, retry->Dma.s.dma_size, retry->Dma.s.dma_source, retry->Dma.s.dma_dest);
-+ EPRINTF5 (DBG_DISCON, "%s: %08x %08x %08x %08x\n", rail->Generic.Name,
-+ retry->Dma.s.dma_destEvent, retry->Dma.s.dma_destCookieVProc,
-+ retry->Dma.s.dma_srcEvent, retry->Dma.s.dma_srcCookieVProc);
-+
-+ list_del (&retry->Link);
-+
-+ list_add_tail (&retry->Link, &node->StalledDmas);
-+ }
-+ }
-+ }
-+ spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
-+
-+ /* Finally - allow the dma retry thread to run again */
-+ ResumeDmaRetryThread (rail);
-+}
-+
-+/****************************************************************************************/
-+/* NOTE - we require that all cookies are non-zero, which is
-+ * achieved because EP_VP_DATA() is non-zero for all
-+ * nodes */
-+E3_uint32
-+LocalCookie (EP3_RAIL *rail, unsigned remoteNode)
-+{
-+ E3_uint32 cookie;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave (&rail->CookieLock, flags);
-+ cookie = DMA_COOKIE (rail->MainCookies[remoteNode], EP_VP_DATA(rail->Generic.Position.pos_nodeid));
-+ spin_unlock_irqrestore (&rail->CookieLock, flags);
-+
-+ /* Main processor cookie for srcCookie - this is what is sent
-+ * to the remote node along with the setevent from the put
-+ * or the dma descriptor for a get */
-+ return (cookie);
-+}
-+
-+E3_uint32
-+RemoteCookie (EP3_RAIL *rail, u_int remoteNode)
-+{
-+ uint32_t cookie;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave (&rail->CookieLock, flags);
-+ cookie = DMA_REMOTE_COOKIE (rail->MainCookies[remoteNode], EP_VP_DATA(remoteNode));
-+ spin_unlock_irqrestore (&rail->CookieLock, flags);
-+
-+ /* Main processor cookie for dstCookie - this is the cookie
-+ * that the "remote put" dma uses for its setevent packets for
-+ * a get dma */
-+
-+ return (cookie);
-+}
-+
-+/****************************************************************************************/
-+/*
-+ * Event Cookie management.
-+ *
-+ * We find the ep_cookie in one of two ways:
-+ * 1) for block copy events
-+ * the cookie value is stored in the ev_Source - for EVIRQ events
-+ * it is also stored in the ev_Type
-+ * 2) for normal events
-+ * we just use the event address.
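-+ *
-+ * As an illustration only, the dispatch described above amounts to
-+ * (a sketch using the names defined below):
-+ *
-+ * if (ev_Type & EV_TYPE_BCOPY)
-+ * cp = LookupCookie (table, ev_Source & ~EV_WCOPY);
-+ * else
-+ * cp = LookupCookie (table, eaddr);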
-+ */ -+void -+InitialiseCookieTable (EP3_COOKIE_TABLE *table) -+{ -+ register int i; -+ -+ spin_lock_init (&table->Lock); -+ -+ for (i = 0; i < EP3_COOKIE_HASH_SIZE; i++) -+ table->Entries[i] = NULL; -+} -+ -+void -+DestroyCookieTable (EP3_COOKIE_TABLE *table) -+{ -+ register int i; -+ -+ for (i = 0; i < EP3_COOKIE_HASH_SIZE; i++) -+ if (table->Entries[i]) -+ printk ("DestroyCookieTable: entry %d not empty\n", i); -+ -+ spin_lock_destroy (&table->Lock); -+} -+ -+void -+RegisterCookie (EP3_COOKIE_TABLE *table, EP3_COOKIE *cp, E3_uint32 cookie, EP3_COOKIE_OPS *ops, void *arg) -+{ -+ EP3_COOKIE *tcp; -+ int hashval = EP3_HASH_COOKIE(cookie); -+ unsigned long flags; -+ -+ spin_lock_irqsave (&table->Lock, flags); -+ -+ cp->Operations = ops; -+ cp->Arg = arg; -+ cp->Cookie = cookie; -+ -+#if defined(DEBUG) -+ /* Check that the cookie is unique */ -+ for (tcp = table->Entries[hashval]; tcp; tcp = tcp->Next) -+ if (tcp->Cookie == cookie) -+ panic ("RegisterEventCookie: non unique cookie\n"); -+#endif -+ cp->Next = table->Entries[hashval]; -+ -+ table->Entries[hashval] = cp; -+ -+ spin_unlock_irqrestore (&table->Lock, flags); -+} -+ -+void -+DeregisterCookie (EP3_COOKIE_TABLE *table, EP3_COOKIE *cp) -+{ -+ EP3_COOKIE **predCookiep; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&table->Lock, flags); -+ -+ for (predCookiep = &table->Entries[EP3_HASH_COOKIE (cp->Cookie)]; *predCookiep; predCookiep = &(*predCookiep)->Next) -+ { -+ if (*predCookiep == cp) -+ { -+ *predCookiep = cp->Next; -+ break; -+ } -+ } -+ -+ spin_unlock_irqrestore (&table->Lock, flags); -+ -+ cp->Operations = NULL; -+ cp->Arg = NULL; -+ cp->Cookie = 0; -+ cp->Next = NULL; -+} -+ -+EP3_COOKIE * -+LookupCookie (EP3_COOKIE_TABLE *table, E3_Addr cookie) -+{ -+ EP3_COOKIE *cp; -+ unsigned long flags; -+ -+ spin_lock_irqsave (&table->Lock, flags); -+ -+ for (cp = table->Entries[EP3_HASH_COOKIE(cookie)]; cp; cp = cp->Next) -+ if (cp->Cookie == cookie) -+ break; -+ -+ spin_unlock_irqrestore (&table->Lock, flags); -+ return (cp); -+} -+ -+EP3_COOKIE * -+LookupEventCookie (EP3_RAIL *rail, EP3_COOKIE_TABLE *table, E3_Addr eaddr) -+{ -+ sdramaddr_t event; -+ E3_uint32 type; -+ -+ if ((event = ep_elan2sdram (&rail->Generic, eaddr)) != (sdramaddr_t) 0) -+ { -+ type = elan3_sdram_readl (rail->Device, event + offsetof (E3_BlockCopyEvent, ev_Type)); -+ -+ if (type & EV_TYPE_BCOPY) -+ return (LookupCookie (table, elan3_sdram_readl (rail->Device, event + offsetof (E3_BlockCopyEvent, ev_Source)) & ~EV_WCOPY)); -+ else -+ return (LookupCookie (table, eaddr)); -+ } -+ -+ return (NULL); -+} -+ -+/****************************************************************************************/ -+/* -+ * Elan context operations - note only support interrupt ops. 
-+ */
-+static int ep3_event (ELAN3_CTXT *ctxt, E3_uint32 cookie, int flag);
-+static int ep3_dprocTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap);
-+static int ep3_tprocTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap);
-+static int ep3_iprocTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, int chan);
-+static int ep3_cprocTrap (ELAN3_CTXT *ctxt, COMMAND_TRAP *trap);
-+static int ep3_cprocReissue (ELAN3_CTXT *ctxt, CProcTrapBuf_BE *tbuf);
-+
-+static E3_uint8 ep3_load8 (ELAN3_CTXT *ctxt, E3_Addr addr);
-+static void ep3_store8 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint8 val);
-+static E3_uint16 ep3_load16 (ELAN3_CTXT *ctxt, E3_Addr addr);
-+static void ep3_store16 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint16 val);
-+static E3_uint32 ep3_load32 (ELAN3_CTXT *ctxt, E3_Addr addr);
-+static void ep3_store32 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint32 val);
-+static E3_uint64 ep3_load64 (ELAN3_CTXT *ctxt, E3_Addr addr);
-+static void ep3_store64 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint64 val);
-+
-+ELAN3_OPS ep3_elan3_ops =
-+{
-+ ELAN3_OPS_VERSION, /* Version */
-+
-+ NULL, /* Exception */
-+ NULL, /* GetWordItem */
-+ NULL, /* GetBlockItem */
-+ NULL, /* PutWordItem */
-+ NULL, /* PutBlockItem */
-+ NULL, /* PutbackItem */
-+ NULL, /* FreeWordItem */
-+ NULL, /* FreeBlockItem */
-+ NULL, /* CountItems */
-+ ep3_event, /* Event */
-+ NULL, /* SwapIn */
-+ NULL, /* SwapOut */
-+ NULL, /* FreePrivate */
-+ NULL, /* FixupNetworkError */
-+ ep3_dprocTrap, /* DProcTrap */
-+ ep3_tprocTrap, /* TProcTrap */
-+ ep3_iprocTrap, /* IProcTrap */
-+ ep3_cprocTrap, /* CProcTrap */
-+ ep3_cprocReissue, /* CProcReissue */
-+ NULL, /* StartFaultCheck */
-+ NULL, /* EndFaultCheck */
-+ ep3_load8, /* Load8 */
-+ ep3_store8, /* Store8 */
-+ ep3_load16, /* Load16 */
-+ ep3_store16, /* Store16 */
-+ ep3_load32, /* Load32 */
-+ ep3_store32, /* Store32 */
-+ ep3_load64, /* Load64 */
-+ ep3_store64, /* Store64 */
-+};
-+
-+static int
-+ep3_event (ELAN3_CTXT *ctxt, E3_uint32 cookie, int flag)
-+{
-+ EP3_RAIL *rail = (EP3_RAIL *) ctxt->Private;
-+ EP3_COOKIE *cp = LookupCookie (&rail->CookieTable, cookie);
-+
-+ if (cp == NULL)
-+ {
-+ printk ("ep3_event: cannot find event cookie for %x\n", cookie);
-+ return (OP_HANDLED);
-+ }
-+
-+ if (cp->Operations->Event)
-+ cp->Operations->Event(rail, cp->Arg);
-+
-+ return (OP_HANDLED);
-+}
-+
-+/* Trap interface */
-+int
-+ep3_dprocTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap)
-+{
-+ EP3_RAIL *rail = (EP3_RAIL *) ctxt->Private;
-+ ELAN3_DEV *dev = rail->Device;
-+ EP3_COOKIE *cp;
-+ E3_FaultSave_BE *FaultArea;
-+ E3_uint16 vp;
-+ int validTrap;
-+ int numFaults;
-+ int i;
-+ sdramaddr_t event;
-+ E3_uint32 type;
-+ sdramaddr_t dma;
-+ E3_DMA_BE dmabe;
-+ int status = EAGAIN;
-+
-+ EPRINTF4 (DBG_EPTRAP, "ep3_dprocTrap: WakeupFnt=%x Cntx=%x SuspAddr=%x TrapType=%s\n",
-+ trap->Status.s.WakeupFunction, trap->Status.s.Context,
-+ trap->Status.s.SuspendAddr, MiToName (trap->Status.s.TrapType));
-+ EPRINTF4 (DBG_EPTRAP, " type %08x size %08x source %08x dest %08x\n",
-+ trap->Desc.s.dma_type, trap->Desc.s.dma_size, trap->Desc.s.dma_source, trap->Desc.s.dma_dest);
-+ EPRINTF2 (DBG_EPTRAP, " Dest event %08x cookie/proc %08x\n",
-+ trap->Desc.s.dma_destEvent, trap->Desc.s.dma_destCookieVProc);
-+ EPRINTF2 (DBG_EPTRAP, " Source event %08x cookie/proc %08x\n",
-+ trap->Desc.s.dma_srcEvent, trap->Desc.s.dma_srcCookieVProc);
-+
-+ ASSERT (trap->Status.s.Context & SYS_CONTEXT_BIT);
-+
-+ switch (trap->Status.s.TrapType)
-+ {
-+ case MI_DmaPacketTimedOutOrPacketError:
-+ if (trap->Desc.s.dma_direction == DMA_WRITE)
-+ vp =
trap->Desc.s.dma_destVProc; -+ else -+ vp = trap->Desc.s.dma_srcVProc; -+ -+ if (! trap->PacketInfo.s.PacketTimeout) -+ status = ETIMEDOUT; -+ else -+ { -+ status = EHOSTDOWN; -+ -+ /* XXXX: dma timedout - might want to "restart" tree ? */ -+ } -+ goto retry_dma; -+ -+ case MI_DmaFailCountError: -+ goto retry_dma; -+ -+ case MI_TimesliceDmaQueueOverflow: -+ IncrStat (rail, DprocDmaQueueOverflow); -+ -+ goto retry_dma; -+ -+ case MI_RemoteDmaCommand: -+ case MI_RunDmaCommand: -+ case MI_DequeueNonSysCntxDma: -+ case MI_DequeueSysCntxDma: -+ /* -+ * The DMA processor has trapped due to outstanding prefetches from the previous -+ * dma. The "current" dma has not been consumed, so we just ignore the trap -+ */ -+ return (OP_HANDLED); -+ -+ case MI_EventQueueOverflow: -+ IncrStat (rail, DprocEventQueueOverflow); -+ -+ if ((event = ep_elan2sdram (&rail->Generic, trap->Desc.s.dma_srcEvent)) != (sdramaddr_t) 0 && -+ ((type = elan3_sdram_readl (dev, event + offsetof(E3_Event,ev_Type))) & EV_TYPE_MASK_EVIRQ) == EV_TYPE_EVIRQ) -+ { -+ spin_unlock (&ctxt->Device->IntrLock); -+ ep3_event (ctxt, (type & ~(EV_TYPE_MASK_EVIRQ | EV_TYPE_MASK_BCOPY)), OP_LWP); -+ spin_lock (&ctxt->Device->IntrLock); -+ } -+ return (OP_HANDLED); -+ -+ case MI_DmaQueueOverflow: -+ IncrStat (rail, DprocDmaQueueOverflow); -+ -+ if ((event = ep_elan2sdram (&rail->Generic, trap->Desc.s.dma_srcEvent)) != (sdramaddr_t) 0 && -+ ((type = elan3_sdram_readl (dev, event + offsetof (E3_Event, ev_Type))) & EV_TYPE_MASK_DMA) == EV_TYPE_DMA && -+ (dma = ep_elan2sdram (&rail->Generic, (type & ~EV_TYPE_MASK2))) != (sdramaddr_t) 0) -+ { -+ elan3_sdram_copyq_from_sdram (dev, dma, &dmabe, sizeof (E3_DMA)); -+ -+ /* We only chain together DMA's of the same direction, so since -+ * we took a DmaQueueOverflow trap - this means that DMA which -+ * trapped was a WRITE dma - hence the one we chain to must also -+ * be a WRITE dma. -+ */ -+ ASSERT (dmabe.s.dma_direction == DMA_WRITE); -+ -+ cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_srcEvent); -+ -+#ifdef DEBUG_ASSERT -+ { -+ E3_uint16 vp = dmabe.s.dma_destVProc; -+ EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)]; -+ -+ ASSERT (cp != NULL && (!EP_VP_ISDATA(vp) || (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE))); -+ } -+#endif -+ cp->Operations->DmaRetry (rail, cp->Arg, &dmabe, EAGAIN); -+ -+ return (OP_HANDLED); -+ } -+ -+ panic ("ep3_dprocTrap\n"); -+ return (OP_HANDLED); -+ -+ default: -+ break; -+ } -+ -+ /* If it's a dma which traps past the end of the source, then */ -+ /* just re-issue it */ -+ numFaults = validTrap = (trap->FaultSave.s.FSR.Status != 0); -+ for (i = 0, FaultArea = &trap->Data0; i < 4; i++, FaultArea++) -+ { -+ if (FaultArea->s.FSR.Status != 0) -+ { -+ numFaults++; -+ -+ /* XXXX: Rev B Elans can prefetch data past the end of the dma descriptor */ -+ /* if the fault relates to this, then just ignore it */ -+ if (FaultArea->s.FaultAddress >= (trap->Desc.s.dma_source+trap->Desc.s.dma_size)) -+ { -+ static int i; -+ if (i < 10 && i++ < 10) -+ printk ("ep3_dprocTrap: Rev B prefetch trap error %08x %08x\n", -+ FaultArea->s.FaultAddress, (trap->Desc.s.dma_source+trap->Desc.s.dma_size)); -+ continue; -+ } -+ -+ validTrap++; -+ } -+ } -+ -+ /* -+ * NOTE: for physical errors (uncorrectable ECC/PCI parity errors) the FSR will -+ * be zero - hence we will not see any faults - and none will be valid, -+ * so only ignore a Rev B prefetch trap if we've seen some faults. 
Otherwise
-+ * we can reissue a DMA which has already sent its remote event !
-+ */
-+ if (numFaults != 0 && validTrap == 0)
-+ {
-+ retry_dma:
-+ if (trap->Desc.s.dma_direction == DMA_WRITE)
-+ {
-+ vp = trap->Desc.s.dma_destVProc;
-+ cp = LookupEventCookie (rail, &rail->CookieTable, trap->Desc.s.dma_srcEvent);
-+ }
-+ else
-+ {
-+ ASSERT (EP3_CONTEXT_ISDATA(trap->Desc.s.dma_queueContext) || trap->Desc.s.dma_direction == DMA_READ_REQUEUE);
-+
-+ vp = trap->Desc.s.dma_srcVProc;
-+ cp = LookupEventCookie (rail, &rail->CookieTable, trap->Desc.s.dma_destEvent);
-+
-+ /* for read dma's set the DMA_READ_REQUEUE bits as the dma_source has been
-+ * modified by the elan to point at the dma in the rxd where it was issued
-+ * from */
-+ trap->Desc.s.dma_direction = (trap->Desc.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
-+ }
-+
-+#ifdef DEBUG_ASSERT
-+ {
-+ EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)];
-+
-+ ASSERT (!EP_VP_ISDATA(vp) || (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE));
-+ }
-+#endif
-+
-+ if (cp != NULL)
-+ cp->Operations->DmaRetry (rail, cp->Arg, &trap->Desc, status);
-+ else
-+ {
-+ ASSERT (trap->Desc.s.dma_direction == DMA_WRITE && trap->Desc.s.dma_srcEvent == 0 && trap->Desc.s.dma_isRemote);
-+
-+ QueueDmaForRetry (rail, &trap->Desc, EP_RETRY_ANONYMOUS);
-+ }
-+
-+ return (OP_HANDLED);
-+ }
-+
-+ printk ("ep3_dprocTrap: WakeupFnt=%x Cntx=%x SuspAddr=%x TrapType=%s\n",
-+ trap->Status.s.WakeupFunction, trap->Status.s.Context,
-+ trap->Status.s.SuspendAddr, MiToName (trap->Status.s.TrapType));
-+ printk (" FaultAddr=%x EventAddr=%x FSR=%x\n",
-+ trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress,
-+ trap->FaultSave.s.FSR.Status);
-+ for (i = 0, FaultArea = &trap->Data0; i < 4; i++, FaultArea++)
-+ printk (" %d FaultAddr=%x EventAddr=%x FSR=%x\n", i,
-+ FaultArea->s.FaultAddress, FaultArea->s.EventAddress, FaultArea->s.FSR.Status);
-+
-+ printk (" type %08x size %08x source %08x dest %08x\n",
-+ trap->Desc.s.dma_type, trap->Desc.s.dma_size, trap->Desc.s.dma_source, trap->Desc.s.dma_dest);
-+ printk (" Dest event %08x cookie/proc %08x\n",
-+ trap->Desc.s.dma_destEvent, trap->Desc.s.dma_destCookieVProc);
-+ printk (" Source event %08x cookie/proc %08x\n",
-+ trap->Desc.s.dma_srcEvent, trap->Desc.s.dma_srcCookieVProc);
-+
-+// panic ("ep3_dprocTrap");
-+
-+ return (OP_HANDLED);
-+}
-+
-+int
-+ep3_tprocTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap)
-+{
-+ EP3_RAIL *rail = (EP3_RAIL *) ctxt->Private;
-+
-+ EPRINTF6 (DBG_EPTRAP, "ep3_tprocTrap: SP=%08x PC=%08x NPC=%08x DIRTY=%08x TRAP=%08x MI=%s\n",
-+ trap->sp, trap->pc, trap->npc, trap->DirtyBits.Bits, trap->TrapBits.Bits, MiToName (trap->mi));
-+ EPRINTF4 (DBG_EPTRAP, " g0=%08x g1=%08x g2=%08x g3=%08x\n",
-+ trap->Registers[REG_GLOBALS+(0^WordEndianFlip)], trap->Registers[REG_GLOBALS+(1^WordEndianFlip)],
-+ trap->Registers[REG_GLOBALS+(2^WordEndianFlip)], trap->Registers[REG_GLOBALS+(3^WordEndianFlip)]);
-+ EPRINTF4 (DBG_EPTRAP, " g4=%08x g5=%08x g6=%08x g7=%08x\n",
-+ trap->Registers[REG_GLOBALS+(4^WordEndianFlip)], trap->Registers[REG_GLOBALS+(5^WordEndianFlip)],
-+ trap->Registers[REG_GLOBALS+(6^WordEndianFlip)], trap->Registers[REG_GLOBALS+(7^WordEndianFlip)]);
-+ EPRINTF4 (DBG_EPTRAP, " o0=%08x o1=%08x o2=%08x o3=%08x\n",
-+ trap->Registers[REG_OUTS+(0^WordEndianFlip)], trap->Registers[REG_OUTS+(1^WordEndianFlip)],
-+ trap->Registers[REG_OUTS+(2^WordEndianFlip)], trap->Registers[REG_OUTS+(3^WordEndianFlip)]);
-+ EPRINTF4 (DBG_EPTRAP, " o4=%08x o5=%08x
o6=%08x o7=%08x\n", -+ trap->Registers[REG_OUTS+(4^WordEndianFlip)], trap->Registers[REG_OUTS+(5^WordEndianFlip)], -+ trap->Registers[REG_OUTS+(6^WordEndianFlip)], trap->Registers[REG_OUTS+(7^WordEndianFlip)]); -+ EPRINTF4 (DBG_EPTRAP, " l0=%08x l1=%08x l2=%08x l3=%08x\n", -+ trap->Registers[REG_LOCALS+(0^WordEndianFlip)], trap->Registers[REG_LOCALS+(1^WordEndianFlip)], -+ trap->Registers[REG_LOCALS+(2^WordEndianFlip)], trap->Registers[REG_LOCALS+(3^WordEndianFlip)]); -+ EPRINTF4 (DBG_EPTRAP, " l4=%08x l5=%08x l6=%08x l7=%08x\n", -+ trap->Registers[REG_LOCALS+(4^WordEndianFlip)], trap->Registers[REG_LOCALS+(5^WordEndianFlip)], -+ trap->Registers[REG_LOCALS+(6^WordEndianFlip)], trap->Registers[REG_LOCALS+(7^WordEndianFlip)]); -+ EPRINTF4 (DBG_EPTRAP, " i0=%08x i1=%08x i2=%08x i3=%08x\n", -+ trap->Registers[REG_INS+(0^WordEndianFlip)], trap->Registers[REG_INS+(1^WordEndianFlip)], -+ trap->Registers[REG_INS+(2^WordEndianFlip)], trap->Registers[REG_INS+(3^WordEndianFlip)]); -+ EPRINTF4 (DBG_EPTRAP, " i4=%08x i5=%08x i6=%08x i7=%08x\n", -+ trap->Registers[REG_INS+(4^WordEndianFlip)], trap->Registers[REG_INS+(5^WordEndianFlip)], -+ trap->Registers[REG_INS+(6^WordEndianFlip)], trap->Registers[REG_INS+(7^WordEndianFlip)]); -+ -+ ASSERT (trap->Status.s.Context & SYS_CONTEXT_BIT); -+ -+ switch (trap->mi) -+ { -+ case MI_UnimplementedError: -+ if (trap->TrapBits.s.ForcedTProcTrap) -+ { -+ ASSERT (trap->TrapBits.s.OutputWasOpen == 0); -+ -+ EPRINTF0 (DBG_EPTRAP, "ep3_tprocTrap: ForcedTProcTrap\n"); -+ -+ IssueRunThread (rail, SaveThreadToStack (ctxt, trap, FALSE)); -+ return (OP_HANDLED); -+ } -+ -+ if (trap->TrapBits.s.ThreadTimeout) -+ { -+ EPRINTF0 (DBG_EPTRAP, "ep3_tprocTrap: ThreadTimeout\n"); -+ -+ if (trap->Registers[REG_GLOBALS + (1^WordEndianFlip)] == 0) -+ RollThreadToClose (ctxt, trap, trap->TrapBits.s.PacketAckValue); -+ else -+ { -+ CompleteEnvelope (rail, trap->Registers[REG_GLOBALS + (1^WordEndianFlip)], trap->TrapBits.s.PacketAckValue); -+ -+ RollThreadToClose (ctxt, trap, EP3_PAckStolen); -+ } -+ -+ IssueRunThread (rail, SaveThreadToStack (ctxt, trap, FALSE)); -+ return (OP_HANDLED); -+ } -+ -+ if (trap->TrapBits.s.Unimplemented) -+ { -+ E3_uint32 instr = ELAN3_OP_LOAD32 (ctxt, trap->pc & PC_MASK); -+ -+ PRINTF1 (ctxt, DBG_EPTRAP, "ep3_tprocTrap: unimplemented instruction %08x\n", instr); -+ -+ if ((instr & OPCODE_MASK) == OPCODE_Ticc && -+ (instr & OPCODE_IMM) == OPCODE_IMM && -+ (Ticc_COND(instr) == Ticc_TA)) -+ { -+ switch (INSTR_IMM(instr)) -+ { -+ case EP3_UNIMP_TRAP_NO_DESCS: -+ StallThreadForNoDescs (rail, trap->Registers[REG_GLOBALS + (1^WordEndianFlip)], -+ SaveThreadToStack (ctxt, trap, TRUE)); -+ return (OP_HANDLED); -+ -+ case EP3_UNIMP_TRAP_PACKET_NACKED: -+ CompleteEnvelope (rail, trap->Registers[REG_GLOBALS + (1^WordEndianFlip)], E3_PAckDiscard); -+ -+ IssueRunThread (rail, SaveThreadToStack (ctxt, trap, TRUE)); -+ return (OP_HANDLED); -+ -+ case EP3_UNIMP_THREAD_HALTED: -+ StallThreadForHalted (rail, trap->Registers[REG_GLOBALS + (1^WordEndianFlip)], -+ SaveThreadToStack (ctxt, trap, TRUE)); -+ return (OP_HANDLED); -+ -+ default: -+ break; -+ -+ } -+ } -+ } -+ break; -+ -+ default: -+ break; -+ } -+ -+ /* All other traps should not happen for kernel comms */ -+ printk ("ep3_tprocTrap: SP=%08x PC=%08x NPC=%08x DIRTY=%08x TRAP=%08x MI=%s\n", -+ trap->sp, trap->pc, trap->npc, trap->DirtyBits.Bits, -+ trap->TrapBits.Bits, MiToName (trap->mi)); -+ printk (" FaultSave : FaultAddress %08x EventAddress %08x FSR %08x\n", -+ trap->FaultSave.s.FaultAddress, 
trap->FaultSave.s.EventAddress, trap->FaultSave.s.FSR.Status); -+ printk (" DataFault : FaultAddress %08x EventAddress %08x FSR %08x\n", -+ trap->DataFaultSave.s.FaultAddress, trap->DataFaultSave.s.EventAddress, trap->DataFaultSave.s.FSR.Status); -+ printk (" InstFault : FaultAddress %08x EventAddress %08x FSR %08x\n", -+ trap->InstFaultSave.s.FaultAddress, trap->InstFaultSave.s.EventAddress, trap->InstFaultSave.s.FSR.Status); -+ printk (" OpenFault : FaultAddress %08x EventAddress %08x FSR %08x\n", -+ trap->OpenFaultSave.s.FaultAddress, trap->OpenFaultSave.s.EventAddress, trap->OpenFaultSave.s.FSR.Status); -+ -+ if (trap->DirtyBits.s.GlobalsDirty) -+ { -+ printk (" g0=%08x g1=%08x g2=%08x g3=%08x\n", -+ trap->Registers[REG_GLOBALS+(0^WordEndianFlip)], trap->Registers[REG_GLOBALS+(1^WordEndianFlip)], -+ trap->Registers[REG_GLOBALS+(2^WordEndianFlip)], trap->Registers[REG_GLOBALS+(3^WordEndianFlip)]); -+ printk (" g4=%08x g5=%08x g6=%08x g7=%08x\n", -+ trap->Registers[REG_GLOBALS+(4^WordEndianFlip)], trap->Registers[REG_GLOBALS+(5^WordEndianFlip)], -+ trap->Registers[REG_GLOBALS+(6^WordEndianFlip)], trap->Registers[REG_GLOBALS+(7^WordEndianFlip)]); -+ } -+ if (trap->DirtyBits.s.OutsDirty) -+ { -+ printk (" o0=%08x o1=%08x o2=%08x o3=%08x\n", -+ trap->Registers[REG_OUTS+(0^WordEndianFlip)], trap->Registers[REG_OUTS+(1^WordEndianFlip)], -+ trap->Registers[REG_OUTS+(2^WordEndianFlip)], trap->Registers[REG_OUTS+(3^WordEndianFlip)]); -+ printk (" o4=%08x o5=%08x o6=%08x o7=%08x\n", -+ trap->Registers[REG_OUTS+(4^WordEndianFlip)], trap->Registers[REG_OUTS+(5^WordEndianFlip)], -+ trap->Registers[REG_OUTS+(6^WordEndianFlip)], trap->Registers[REG_OUTS+(7^WordEndianFlip)]); -+ } -+ if (trap->DirtyBits.s.LocalsDirty) -+ { -+ printk (" l0=%08x l1=%08x l2=%08x l3=%08x\n", -+ trap->Registers[REG_LOCALS+(0^WordEndianFlip)], trap->Registers[REG_LOCALS+(1^WordEndianFlip)], -+ trap->Registers[REG_LOCALS+(2^WordEndianFlip)], trap->Registers[REG_LOCALS+(3^WordEndianFlip)]); -+ printk (" l4=%08x l5=%08x l6=%08x l7=%08x\n", -+ trap->Registers[REG_LOCALS+(4^WordEndianFlip)], trap->Registers[REG_LOCALS+(5^WordEndianFlip)], -+ trap->Registers[REG_LOCALS+(6^WordEndianFlip)], trap->Registers[REG_LOCALS+(7^WordEndianFlip)]); -+ } -+ if (trap->DirtyBits.s.InsDirty) -+ { -+ printk (" i0=%08x i1=%08x i2=%08x i3=%08x\n", -+ trap->Registers[REG_INS+(0^WordEndianFlip)], trap->Registers[REG_INS+(1^WordEndianFlip)], -+ trap->Registers[REG_INS+(2^WordEndianFlip)], trap->Registers[REG_INS+(3^WordEndianFlip)]); -+ printk (" i4=%08x i5=%08x i6=%08x i7=%08x\n", -+ trap->Registers[REG_INS+(4^WordEndianFlip)], trap->Registers[REG_INS+(5^WordEndianFlip)], -+ trap->Registers[REG_INS+(6^WordEndianFlip)], trap->Registers[REG_INS+(7^WordEndianFlip)]); -+ } -+ -+// panic ("ep3_tprocTrap"); -+ -+ return (OP_HANDLED); -+} -+ -+int -+ep3_iprocTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, int channel) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) ctxt->Private; -+ ELAN3_DEV *dev = ctxt->Device; -+ EP3_COOKIE *cp; -+ sdramaddr_t event; -+ E3_uint32 type; -+ sdramaddr_t dma; -+ E3_DMA_BE dmabe; -+ -+ ASSERT (trap->Transactions[0].s.TrTypeCntx.s.Context & SYS_CONTEXT_BIT); -+ -+ /* -+ * first process the trap to determine the cause -+ */ -+ InspectIProcTrap (ctxt, trap); -+ -+ if (! 
trap->AckSent && trap->LockQueuePointer) /* Must be a network error in a queueing DMA */
-+ { /* packet - unlock the queue */
-+ IncrStat (rail, QueueingPacketTrap);
-+
-+ SimulateUnlockQueue (ctxt, trap->LockQueuePointer, FALSE);
-+ return (OP_HANDLED);
-+ }
-+
-+ if (trap->AckSent && trap->BadTransaction)
-+ {
-+ spin_unlock (&dev->IntrLock);
-+
-+ /* NOTE - no network error fixup is necessary for system context
-+ * messages since they are idempotent and are single packet
-+ * dmas
-+ */
-+ if (EP3_CONTEXT_ISDATA (trap->Transactions[0].s.TrTypeCntx.s.Context))
-+ {
-+ int nodeId = EP3_CONTEXT_TO_NODE(trap->Transactions[0].s.TrTypeCntx.s.Context);
-+
-+ if (trap->DmaIdentifyTransaction)
-+ {
-+ printk ("%s: network error on dma identify <%x> from node %d\n", rail->Generic.Name, trap->DmaIdentifyTransaction->s.TrAddr, nodeId);
-+
-+ ep_queue_network_error (&rail->Generic, nodeId, EP_NODE_NETERR_ATOMIC_PACKET, channel, trap->DmaIdentifyTransaction->s.TrAddr);
-+ }
-+ else if (trap->ThreadIdentifyTransaction)
-+ {
-+ printk ("%s: network error on thread identify <%x> from node %d\n", rail->Generic.Name, trap->ThreadIdentifyTransaction->s.TrAddr, nodeId);
-+
-+ ep_queue_network_error (&rail->Generic, nodeId, EP_NODE_NETERR_ATOMIC_PACKET, channel, trap->ThreadIdentifyTransaction->s.TrAddr);
-+ }
-+ else
-+ {
-+ printk ("%s: network error on dma packet from node %d\n", rail->Generic.Name, nodeId);
-+
-+ ep_queue_network_error (&rail->Generic, nodeId, EP_NODE_NETERR_DMA_PACKET, channel, 0);
-+ }
-+ }
-+
-+ spin_lock (&dev->IntrLock);
-+ return (OP_HANDLED);
-+ }
-+
-+ if (trap->AckSent)
-+ {
-+ if (trap->TrappedTransaction == NULL)
-+ return (OP_HANDLED);
-+
-+ while (! trap->TrappedTransaction->s.TrTypeCntx.s.LastTrappedTrans)
-+ {
-+ E3_IprocTrapHeader_BE *hdrp = trap->TrappedTransaction;
-+ E3_IprocTrapData_BE *datap = trap->TrappedDataBuffer;
-+
-+ ASSERT (hdrp->s.TrTypeCntx.s.StatusRegValid != 0);
-+
-+ if ((hdrp->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT) != 0)
-+ {
-+ printk ("ep3_iprocTrap: WRITEBLOCK : Addr %x\n", hdrp->s.TrAddr);
-+// panic ("ep3_iprocTrap\n");
-+ }
-+ else
-+ {
-+ switch (hdrp->s.TrTypeCntx.s.Type & TR_OPCODE_TYPE_MASK)
-+ {
-+ case TR_SETEVENT & TR_OPCODE_TYPE_MASK:
-+ switch (GET_STATUS_TRAPTYPE (hdrp->s.IProcTrapStatus))
-+ {
-+ case MI_DmaQueueOverflow:
-+ IncrStat (rail, IprocDmaQueueOverflow);
-+
-+ if ((event = ep_elan2sdram (&rail->Generic, hdrp->s.TrAddr)) != (sdramaddr_t) 0 &&
-+ ((type = elan3_sdram_readl (dev, event + offsetof (E3_Event, ev_Type))) & EV_TYPE_MASK_DMA) == EV_TYPE_DMA &&
-+ (dma = ep_elan2sdram (&rail->Generic, (type & ~EV_TYPE_MASK2))) != (sdramaddr_t) 0)
-+ {
-+ elan3_sdram_copyq_from_sdram (dev, dma, &dmabe, sizeof (E3_DMA));
-+
-+ if (dmabe.s.dma_direction == DMA_WRITE)
-+ cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_srcEvent);
-+ else
-+ {
-+ cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_destEvent);
-+
-+ /* we MUST convert this into a DMA_READ_REQUEUE dma because if we don't, the
-+ * DMA descriptor will be read from the EP3_RETRY_DMA rather than the
-+ * original DMA - this can then get reused and an incorrect DMA
-+ * descriptor sent
-+ * eventp->ev_Type contains the dma address with type in the lower bits
-+ */
-+
-+ dmabe.s.dma_source = (type & ~EV_TYPE_MASK2);
-+ dmabe.s.dma_direction = (dmabe.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
-+ }
-+
-+#ifdef DEBUG_ASSERT
-+ {
-+ E3_uint16 vp = (dmabe.s.dma_direction == DMA_WRITE ?
dmabe.s.dma_destVProc : dmabe.s.dma_srcVProc); -+ EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)]; -+ -+ ASSERT (!EP_VP_ISDATA(vp) || (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE)); -+ } -+#endif -+ -+ if (cp != NULL) -+ cp->Operations->DmaRetry (rail, cp->Arg, &dmabe, EAGAIN); -+ else -+ { -+ ASSERT (dmabe.s.dma_direction == DMA_WRITE && dmabe.s.dma_srcEvent == 0 && dmabe.s.dma_isRemote); -+ -+ QueueDmaForRetry (rail, &dmabe, EP_RETRY_ANONYMOUS); -+ } -+ break; -+ } -+ -+ printk ("ep3_iprocTrap: SETEVENT : %x - cannot find dma to restart\n", hdrp->s.TrAddr); -+// panic ("ep3_iprocTrap\n"); -+ break; -+ -+ case MI_EventQueueOverflow: -+ { -+ sdramaddr_t event; -+ E3_uint32 type; -+ -+ IncrStat (rail, IprocEventQueueOverflow); -+ -+ if ((event = ep_elan2sdram (&rail->Generic, hdrp->s.TrAddr)) != (sdramaddr_t) 0 && -+ ((type = elan3_sdram_readl (dev, event + offsetof (E3_Event, ev_Type))) & EV_TYPE_MASK_EVIRQ) == EV_TYPE_EVIRQ) -+ { -+ spin_unlock (&dev->IntrLock); -+ ep3_event (ctxt, (type & ~(EV_TYPE_MASK_EVIRQ|EV_TYPE_MASK_BCOPY)), OP_LWP); -+ spin_lock (&dev->IntrLock); -+ -+ break; -+ } -+ -+ printk ("ep3_iprocTrap: SETEVENT : %x - cannot find event\n", hdrp->s.TrAddr); -+// panic ("ep3_iprocTrap\n"); -+ break; -+ } -+ -+ default: -+ printk ("ep3_iprocTrap: SETEVENT : %x MI=%x\n", hdrp->s.TrAddr, GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus)); -+// panic ("ep3_iprocTrap\n"); -+ break; -+ } -+ break; -+ -+ case TR_SENDDISCARD & TR_OPCODE_TYPE_MASK: -+ /* Just ignore send-discard transactions */ -+ break; -+ -+ case TR_REMOTEDMA & TR_OPCODE_TYPE_MASK: -+ { -+ E3_DMA_BE *dmap = (E3_DMA_BE *) datap; -+ -+ if (GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus) != MI_DmaQueueOverflow) -+ { -+ printk ("ep3_iprocTrap: MI=%x\n", GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus)); -+ break; -+ } -+ -+ IncrStat (rail, IprocDmaQueueOverflow); -+ -+ cp = LookupEventCookie (rail, &rail->CookieTable, dmap->s.dma_srcEvent); -+ -+ /* modify the dma type since it will still be a "read" dma */ -+ dmap->s.dma_type = (dmap->s.dma_type & ~DMA_TYPE_READ) | DMA_TYPE_ISREMOTE; -+ -+#ifdef DEBUG_ASSERT -+ { -+ E3_uint16 vp = dmap->s.dma_destVProc; -+ EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)]; -+ -+ ASSERT (!EP_VP_ISDATA(vp) || (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE)); -+ } -+#endif -+ if (cp != NULL) -+ cp->Operations->DmaRetry (rail, cp->Arg, dmap, EAGAIN); -+ else -+ { -+ ASSERT (dmap->s.dma_direction == DMA_WRITE && dmap->s.dma_srcEvent == 0 && dmap->s.dma_isRemote); -+ -+ QueueDmaForRetry (rail, dmap, EP_RETRY_ANONYMOUS); -+ } -+ break; -+ } -+ default: -+ printk ("ep3_iprocTrap: %s\n", IProcTrapString (hdrp, datap)); -+ break; -+ } -+ } -+ -+ /* -+ * We've successfully processed this transaction, so move onto the -+ * next one. 
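-+ * (The walk advances in lock step: the transaction header in
-+ * TrappedTransaction and its payload in TrappedDataBuffer, until a
-+ * header with LastTrappedTrans set terminates the loop.)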
-+ */
-+ trap->TrappedTransaction++;
-+ trap->TrappedDataBuffer++;
-+ }
-+
-+ return (OP_HANDLED);
-+ }
-+
-+ /* Workaround WRITEBLOCK transaction executed when LOCKQUEUE transaction missed */
-+ if ((trap->TrappedTransaction->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT) && /* a DMA packet */
-+ trap->LockQueuePointer == 0 && trap->UnlockQueuePointer && /* a queueing DMA */
-+ trap->TrappedTransaction->s.TrAddr == trap->FaultSave.s.FaultAddress) /* and missed lockqueue */
-+ {
-+ printk ("ep3_iprocTrap: missed lockqueue transaction for queue %x\n", trap->UnlockQueuePointer);
-+ return (OP_HANDLED);
-+ }
-+
-+ if (trap->FaultSave.s.FaultContext != 0)
-+ printk ("ep3_iprocTrap: pagefault at %08x in context %x\n",
-+ trap->FaultSave.s.FaultAddress, trap->FaultSave.s.FaultContext);
-+
-+// panic ("ep3_iprocTrap: unexpected inputter trap\n");
-+
-+ return (OP_HANDLED);
-+}
-+
-+/*
-+ * Command processor trap
-+ * kernel comms should only be able to generate
-+ * queue overflow traps
-+ */
-+int
-+ep3_cprocTrap (ELAN3_CTXT *ctxt, COMMAND_TRAP *trap)
-+{
-+ EP3_RAIL *rail = (EP3_RAIL *) ctxt->Private;
-+ int ctxnum = (trap->TrapBuf.r.Breg >> 16) & MAX_ROOT_CONTEXT_MASK;
-+ ELAN3_DEV *dev = rail->Device;
-+ EP3_DMA_RING *ring;
-+ EP3_COOKIE *cp;
-+ E3_DMA_BE dmabe;
-+ int vp, slot;
-+ unsigned long flags;
-+
-+ switch (trap->Status.s.TrapType)
-+ {
-+ case MI_DmaQueueOverflow:
-+ IncrStat (rail, CprocDmaQueueOverflow);
-+
-+ /* Use the context number that the setevent was issued in,
-+ * to find the appropriate dma ring, then since they are guaranteed
-+ * to be issued in order, we just search backwards till we find the
-+ * last one which has completed its word copy - this must be the
-+ * one which had caused the DmaQueueOverflow trap ! */
-+
-+ ASSERT (ctxnum >= ELAN3_DMARING_BASE_CONTEXT_NUM && ctxnum < (ELAN3_DMARING_BASE_CONTEXT_NUM+EP3_NUM_RINGS));
-+
-+ spin_lock_irqsave (&dev->CProcLock, flags);
-+
-+ ring = &rail->DmaRings[ctxnum - ELAN3_DMARING_BASE_CONTEXT_NUM];
-+ slot = DMA_RING_PREV_POS(ring, ring->Position);
-+
-+ while (ring->pDoneBlk[slot] == EP3_EVENT_ACTIVE)
-+ slot = DMA_RING_PREV_POS(ring, slot);
-+
-+ elan3_sdram_copyq_from_sdram (rail->Device, DMA_RING_DMA(ring,slot), &dmabe, sizeof (E3_DMA));
-+
-+#if defined(DEBUG_ASSERT)
-+ while (slot != DMA_RING_PREV_POS(ring, ring->Position))
-+ {
-+ ASSERT (ring->pDoneBlk[slot] != EP3_EVENT_ACTIVE);
-+
-+ slot = DMA_RING_PREV_POS(ring, slot);
-+ }
-+#endif
-+ spin_unlock_irqrestore (&dev->CProcLock, flags);
-+
-+ if (dmabe.s.dma_direction == DMA_WRITE)
-+ cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_srcEvent);
-+ else
-+ {
-+ ASSERT (dmabe.s.dma_direction == DMA_READ_REQUEUE);
-+
-+ cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_destEvent);
-+ }
-+
-+#if defined(DEBUG_ASSERT)
-+ if (dmabe.s.dma_direction == DMA_WRITE)
-+ vp = dmabe.s.dma_destVProc;
-+ else
-+ vp = dmabe.s.dma_srcVProc;
-+
-+ ASSERT (!EP_VP_ISDATA(vp) || (rail->Generic.Nodes[EP_VP_TO_NODE(vp)].State >= EP_NODE_CONNECTED &&
-+ rail->Generic.Nodes[EP_VP_TO_NODE(vp)].State <= EP_NODE_LOCAL_PASSIVATE));
-+#endif
-+
-+ if (cp != NULL)
-+ cp->Operations->DmaRetry (rail, cp->Arg, &dmabe, EAGAIN);
-+ else
-+ {
-+ ASSERT (dmabe.s.dma_direction == DMA_WRITE && dmabe.s.dma_srcEvent == 0 && dmabe.s.dma_isRemote);
-+
-+ QueueDmaForRetry (rail, &dmabe, EP_RETRY_ANONYMOUS);
-+ }
-+
-+ return (OP_HANDLED);
-+
-+ case MI_EventQueueOverflow:
-+ ASSERT (ctxnum == ELAN3_MRF_CONTEXT_NUM);
-+
-+ IncrStat (rail, CprocEventQueueOverflow);
-+
-+
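-+ /* The flag set below defers the recovery: a later pass presumably
-+ * checks CommandPortEventTrap and reprocesses the command port event
-+ * queue once the overflow has drained. A sketch of such a poll site
-+ * (ProcessCommandPortEventQueue is a hypothetical helper):
-+ *
-+ * if (rail->CommandPortEventTrap) {
-+ * rail->CommandPortEventTrap = FALSE;
-+ * ProcessCommandPortEventQueue (rail);
-+ * }
-+ */
-+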
rail->CommandPortEventTrap = TRUE; -+ return (OP_HANDLED); -+ -+#if defined(PER_CPU_TIMEOUT) -+ case MI_SetEventReadWait: -+ if (ctxnum == ELAN3_MRF_CONTEXT_NUM && trap->FaultSave.s.EventAddress == EP_PACEMAKER_EVENTADDR) -+ { -+ HeartbeatPacemaker (rail); -+ return (OP_HANDLED); -+ } -+#endif -+ -+ default: -+ printk ("ep3_cprocTrap : Context=%x Status=%x TrapType=%x\n", ctxnum, trap->Status.Status, trap->Status.s.TrapType); -+ printk (" FaultAddr=%x EventAddr=%x FSR=%x\n", -+ trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress, -+ trap->FaultSave.s.FSR.Status); -+ break; -+ } -+ -+// panic ("ep3_cprocTrap"); -+ -+ return (OP_HANDLED); -+} -+ -+static int -+ep3_cprocReissue (ELAN3_CTXT *ctxt, CProcTrapBuf_BE *tbuf) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) ctxt->Private; -+ unsigned cmdoff = (tbuf->s.ContextType >> 5) & 0xFF; -+ int ctxnum = (tbuf->s.ContextType >> 16) & MAX_ROOT_CONTEXT_MASK; -+ -+ if (ctxnum >= ELAN3_DMARING_BASE_CONTEXT_NUM && ctxnum < (ELAN3_DMARING_BASE_CONTEXT_NUM+EP3_NUM_RINGS)) -+ { -+ EP3_DMA_RING *ring = &rail->DmaRings[ctxnum - ELAN3_DMARING_BASE_CONTEXT_NUM]; -+ -+ ASSERT ((cmdoff << 2) == offsetof (E3_CommandPort, SetEvent)); /* can only be setevent commands! */ -+ ASSERT (tbuf->s.Addr >= DMA_RING_EVENT_ELAN(ring,0) && tbuf->s.Addr < DMA_RING_EVENT_ELAN(ring, ring->Entries)); -+ -+ writel (tbuf->s.Addr, (void *)(ring->CommandPort + (cmdoff << 2))); -+ } -+ else -+ { -+ ASSERT (ctxnum == ELAN3_MRF_CONTEXT_NUM); -+ -+ writel (tbuf->s.Addr, (void *)(ctxt->CommandPort + (cmdoff << 2))); -+ } -+ -+ return (OP_HANDLED); -+} -+ -+static E3_uint8 -+ep3_load8 (ELAN3_CTXT *ctxt, E3_Addr addr) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) ctxt->Private; -+ ELAN3_DEV *dev = ctxt->Device; -+ sdramaddr_t offset; -+ E3_uint8 *ptr; -+ -+ if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0) -+ return (elan3_sdram_readb (dev, offset)); -+ if ((ptr = ep_elan2main (&rail->Generic, addr)) != NULL) -+ return (*ptr); -+ -+ printk ("ep3_load8: %08x\n", addr); -+ return (0); -+} -+ -+static void -+ep3_store8 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint8 val) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) ctxt->Private; -+ ELAN3_DEV *dev = ctxt->Device; -+ sdramaddr_t offset; -+ E3_uint8 *ptr; -+ -+ if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0) -+ elan3_sdram_writeb (dev, offset, val); -+ else if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0) -+ *ptr = val; -+ else -+ printk ("ep3_store8 %08x\n", addr); -+} -+ -+static E3_uint16 -+ep3_load16 (ELAN3_CTXT *ctxt, E3_Addr addr) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) ctxt->Private; -+ ELAN3_DEV *dev = ctxt->Device; -+ sdramaddr_t offset; -+ E3_uint16 *ptr; -+ -+ if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0) -+ return (elan3_sdram_readw (dev, offset)); -+ if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0) -+ return (*ptr); -+ -+ printk ("ep3_load16 %08x\n", addr); -+ return (0); -+} -+ -+static void -+ep3_store16 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint16 val) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) ctxt->Private; -+ ELAN3_DEV *dev = ctxt->Device; -+ sdramaddr_t offset; -+ E3_uint16 *ptr; -+ -+ if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0) -+ elan3_sdram_writew (dev, offset, val); -+ else if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0) -+ *ptr = val; -+ else -+ printk ("ep3_store16 %08x\n", addr); -+} -+ -+static E3_uint32 -+ep3_load32 (ELAN3_CTXT *ctxt, E3_Addr addr) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) ctxt->Private; -+ ELAN3_DEV *dev = ctxt->Device; -+ sdramaddr_t offset; -+ E3_uint32 *ptr; -+ -+ if 
((offset = ep_elan2sdram (&rail->Generic, addr)) != 0) -+ return (elan3_sdram_readl(dev, offset)); -+ if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0) -+ return (*ptr); -+ -+ printk ("ep3_load32 %08x\n", addr); -+ return (0); -+} -+ -+static void -+ep3_store32 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint32 val) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) ctxt->Private; -+ ELAN3_DEV *dev = ctxt->Device; -+ sdramaddr_t offset; -+ E3_uint32 *ptr; -+ -+ if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0) -+ elan3_sdram_writel (dev, offset, val); -+ else if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0) -+ *ptr = val; -+ else -+ printk ("ep3_store32 %08x\n", addr); -+} -+ -+static E3_uint64 -+ep3_load64 (ELAN3_CTXT *ctxt, E3_Addr addr) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) ctxt->Private; -+ ELAN3_DEV *dev = ctxt->Device; -+ sdramaddr_t offset; -+ E3_uint64 *ptr; -+ -+ if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0) -+ return (elan3_sdram_readq (dev, offset)); -+ if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0) -+ return (*ptr); -+ -+ printk ("ep3_load64 %08x\n", addr); -+ return (0); -+} -+ -+static void -+ep3_store64 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint64 val) -+{ -+ EP3_RAIL *rail = (EP3_RAIL *) ctxt->Private; -+ ELAN3_DEV *dev = ctxt->Device; -+ sdramaddr_t offset; -+ E3_uint64 *ptr; -+ -+ if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0) -+ elan3_sdram_writeq (dev, offset, val); -+ else if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0) -+ *ptr = val; -+ else -+ printk ("ep3_store64 %08x\n", addr); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/support_elan4.c linux-2.6.9/drivers/net/qsnet/ep/support_elan4.c ---- clean/drivers/net/qsnet/ep/support_elan4.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/support_elan4.c 2005-08-09 05:57:14.000000000 -0400 -@@ -0,0 +1,1192 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ident "@(#)$Id: support_elan4.c,v 1.24.2.2 2005/08/09 09:57:14 mike Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
-+/* $Source: /cvs/master/quadrics/epmod/support_elan4.c,v $*/
-+
-+#include
-+#include
-+
-+#include
-+
-+#include "kcomm_vp.h"
-+#include "kcomm_elan4.h"
-+#include "debug.h"
-+
-+#include
-+#include
-+
-+void
-+ep4_register_intcookie (EP4_RAIL *rail, EP4_INTCOOKIE *cp, E4_uint64 cookie, void (*callback)(EP4_RAIL *r, void *arg), void *arg)
-+{
-+ unsigned long flags;
-+
-+ cp->int_val = cookie;
-+ cp->int_callback = callback;
-+ cp->int_arg = arg;
-+
-+ spin_lock_irqsave (&rail->r_intcookie_lock, flags);
-+ list_add_tail (&cp->int_link, &rail->r_intcookie_hash[EP4_INTCOOKIE_HASH(cookie)]);
-+ spin_unlock_irqrestore (&rail->r_intcookie_lock, flags);
-+}
-+
-+void
-+ep4_deregister_intcookie (EP4_RAIL *rail, EP4_INTCOOKIE *cp)
-+{
-+ unsigned long flags;
-+
-+ spin_lock_irqsave (&rail->r_intcookie_lock, flags);
-+ list_del (&cp->int_link);
-+ spin_unlock_irqrestore (&rail->r_intcookie_lock, flags);
-+}
-+
-+
-+EP4_INTCOOKIE *
-+ep4_lookup_intcookie (EP4_RAIL *rail, E4_uint64 cookie)
-+{
-+ struct list_head *el;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave (&rail->r_intcookie_lock, flags);
-+ list_for_each (el, &rail->r_intcookie_hash[EP4_INTCOOKIE_HASH(cookie)]) {
-+ EP4_INTCOOKIE *cp = list_entry (el, EP4_INTCOOKIE, int_link);
-+
-+ if (cp->int_val == cookie)
-+ {
-+ spin_unlock_irqrestore (&rail->r_intcookie_lock, flags);
-+ return cp;
-+ }
-+ }
-+ spin_unlock_irqrestore (&rail->r_intcookie_lock, flags);
-+ return NULL;
-+}
-+
-+E4_uint64
-+ep4_neterr_cookie (EP4_RAIL *rail, unsigned int node)
-+{
-+ E4_uint64 cookie;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave (&rail->r_cookie_lock, flags);
-+ cookie = rail->r_cookies[node];
-+
-+ rail->r_cookies[node] += EP4_COOKIE_INC;
-+
-+ spin_unlock_irqrestore (&rail->r_cookie_lock, flags);
-+
-+ return cookie;
-+}
-+
-+void
-+ep4_eproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status)
-+{
-+ EP4_RAIL *rail = EP4_CTXT_TO_RAIL (ctxt);
-+ ELAN4_EPROC_TRAP trap;
-+
-+ elan4_extract_eproc_trap (ctxt->ctxt_dev, status, &trap, 0);
-+
-+ if (epdebug & DBG_EPTRAP)
-+ elan4_display_eproc_trap (DBG_BUFFER, 0, "ep4_eproc_trap", &trap);
-+
-+ switch (EPROC_TrapType (status))
-+ {
-+ case EventProcNoFault:
-+ EPRINTF1 (DBG_EPTRAP, "%s: EventProcNoFault\n", rail->r_generic.Name);
-+ return;
-+
-+ default:
-+ printk ("%s: unhandled eproc trap %d\n", rail->r_generic.Name, EPROC_TrapType (status));
-+ elan4_display_eproc_trap (DBG_CONSOLE, 0, "ep4_eproc_trap", &trap);
-+ }
-+}
-+
-+void
-+ep4_cproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned cqnum)
-+{
-+ EP4_RAIL *rail = EP4_CTXT_TO_RAIL (ctxt);
-+ ELAN4_CPROC_TRAP trap;
-+ struct list_head *el;
-+ register int i;
-+
-+ elan4_extract_cproc_trap (ctxt->ctxt_dev, status, &trap, cqnum);
-+
-+ if (epdebug & DBG_EPTRAP)
-+ elan4_display_cproc_trap (DBG_BUFFER, 0, "ep4_cproc_trap", &trap);
-+
-+ switch (CPROC_TrapType (status))
-+ {
-+ case CommandProcInterruptQueueOverflow:
-+ /*
-+ * Try and handle a bunch of elan main interrupts
-+ */
-+ for (i = 0; i < EP4_NUM_ECQ; i++) {
-+ list_for_each (el, &rail->r_ecq_list[i]) {
-+ EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
-+
-+ if (elan4_cq2num (ecq->ecq_cq) == cqnum)
-+ {
-+ printk ("%s: defer command queue %d after trap %x\n",
-+ rail->r_generic.Name, cqnum, CPROC_TrapType (status));
-+
-+ elan4_queue_mainintop (ctxt->ctxt_dev, &ecq->ecq_intop);
-+ return;
-+ }
-+ }
-+ }
-+ break;
-+
-+ case CommandProcDmaQueueOverflow:
-+ case CommandProcThreadQueueOverflow:
-+ for (i = 0; i < EP4_NUM_ECQ; i++) {
-+ list_for_each (el, &rail->r_ecq_list[i]) {
-+ EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
-+
-+ if (elan4_cq2num (ecq->ecq_cq) == cqnum)
-+ {
-+ printk ("%s: restart command queue %d after trap %x\n",
-+ rail->r_generic.Name, cqnum, CPROC_TrapType (status));
-+
-+ elan4_restartcq (ctxt->ctxt_dev, ecq->ecq_cq);
-+ return;
-+ }
-+ }
-+ }
-+ break;
-+ }
-+
-+ printk ("%s: unhandled cproc trap %d for cqnum %d\n", rail->r_generic.Name, CPROC_TrapType (status), cqnum);
-+ elan4_display_cproc_trap (DBG_CONSOLE, 0, "ep4_cproc_trap", &trap);
-+}
-+
-+void
-+ep4_dproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit)
-+{
-+ EP4_RAIL *rail = EP4_CTXT_TO_RAIL (ctxt);
-+ ELAN4_DPROC_TRAP trap;
-+
-+ elan4_extract_dproc_trap (ctxt->ctxt_dev, status, &trap, unit);
-+
-+ if (epdebug & DBG_EPTRAP)
-+ elan4_display_dproc_trap (DBG_BUFFER, 0, "ep4_dproc_trap", &trap);
-+
-+ if (! DPROC_PrefetcherFault (trap.tr_status))
-+ {
-+ switch (DPROC_TrapType (trap.tr_status))
-+ {
-+ case DmaProcFailCountError:
-+ goto retry_this_dma;
-+
-+ case DmaProcPacketAckError:
-+ goto retry_this_dma;
-+
-+ case DmaProcQueueOverflow:
-+ goto retry_this_dma;
-+ }
-+ }
-+
-+ printk ("%s: unhandled dproc trap\n", rail->r_generic.Name);
-+ elan4_display_dproc_trap (DBG_CONSOLE, 0, "ep4_dproc_trap", &trap);
-+ return;
-+
-+ retry_this_dma:
-+ /*XXXX implement backoff .... */
-+
-+ ep4_queue_dma_retry (rail, &trap.tr_desc, EP_RETRY_LOW_PRI);
-+}
-+
-+void
-+ep4_tproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status)
-+{
-+ EP4_RAIL *rail = EP4_CTXT_TO_RAIL (ctxt);
-+ ELAN4_TPROC_TRAP *trap = &rail->r_tproc_trap;
-+
-+ elan4_extract_tproc_trap (ctxt->ctxt_dev, status, trap);
-+
-+ if (epdebug & DBG_EPTRAP)
-+ elan4_display_tproc_trap (DBG_BUFFER, 0, "ep4_tproc_trap", trap);
-+
-+ printk ("%s: unhandled tproc trap\n", rail->r_generic.Name);
-+ elan4_display_tproc_trap (DBG_CONSOLE, 0, "ep4_tproc_trap", trap);
-+}
-+
-+void
-+ep4_iproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit)
-+{
-+ EP4_RAIL *rail = EP4_CTXT_TO_RAIL (ctxt);
-+ ELAN4_IPROC_TRAP *trap = &rail->r_iproc_trap;
-+
-+ elan4_extract_iproc_trap (ctxt->ctxt_dev, status, trap, unit);
-+
-+ if (epdebug & DBG_EPTRAP)
-+ elan4_display_iproc_trap (DBG_BUFFER, 0, "ep4_iproc_trap", trap);
-+
-+ elan4_inspect_iproc_trap (trap);
-+
-+ switch (IPROC_TrapValue (trap->tr_transactions[trap->tr_trappedTrans].IProcStatusCntxAndTrType))
-+ {
-+ case InputDmaQueueOverflow:
-+ ep4_queue_dma_retry (rail, (E4_DMA *) &trap->tr_dataBuffers[trap->tr_trappedTrans], EP_RETRY_LOW_PRI);
-+ return;
-+
-+ case InputEventEngineTrapped:
-+ {
-+ E4_IprocTrapHeader *hdrp = &trap->tr_transactions[trap->tr_trappedTrans];
-+ sdramaddr_t inputq;
-+ E4_Addr event;
-+
-+ /* XXXX: flow control on the command queue which we issue to is
-+ * rather difficult, we don't want to have space for an event
-+ * for each possible context, nor the mechanism to hold the
-+ * context filter up until the event has been executed. Given
-+ * that the event engine will be restarted by this same interrupt
-+ * and we're using high priority command queues, then we just use
-+ * a single small command queue for this.
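-+ *
-+ * In outline, both recovery paths handled below reduce to issuing a
-+ * single SET_EVENT command on the shared r_event_ecq queue (sketch):
-+ *
-+ * TR_SETEVENT: ep4_set_event_cmd (rail->r_event_ecq, hdrp->TrAddr);
-+ * TR_INPUT_Q_COMMIT: read q_event back from the input queue, then
-+ * ep4_set_event_cmd (rail->r_event_ecq, event);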
-+ */ -+ switch (IPROC_TransactionType(hdrp->IProcStatusCntxAndTrType) & TR_OPCODE_MASK) -+ { -+ case TR_SETEVENT & TR_OPCODE_MASK: -+ if (hdrp->TrAddr != 0) -+ ep4_set_event_cmd (rail->r_event_ecq, hdrp->TrAddr); -+ return; -+ -+ case TR_INPUT_Q_COMMIT & TR_OPCODE_MASK: -+ if ((inputq = ep_elan2sdram (&rail->r_generic, hdrp->TrAddr)) == 0) -+ printk ("%s: TR_INPUT_Q_COMMIT at %llx is not sdram\n", rail->r_generic.Name, (long long)hdrp->TrAddr); -+ else -+ { -+ if ((event = elan4_sdram_readq (rail->r_ctxt.ctxt_dev, inputq + offsetof (E4_InputQueue, q_event))) != 0) -+ ep4_set_event_cmd (rail->r_event_ecq, event); -+ return; -+ } -+ } -+ break; -+ } -+ -+ case InputEopErrorOnWaitForEop: -+ case InputEopErrorTrap: -+ case InputCrcErrorAfterPAckOk: -+ if (! (trap->tr_flags & TR_FLAG_ACK_SENT) || (trap->tr_flags & TR_FLAG_EOP_BAD)) -+ return; -+ -+ if (EP4_CONTEXT_ISDATA (IPROC_NetworkContext (status))) -+ { -+ unsigned int nodeId = EP4_CONTEXT_TO_NODE (IPROC_NetworkContext (status)); -+ -+ if ((trap->tr_flags & (TR_FLAG_DMA_PACKET | TR_FLAG_BAD_TRANS)) || -+ ((trap->tr_flags & TR_FLAG_EOP_ERROR) && (trap->tr_identifyTrans == TR_TRANS_INVALID))) -+ { -+ EPRINTF2 (DBG_NETWORK_ERROR, "%s: network error on dma packet from node %d\n", rail->r_generic.Name, nodeId); -+ printk ("%s: network error on dma packet from node %d\n", rail->r_generic.Name, nodeId); -+ -+ ep_queue_network_error (&rail->r_generic, EP4_CONTEXT_TO_NODE(IPROC_NetworkContext (status)), EP_NODE_NETERR_DMA_PACKET, unit & 1, 0); -+ return; -+ } -+ -+ if (trap->tr_flags & TR_FLAG_EOP_ERROR) -+ { -+ E4_uint64 status = trap->tr_transactions[trap->tr_identifyTrans].IProcStatusCntxAndTrType; -+ EP_NETERR_COOKIE cookie = 0; -+ -+ switch (IPROC_TransactionType (status) & TR_OPCODE_MASK) -+ { -+ case TR_SETEVENT_IDENTIFY & TR_OPCODE_MASK: -+ if (IPROC_TrapValue(status) == InputNoFault) -+ cookie = trap->tr_transactions[trap->tr_identifyTrans].TrAddr; -+ else -+ cookie = trap->tr_dataBuffers[trap->tr_identifyTrans].Data[0]; -+ EPRINTF3(DBG_NETWORK_ERROR, "%s: network error on setevent <%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId); -+ printk ("%s: network error on setevent <%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId); -+ break; -+ -+ case TR_INPUT_Q_COMMIT & TR_OPCODE_MASK: -+ if (IPROC_TrapValue(status) == InputNoFault) -+ cookie = trap->tr_transactions[trap->tr_identifyTrans].TrAddr; -+ else -+ cookie = trap->tr_dataBuffers[trap->tr_identifyTrans].Data[0]; -+ EPRINTF3 (DBG_NETWORK_ERROR, "%s: network error on queue commit <%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId); -+ printk ("%s: network error on queue commit <%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId); -+ break; -+ -+ case TR_REMOTEDMA & TR_OPCODE_MASK: -+ cookie = trap->tr_transactions[trap->tr_identifyTrans].TrAddr; -+ EPRINTF3 (DBG_NETWORK_ERROR, "%s: network error on remote dma <%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId); -+ printk ("%s: network error on remote dma <%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId); -+ break; -+ -+ case TR_IDENTIFY & TR_OPCODE_MASK: -+ cookie = trap->tr_transactions[trap->tr_identifyTrans].TrAddr; -+ EPRINTF3 (DBG_NETWORK_ERROR, "%s: network error on identify <%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId); -+ printk ("%s: network error on identify 
<%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId);
-+ break;
-+
-+ default:
-+ panic ("%s: unknown identify transaction type %x for eop error from node %d\n", rail->r_generic.Name,
-+ IPROC_TransactionType (trap->tr_transactions[trap->tr_identifyTrans].IProcStatusCntxAndTrType), nodeId);
-+ break;
-+ }
-+
-+ ep_queue_network_error (&rail->r_generic, nodeId, EP_NODE_NETERR_ATOMIC_PACKET, unit & 1, cookie);
-+ }
-+ }
-+ return;
-+ }
-+
-+ printk ("%s: unhandled iproc trap\n", rail->r_generic.Name);
-+ elan4_display_iproc_trap (DBG_CONSOLE, 0, "ep4_iproc_trap", trap);
-+}
-+
-+void
-+ep4_interrupt (ELAN4_CTXT *ctxt, E4_uint64 cookie)
-+{
-+ EP4_RAIL *rail = EP4_CTXT_TO_RAIL (ctxt);
-+ EP4_INTCOOKIE *cp = ep4_lookup_intcookie (rail, cookie);
-+
-+ if (cp == NULL)
-+ {
-+ printk ("ep4_interrupt: cannot find event cookie for %016llx\n", (long long) cookie);
-+ return;
-+ }
-+
-+ cp->int_callback (rail, cp->int_arg);
-+}
-+
-+ELAN4_TRAP_OPS ep4_trap_ops =
-+{
-+ ep4_eproc_trap,
-+ ep4_cproc_trap,
-+ ep4_dproc_trap,
-+ ep4_tproc_trap,
-+ ep4_iproc_trap,
-+ ep4_interrupt,
-+};
-+
-+void
-+ep4_flush_filters (EP_RAIL *r)
-+{
-+ /* nothing to do here as elan4_set_filter() flushes the context filter */
-+}
-+
-+struct flush_queues_desc
-+{
-+ EP4_RAIL *rail;
-+ volatile int done;
-+} ;
-+
-+static void
-+ep4_flush_queues_flushop (ELAN4_DEV *dev, void *arg, int qfull)
-+{
-+ struct flush_queues_desc *desc = (struct flush_queues_desc *) arg;
-+ EP4_RAIL *rail = desc->rail;
-+ E4_uint64 qptrs = read_reg64 (dev, DProcHighPriPtrs);
-+ E4_uint32 qsize = E4_QueueSize (E4_QueueSizeValue (qptrs));
-+ E4_uint32 qfptr = E4_QueueFrontPointer (qptrs);
-+ E4_uint32 qbptr = E4_QueueBackPointer (qptrs);
-+ E4_DProcQueueEntry qentry;
-+ unsigned long flags;
-+
-+ while ((qfptr != qbptr) || qfull)
-+ {
-+ E4_uint64 typeSize = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_typeSize));
-+
-+ if (DMA_Context (typeSize) == rail->r_ctxt.ctxt_num)
-+ {
-+ E4_uint64 vp = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_vproc));
-+ EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[EP_VP_TO_NODE(vp)];
-+
-+ EP4_ASSERT (rail, !EP_VP_ISDATA(vp) || (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE));
-+
-+ if (EP_VP_ISDATA(vp) && nodeRail->State == EP_NODE_LOCAL_PASSIVATE)
-+ {
-+ /*
-+ * This is a DMA going to the node which is being removed,
-+ * so move it onto the node dma list where it will get
-+ * handled later.
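-+ * The vacated queue slot must still parse as a valid descriptor, so
-+ * (as the stores below show) it is rewritten as a harmless zero-sized
-+ * DMA owned by the device context:
-+ *
-+ * qentry.Desc.dma_typeSize = DMA_ShMemWrite | dev->dev_ctxt.ctxt_num;
-+ * (all remaining descriptor words are zeroed)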
-+ */ -+ qentry.Desc.dma_typeSize = typeSize; -+ qentry.Desc.dma_cookie = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_cookie)); -+ qentry.Desc.dma_vproc = vp; -+ qentry.Desc.dma_srcAddr = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_srcAddr)); -+ qentry.Desc.dma_dstAddr = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_dstAddr)); -+ qentry.Desc.dma_srcEvent = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_srcEvent)); -+ qentry.Desc.dma_dstEvent = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_dstEvent)); -+ -+ EPRINTF4 (DBG_RETRY, "ep4_flush_dmas: %016llx %016llx %016llx %016llx\n", (long long)qentry.Desc.dma_typeSize, -+ (long long)qentry.Desc.dma_cookie, (long long)qentry.Desc.dma_vproc, (long long)qentry.Desc.dma_srcAddr); -+ EPRINTF3 (DBG_RETRY, " %016llx %016llx %016llx\n", (long long)qentry.Desc.dma_dstAddr, -+ (long long)qentry.Desc.dma_srcEvent, (long long)qentry.Desc.dma_dstEvent); -+ -+ ep4_queue_dma_stalled (rail, &qentry.Desc); -+ -+ qentry.Desc.dma_typeSize = DMA_ShMemWrite | dev->dev_ctxt.ctxt_num; -+ qentry.Desc.dma_cookie = 0; -+ qentry.Desc.dma_vproc = 0; -+ qentry.Desc.dma_srcAddr = 0; -+ qentry.Desc.dma_dstAddr = 0; -+ qentry.Desc.dma_srcEvent = 0; -+ qentry.Desc.dma_dstEvent = 0; -+ -+ elan4_sdram_copyq_to_sdram (dev, &qentry, qfptr, sizeof (E4_DProcQueueEntry)); -+ } -+ } -+ -+ qfptr = (qfptr & ~(qsize-1)) | ((qfptr + sizeof (E4_DProcQueueEntry)) & (qsize-1)); -+ qfull = 0; -+ } -+ -+ spin_lock_irqsave (&rail->r_haltop_lock, flags); -+ desc->done = 1; -+ kcondvar_wakeupall (&rail->r_haltop_sleep, &rail->r_haltop_lock); -+ spin_unlock_irqrestore (&rail->r_haltop_lock, flags); -+} -+ -+static void -+ep4_flush_queues_haltop (ELAN4_DEV *dev, void *arg) -+{ -+ struct flush_queues_desc *desc = (struct flush_queues_desc *) arg; -+ -+ elan4_queue_dma_flushop (dev, &desc->rail->r_flushop, 1); -+} -+ -+void -+ep4_flush_queues (EP_RAIL *r) -+{ -+ EP4_RAIL *rail = (EP4_RAIL *) r; -+ struct flush_queues_desc desc; -+ struct list_head *el, *nel; -+ unsigned long flags; -+ int i; -+ -+ /* initialise descriptor */ -+ desc.rail = rail; -+ desc.done = 0; -+ -+ /* First - stall the dma retry thread, so that it will no longer restart -+ * any dma's from the retry list */ -+ ep_kthread_stall (&rail->r_retry_thread); -+ -+ /* Second - flush through all command queues targetted by events, thread etc */ -+ ep4_flush_ecqs (rail); -+ -+ /* Third - queue a halt operation to flush through all DMA's which are executing -+ * or on the run queues */ -+ kmutex_lock (&rail->r_haltop_mutex); -+ -+ rail->r_haltop.op_mask = INT_DProcHalted; -+ rail->r_haltop.op_function = ep4_flush_queues_haltop; -+ rail->r_haltop.op_arg = &desc; -+ -+ rail->r_flushop.op_function = ep4_flush_queues_flushop; -+ rail->r_flushop.op_arg = &desc; -+ -+ elan4_queue_haltop (rail->r_ctxt.ctxt_dev, &rail->r_haltop); -+ -+ spin_lock_irqsave (&rail->r_haltop_lock, flags); -+ while (! desc.done) -+ kcondvar_wait (&rail->r_haltop_sleep, &rail->r_haltop_lock, &flags); -+ spin_unlock_irqrestore (&rail->r_haltop_lock, flags); -+ kmutex_unlock (&rail->r_haltop_mutex); -+ -+ /* Fourth - run down the dma retry lists and move all entries to the cancelled -+ * list. 
Any dma's which were on the run queues have already been -+ * moved there */ -+ spin_lock_irqsave (&rail->r_dma_lock, flags); -+ for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++) -+ { -+ list_for_each_safe (el,nel, &rail->r_dma_retrylist[i]) { -+ EP4_DMA_RETRY *retry = list_entry (el, EP4_DMA_RETRY, retry_link); -+ EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[EP_VP_TO_NODE(retry->retry_dma.dma_vproc)]; -+ -+ EP4_ASSERT (rail, nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE); -+ -+ if (nodeRail->State == EP_NODE_LOCAL_PASSIVATE) -+ { -+ list_del (&retry->retry_link); -+ list_add_tail (&retry->retry_link, &nodeRail->StalledDmas); -+ } -+ } -+ } -+ spin_unlock_irqrestore (&rail->r_dma_lock, flags); -+ -+ /* Finally - allow the retry thread to run again */ -+ ep_kthread_resume (&rail->r_retry_thread); -+} -+ -+struct write_qdesc_desc -+{ -+ EP4_RAIL *rail; -+ sdramaddr_t qaddr; -+ E4_InputQueue *qdesc; -+ volatile int done; -+} ; -+ -+static void -+ep4_write_qdesc_haltop (ELAN4_DEV *dev, void *arg) -+{ -+ struct write_qdesc_desc *desc = (struct write_qdesc_desc *) arg; -+ EP4_RAIL *rail = desc->rail; -+ unsigned long flags; -+ -+ elan4_sdram_copyq_to_sdram (dev, desc->qdesc, desc->qaddr, sizeof (E4_InputQueue)); -+ -+ spin_lock_irqsave (&rail->r_haltop_lock, flags); -+ desc->done = 1; -+ kcondvar_wakeupall (&rail->r_haltop_sleep, &rail->r_haltop_lock); -+ spin_unlock_irqrestore (&rail->r_haltop_lock, flags); -+} -+ -+void -+ep4_write_qdesc (EP4_RAIL *rail, sdramaddr_t qaddr, E4_InputQueue *qdesc) -+{ -+ struct write_qdesc_desc desc; -+ unsigned long flags; -+ -+ /* initialise descriptor */ -+ desc.rail = rail; -+ desc.qaddr = qaddr; -+ desc.qdesc = qdesc; -+ desc.done = 0; -+ -+ kmutex_lock (&rail->r_haltop_mutex); -+ -+ rail->r_haltop.op_mask = INT_DiscardingHighPri; -+ rail->r_haltop.op_function = ep4_write_qdesc_haltop; -+ rail->r_haltop.op_arg = &desc; -+ -+ elan4_queue_haltop (rail->r_ctxt.ctxt_dev, &rail->r_haltop); -+ -+ spin_lock_irqsave (&rail->r_haltop_lock, flags); -+ while (! 
desc.done)
-+	kcondvar_wait (&rail->r_haltop_sleep, &rail->r_haltop_lock, &flags);
-+    spin_unlock_irqrestore (&rail->r_haltop_lock, flags);
-+
-+    kmutex_unlock (&rail->r_haltop_mutex);
-+}
-+
-+#define CQ_SIZE_NWORDS	((CQ_Size (ecq->ecq_cq->cq_size) >> 3) - 8)	/* available number of dwords (less enough to flush) */
-+
-+EP4_ECQ *
-+ep4_alloc_ecq (EP4_RAIL *rail, unsigned cqsize)
-+{
-+    ELAN4_DEV     *dev = rail->r_ctxt.ctxt_dev;
-+    EP4_ECQ       *ecq;
-+    unsigned long  pgoff, cqaddr;
-+
-+    /* no space available, so allocate a new entry */
-+    KMEM_ZALLOC (ecq, EP4_ECQ *, sizeof (EP4_ECQ), 1);
-+
-+    if (ecq == NULL)
-+	return 0;
-+
-+    if ((ecq->ecq_cq = elan4_alloccq (&rail->r_ctxt, cqsize, CQ_EnableAllBits, CQ_Priority)) == NULL)
-+    {
-+	KMEM_FREE (ecq, sizeof (EP4_ECQ));
-+	return 0;
-+    }
-+
-+    pgoff  = (ecq->ecq_cq->cq_mapping & (PAGE_SIZE-1));
-+    cqaddr = (ecq->ecq_cq->cq_cqa->cqa_cqnum + ecq->ecq_cq->cq_idx + dev->dev_cqoffset) * CQ_CommandMappingSize;
-+
-+    ecq->ecq_addr  = ep_rmalloc (rail->r_ecq_rmap, PAGESIZE, 0) + pgoff;
-+    ecq->ecq_avail = CQ_SIZE_NWORDS;		/* available number of dwords (less enough to flush) */
-+
-+    ecq->ecq_intop.op_function = (ELAN4_HALTFN *) elan4_restartcq;
-+    ecq->ecq_intop.op_arg      = ecq->ecq_cq;
-+
-+    ep4_cport_map (&rail->r_generic, ecq->ecq_addr - pgoff, cqaddr - pgoff, PAGESIZE, EP_PERM_WRITE);
-+
-+    spin_lock_init (&ecq->ecq_lock);
-+
-+    return ecq;
-+}
-+
-+void
-+ep4_free_ecq (EP4_RAIL *rail, EP4_ECQ *ecq)
-+{
-+    unsigned long pgoff = (ecq->ecq_cq->cq_mapping & (PAGE_SIZE-1));
-+
-+    spin_lock_destroy (&ecq->ecq_lock);
-+
-+    ep4_unmap (&rail->r_generic, ecq->ecq_addr - pgoff, PAGESIZE);
-+    ep_rmfree (rail->r_ecq_rmap, PAGESIZE, ecq->ecq_addr - pgoff);
-+
-+    elan4_freecq (&rail->r_ctxt, ecq->ecq_cq);
-+
-+    KMEM_FREE (ecq, sizeof (EP4_ECQ));
-+}
-+
-+EP4_ECQ *
-+ep4_get_ecq (EP4_RAIL *rail, unsigned which, unsigned ndwords)
-+{
-+    ELAN4_DEV        *dev = rail->r_ctxt.ctxt_dev;
-+    struct list_head *el;
-+    unsigned long     flags;
-+    EP4_ECQ          *ecq;
-+
-+    spin_lock_irqsave (&rail->r_ecq_lock, flags);
-+    list_for_each (el, &rail->r_ecq_list[which]) {
-+	EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
-+
-+	if (ecq->ecq_avail >= ndwords)
-+	{
-+	    ecq->ecq_avail -= ndwords;
-+
-+	    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
-+
-+	    return ecq;
-+	}
-+    }
-+    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
-+
-+    if ((ecq = ep4_alloc_ecq (rail, EP4_ECQ_Size (which))) == NULL)
-+	return NULL;
-+
-+    if (which == EP4_ECQ_EVENT)
-+    {
-+	if ((ecq->ecq_event = ep_alloc_elan (&rail->r_generic, sizeof (E4_Event32), 0, &ecq->ecq_event_addr)) == 0)
-+	{
-+	    ep4_free_ecq (rail, ecq);
-+	    return NULL;
-+	}
-+
-+	elan4_sdram_writeq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_CountAndType),
-+			    E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
-+	elan4_sdram_writeq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_WritePtr),
-+			    ecq->ecq_addr);
-+	elan4_sdram_writeq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_WriteValue),
-+			    SET_EVENT_CMD | (rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_flush_event)));
-+
-+	if ((ecq->ecq_flushcq = ep4_get_ecq (rail, EP4_ECQ_SINGLE, 1)) == NULL)
-+	{
-+	    ep_free_elan (&rail->r_generic, ecq->ecq_event_addr, sizeof (E4_Event32));
-+	    ep4_free_ecq (rail, ecq);
-+	    return NULL;
-+	}
-+    }
-+
-+    spin_lock_irqsave (&rail->r_ecq_lock, flags);
-+    list_add (&ecq->ecq_link, &rail->r_ecq_list[which]);
-+
-+    ecq->ecq_avail -= ndwords;
-+    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
-+
-+    return ecq;
-+}
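Taken together, ep4_get_ecq and ep4_put_ecq behave as a reference-counted sub-allocator over shared command queues: space is reserved in dwords, a queue stays shared while it still has unreserved dwords, and it is only torn down when the last reservation comes back. A minimal userspace model of that accounting, with hypothetical names (a sketch, not the driver API):

#include <stdio.h>
#include <stdlib.h>

#define CQ_NWORDS 64                    /* capacity of one shared queue */

struct ecq { int avail; };

/* get: reserve ndwords from an existing queue, else allocate a new one */
static struct ecq *ecq_get(struct ecq **list, int n)
{
    if (*list && (*list)->avail >= n) {
        (*list)->avail -= n;
        return *list;
    }
    struct ecq *q = malloc(sizeof *q);
    if (q == NULL)
        return NULL;
    q->avail = CQ_NWORDS - n;
    *list = q;                          /* single-slot "list" for brevity */
    return q;
}

/* put: return ndwords; free the queue once it is completely unused */
static void ecq_put(struct ecq **list, struct ecq *q, int n)
{
    q->avail += n;
    if (q->avail == CQ_NWORDS) {
        *list = NULL;
        free(q);
        printf("queue fully released, freed\n");
    }
}

int main(void)
{
    struct ecq *list = NULL;
    struct ecq *q = ecq_get(&list, 16);
    printf("after get: avail=%d\n", q->avail);   /* prints 48 */
    ecq_put(&list, q, 16);                       /* frees the queue */
    return 0;
}
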
-+void
-+ep4_put_ecq (EP4_RAIL *rail, EP4_ECQ *ecq, unsigned ndwords)
-+{
-+    unsigned long flags;
-+
-+    spin_lock_irqsave (&rail->r_ecq_lock, flags);
-+
-+    ecq->ecq_avail += ndwords;
-+
-+    if (ecq->ecq_avail != CQ_SIZE_NWORDS)
-+	spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
-+    else
-+    {
-+	list_del (&ecq->ecq_link);
-+	spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
-+
-+	if (ecq->ecq_flushcq)
-+	    ep4_put_ecq (rail, ecq->ecq_flushcq, 1);
-+	if (ecq->ecq_event_addr)
-+	    ep_free_elan (&rail->r_generic, ecq->ecq_event_addr, sizeof (E4_Event32));
-+
-+	ep4_free_ecq (rail, ecq);
-+    }
-+}
-+
-+void
-+ep4_nop_cmd (EP4_ECQ *ecq, E4_uint64 tag)
-+{
-+    unsigned long flags;
-+
-+    spin_lock_irqsave (&ecq->ecq_lock, flags);
-+    elan4_nop_cmd (ecq->ecq_cq, tag);
-+    spin_unlock_irqrestore (&ecq->ecq_lock, flags);
-+
-+}
-+
-+void
-+ep4_set_event_cmd (EP4_ECQ *ecq, E4_Addr event)
-+{
-+    unsigned long flags;
-+
-+    spin_lock_irqsave (&ecq->ecq_lock, flags);
-+    elan4_set_event_cmd (ecq->ecq_cq, event);
-+    spin_unlock_irqrestore (&ecq->ecq_lock, flags);
-+}
-+
-+void
-+ep4_wait_event_cmd (EP4_ECQ *ecq, E4_Addr event, E4_uint64 candt, E4_uint64 param0, E4_uint64 param1)
-+{
-+    unsigned long flags;
-+
-+    spin_lock_irqsave (&ecq->ecq_lock, flags);
-+    elan4_wait_event_cmd (ecq->ecq_cq, event, candt, param0, param1);
-+    spin_unlock_irqrestore (&ecq->ecq_lock, flags);
-+}
-+
-+void
-+ep4_flush_interrupt (EP4_RAIL *rail, void *arg)
-+{
-+    unsigned long flags;
-+
-+    spin_lock_irqsave (&rail->r_ecq_lock, flags);
-+    rail->r_flush_count = 0;
-+    kcondvar_wakeupone (&rail->r_flush_sleep, &rail->r_ecq_lock);
-+    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
-+}
-+
-+void
-+ep4_flush_ecqs (EP4_RAIL *rail)
-+{
-+    ELAN4_DEV        *dev = rail->r_ctxt.ctxt_dev;
-+    struct list_head *el;
-+    unsigned long     flags;
-+    int               i;
-+
-+    kmutex_lock (&rail->r_flush_mutex);
-+
-+    EP4_SDRAM_ASSERT (rail, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event), E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG,0));
-+
-+    spin_lock_irqsave (&rail->r_ecq_lock, flags);
-+    /* first flush all the "event" queues */
-+    list_for_each (el, &rail->r_ecq_list[EP4_ECQ_EVENT]) {
-+	EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
-+
-+	elan4_sdram_writeq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_CountAndType),
-+			    E4_EVENT_INIT_VALUE (-32, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
-+
-+	ep4_set_event_cmd (ecq->ecq_flushcq, ecq->ecq_event_addr);
-+
-+	rail->r_flush_count++;
-+    }
-+
-+    /* next issue the setevents to all the other queues */
-+    for (i = EP4_ECQ_ATOMIC; i < EP4_NUM_ECQ; i++)
-+    {
-+	list_for_each (el, &rail->r_ecq_list[i]) {
-+	    EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
-+
-+	    ep4_set_event_cmd (ecq, rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_flush_event));
-+
-+	    rail->r_flush_count++;
-+	}
-+    }
-+
-+    /* issue the waitevent command */
-+    ep4_wait_event_cmd (rail->r_flush_mcq, rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_flush_event),
-+			E4_EVENT_INIT_VALUE (-32 * rail->r_flush_count, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG,0),
-+			rail->r_flush_ecq->ecq_addr,
-+			INTERRUPT_CMD | (rail->r_flush_intcookie.int_val << E4_MAIN_INT_SHIFT));
-+
-+    while (rail->r_flush_count)
-+	if (kcondvar_timedwait (&rail->r_flush_sleep, &rail->r_ecq_lock, &flags, (lbolt + (HZ*10))) == -1)
-+	    elan4_hardware_lock_check(dev, "flush_ecqs");
-+
-+    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
-+
-+    EP4_SDRAM_ASSERT (rail, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event), E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG,0));
-+
-+    kmutex_unlock (&rail->r_flush_mutex);
-+}
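The flush above is a countdown barrier: the waitevent primes r_flush_event to -32 for every queue that was told to flush, each setevent raises the count by 32, and the completion interrupt can only fire once every queue has drained and the count reaches zero. A minimal userspace model of that countdown (illustrative only, not driver code):

#include <stdio.h>

/* Countdown-event model: the master event is primed to -32 * nqueues;
 * each flushed queue raises it by 32, and it "fires" at zero, mirroring
 * the E4_EVENT_INIT_VALUE (-32 * r_flush_count, ...) waitevent above. */
int main(void)
{
    int  nqueues = 5;
    long count   = -32L * nqueues;      /* primed by the waitevent */

    for (int q = 0; q < nqueues; q++) {
        count += 32;                    /* one setevent per flushed queue */
        printf("queue %d flushed, count=%ld\n", q, count);
    }

    if (count == 0)
        printf("event fired: all command queues drained\n");
    return 0;
}
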
-+void
-+ep4_init_thread (EP4_RAIL *rail, E4_ThreadRegs *regs, sdramaddr_t stackTop,
-+		 EP_ADDR stackAddr, E4_Addr startpc, int nargs,...)
-+{
-+    sdramaddr_t sp = stackTop - roundup (nargs * sizeof (E4_uint64), E4_STACK_ALIGN);
-+    int         i;
-+    va_list     ap;
-+
-+    /*
-+     * the thread start code expects the following:
-+     *   %r1 = stack pointer
-+     *   %r6 = frame pointer
-+     *   %r2 = function to call
-+     *
-+     * function args are stored on the stack above %sp
-+     */
-+
-+    va_start(ap, nargs);
-+    for (i = 0; i < nargs; i++)
-+	elan4_sdram_writeq (rail->r_ctxt.ctxt_dev, sp + (i * sizeof (E4_uint64)), va_arg (ap, E4_uint64));
-+    va_end (ap);
-+
-+    regs->Registers[0] = ep_symbol (&rail->r_threadcode, ".thread_start");	/* %r0 - PC */
-+    regs->Registers[1] = stackAddr - (stackTop - sp);				/* %r1 - stack pointer */
-+    regs->Registers[2] = startpc;						/* %r2 - start pc */
-+    regs->Registers[3] = 0;
-+    regs->Registers[4] = 0;
-+    regs->Registers[5] = 0;
-+    regs->Registers[6] = stackTop;						/* %r6 - frame pointer */
-+}
-+
-+/* retransmission thread */
-+
-+void
-+ep4_add_retry_ops (EP4_RAIL *rail, EP4_RETRY_OPS *ops)
-+{
-+    ep_kthread_stall (&rail->r_retry_thread);
-+    list_add_tail (&ops->op_link, &rail->r_retry_ops);
-+    ep_kthread_resume (&rail->r_retry_thread);
-+}
-+
-+void
-+ep4_remove_retry_ops (EP4_RAIL *rail, EP4_RETRY_OPS *ops)
-+{
-+    ep_kthread_stall (&rail->r_retry_thread);
-+    list_del (&ops->op_link);
-+    ep_kthread_resume (&rail->r_retry_thread);
-+}
-+
-+void
-+ep4_retry_thread (EP4_RAIL *rail)
-+{
-+    struct list_head *el;
-+
-+    kernel_thread_init ("ep4_retry");
-+
-+    for (;;)
-+    {
-+	long nextRunTime = 0;
-+
-+	list_for_each (el, &rail->r_retry_ops) {
-+	    EP4_RETRY_OPS *ops = list_entry (el, EP4_RETRY_OPS, op_link);
-+
-+	    nextRunTime = ops->op_func (rail, ops->op_arg, nextRunTime);
-+	}
-+
-+	if (ep_kthread_sleep (&rail->r_retry_thread, nextRunTime) < 0)
-+	    break;
-+    }
-+
-+    ep_kthread_stopped (&rail->r_retry_thread);
-+
-+    kernel_thread_exit();
-+}
-+
-+/* DMA retransmission */
-+static unsigned ep4_dma_retry_times[EP_NUM_RETRIES];
-+
-+static unsigned long
-+ep4_retry_dmas (EP4_RAIL *rail, void *arg, unsigned long nextRunTime)
-+{
-+    unsigned long yieldAt = lbolt + (hz/10);
-+    unsigned long flags;
-+    int           i;
-+
-+    for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
-+    {
-+	while (! list_empty (&rail->r_dma_retrylist[i]))
-+	{
-+	    EP4_DMA_RETRY *retry = list_entry (rail->r_dma_retrylist[i].next, EP4_DMA_RETRY, retry_link);
-+
-+	    if (! AFTER(lbolt, retry->retry_time))
-+		break;
-+
-+	    if (ep_kthread_should_stall (&rail->r_retry_thread) || AFTER (lbolt, yieldAt))
-+		goto cant_do_more;
-+
-+	    EPRINTF3 (DBG_RETRY, "%s: ep4_retry_dmas: flowcnt %llx %llx\n", rail->r_generic.Name, (long long)rail->r_dma_flowcnt, (long long)rail->r_main->r_dma_flowcnt);
-+
-+	    if ((rail->r_dma_flowcnt - rail->r_main->r_dma_flowcnt) > EP4_DMA_RETRY_FLOWCNT)
-+	    {
-+		printk ("ep4_retry_dmas: flowcnt %llx %llx\n", (long long)rail->r_dma_flowcnt, (long long)rail->r_main->r_dma_flowcnt);
-+
-+		goto cant_do_more;
-+	    }
-+
-+	    EPRINTF4 (DBG_RETRY, "%s: ep4_retry_dmas: %016llx %016llx %016llx\n", rail->r_generic.Name,
-+		      (long long)retry->retry_dma.dma_typeSize, (long long)retry->retry_dma.dma_cookie, (long long)retry->retry_dma.dma_vproc);
-+	    EPRINTF5 (DBG_RETRY, "%s: %016llx %016llx %016llx %016llx\n", rail->r_generic.Name,
-+		      (long long)retry->retry_dma.dma_srcAddr, (long long)retry->retry_dma.dma_dstAddr, (long long)retry->retry_dma.dma_srcEvent,
-+		      (long long)retry->retry_dma.dma_dstEvent);
-+
-+	    elan4_run_dma_cmd (rail->r_dma_ecq->ecq_cq, &retry->retry_dma);
-+	    elan4_write_dword_cmd (rail->r_dma_ecq->ecq_cq, rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_dma_flowcnt), ++rail->r_dma_flowcnt);
-+
-+	    spin_lock_irqsave (&rail->r_dma_lock, flags);
-+	    list_del (&retry->retry_link);
-+	    list_add (&retry->retry_link, &rail->r_dma_freelist);
-+	    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
-+	}
-+    }
-+ cant_do_more:
-+
-+    /* re-compute the next retry time */
-+    for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
-+    {
-+	if (! list_empty (&rail->r_dma_retrylist[i]))
-+	{
-+	    EP4_DMA_RETRY *retry = list_entry (rail->r_dma_retrylist[i].next, EP4_DMA_RETRY, retry_link);
-+
-+	    SET_NEXT_RUN_TIME (nextRunTime, retry->retry_time);
-+	}
-+    }
-+
-+    return nextRunTime;
-+}
-+
-+void
-+ep4_initialise_dma_retries (EP4_RAIL *rail)
-+{
-+    int i;
-+
-+    spin_lock_init (&rail->r_dma_lock);
-+
-+    for (i = 0; i < EP_NUM_RETRIES; i++)
-+	INIT_LIST_HEAD (&rail->r_dma_retrylist[i]);
-+
-+    INIT_LIST_HEAD (&rail->r_dma_freelist);
-+
-+    rail->r_dma_ecq = ep4_alloc_ecq (rail, EP4_DMA_RETRY_CQSIZE);
-+
-+    rail->r_dma_allocated = 0;
-+    rail->r_dma_reserved  = 0;
-+
-+    ep4_dma_retry_times[EP_RETRY_HIGH_PRI] = EP_RETRY_HIGH_PRI_TIME;
-+
-+    for (i = 0; i < EP_NUM_BACKOFF; i++)
-+	ep4_dma_retry_times[EP_RETRY_HIGH_PRI_RETRY+i] = EP_RETRY_HIGH_PRI_TIME << i;
-+
-+    ep4_dma_retry_times[EP_RETRY_LOW_PRI] = EP_RETRY_LOW_PRI_TIME;
-+
-+    for (i = 0; i < EP_NUM_BACKOFF; i++)
-+	ep4_dma_retry_times[EP_RETRY_LOW_PRI_RETRY+i] = EP_RETRY_LOW_PRI_TIME << i;
-+
-+    ep4_dma_retry_times[EP_RETRY_ANONYMOUS] = EP_RETRY_ANONYMOUS_TIME;
-+    ep4_dma_retry_times[EP_RETRY_NETERR]    = EP_RETRY_NETERR_TIME;
-+
-+    rail->r_dma_ops.op_func = ep4_retry_dmas;
-+    rail->r_dma_ops.op_arg  = NULL;
-+
-+    ep4_add_retry_ops (rail, &rail->r_dma_ops);
-+}
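Each backoff band in the table above fills its slots with an exponentially growing delay, base << i ticks for slot i. A standalone sketch of the resulting schedule (illustrative base values, not the real EP_RETRY_*_TIME constants):

#include <stdio.h>

#define NUM_BACKOFF 8               /* stand-in for EP_NUM_BACKOFF */

int main(void)
{
    unsigned high_base = 1, low_base = 4;   /* illustrative tick counts */

    /* slot i of each band waits base << i ticks before retrying */
    for (int i = 0; i < NUM_BACKOFF; i++)
        printf("high-pri retry %d: %4u ticks, low-pri retry %d: %4u ticks\n",
               i, high_base << i, i, low_base << i);
    return 0;
}
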
-+void
-+ep4_finalise_dma_retries (EP4_RAIL *rail)
-+{
-+    ep4_remove_retry_ops (rail, &rail->r_dma_ops);
-+
-+    /* Everyone should have given back their retry dma's by now */
-+    EP4_ASSERT (rail, rail->r_dma_reserved == 0);
-+
-+    while (! list_empty (&rail->r_dma_freelist))
-+    {
-+	EP4_DMA_RETRY *retry = list_entry (rail->r_dma_freelist.next, EP4_DMA_RETRY, retry_link);
-+
-+	list_del (&retry->retry_link);
-+
-+	KMEM_FREE (retry, sizeof (EP4_DMA_RETRY));
-+    }
-+
-+    ep4_free_ecq (rail, rail->r_dma_ecq);
-+
-+    spin_lock_destroy (&rail->r_dma_lock);
-+}
-+
-+int
-+ep4_reserve_dma_retries (EP4_RAIL *rail, unsigned int count, EP_ATTRIBUTE attr)
-+{
-+    EP4_DMA_RETRY *retry;
-+    unsigned int   remaining = count;
-+    unsigned long  flags;
-+
-+    spin_lock_irqsave (&rail->r_dma_lock, flags);
-+
-+    if (remaining <= (rail->r_dma_allocated - rail->r_dma_reserved))
-+    {
-+	rail->r_dma_reserved += remaining;
-+
-+	spin_unlock_irqrestore (&rail->r_dma_lock, flags);
-+
-+	return 0;
-+    }
-+
-+    remaining -= (rail->r_dma_allocated - rail->r_dma_reserved);
-+
-+    rail->r_dma_reserved = rail->r_dma_allocated;
-+
-+    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
-+
-+    while (remaining > 0)
-+    {
-+	KMEM_ALLOC (retry, EP4_DMA_RETRY *, sizeof (EP4_DMA_RETRY), !(attr & EP_NO_SLEEP));
-+
-+	if (retry == NULL)
-+	    goto failed;
-+
-+	remaining--;
-+
-+	spin_lock_irqsave (&rail->r_dma_lock, flags);
-+	list_add (&retry->retry_link, &rail->r_dma_freelist);
-+
-+	rail->r_dma_allocated++;
-+	rail->r_dma_reserved++;
-+	spin_unlock_irqrestore (&rail->r_dma_lock, flags);
-+    }
-+
-+    return 0;
-+
-+ failed:
-+    spin_lock_irqsave (&rail->r_dma_lock, flags);
-+    rail->r_dma_reserved -= (count - remaining);
-+    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
-+
-+    return 1;
-+}
-+
-+void
-+ep4_release_dma_retries (EP4_RAIL *rail, unsigned int count)
-+{
-+    unsigned long flags;
-+
-+    spin_lock_irqsave (&rail->r_dma_lock, flags);
-+    rail->r_dma_reserved -= count;
-+    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
-+}
-+
-+void
-+ep4_queue_dma_retry (EP4_RAIL *rail, E4_DMA *dma, int interval)
-+{
-+    EP4_DMA_RETRY *retry;
-+    unsigned long  flags;
-+
-+    spin_lock_irqsave (&rail->r_dma_lock, flags);
-+
-+    EP4_ASSERT (rail, !list_empty (&rail->r_dma_freelist));
-+
-+    /* take an item off the free list */
-+    retry = list_entry (rail->r_dma_freelist.next, EP4_DMA_RETRY, retry_link);
-+
-+    list_del (&retry->retry_link);
-+
-+    EPRINTF5 (DBG_RETRY, "%s: ep4_queue_dma_retry: %016llx %016llx %016llx %016llx\n", rail->r_generic.Name,
-+	      (long long)dma->dma_typeSize, (long long)dma->dma_cookie, (long long)dma->dma_vproc, (long long)dma->dma_srcAddr);
-+    EPRINTF5 (DBG_RETRY, "%s: %016llx %016llx %016llx (%d)\n", rail->r_generic.Name,
-+	      (long long)dma->dma_dstAddr, (long long)dma->dma_srcEvent, (long long)dma->dma_dstEvent, interval);
-+
-+    retry->retry_dma.dma_typeSize = dma->dma_typeSize;
-+    retry->retry_dma.dma_cookie   = dma->dma_cookie;
-+    retry->retry_dma.dma_vproc    = dma->dma_vproc;
-+    retry->retry_dma.dma_srcAddr  = dma->dma_srcAddr;
-+    retry->retry_dma.dma_dstAddr  = dma->dma_dstAddr;
-+    retry->retry_dma.dma_srcEvent = dma->dma_srcEvent;
-+    retry->retry_dma.dma_dstEvent = dma->dma_dstEvent;
-+
-+    retry->retry_time = lbolt + ep4_dma_retry_times[interval];
-+
-+    /* chain onto the end of the appropriate retry list */
-+    list_add_tail (&retry->retry_link, &rail->r_dma_retrylist[interval]);
-+
-+    ep_kthread_schedule (&rail->r_retry_thread, retry->retry_time);
-+
-+    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
-+}
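The intended calling pattern is reserve, queue on failure, release: ep4_queue_dma_retry asserts rather than allocates, so a caller must hold reservations for every DMA it may later have to requeue. A hypothetical model of that contract (names invented for illustration, not the driver API):

#include <stdio.h>

/* Queueing a failed DMA must never allocate, so callers reserve
 * free-list entries up front and the queue step only consumes them. */
static int nfree;       /* entries sitting on the free list */
static int reserved;    /* entries promised to callers      */

static int reserve_retries(int n)        /* cf. ep4_reserve_dma_retries */
{
    while (nfree - reserved < n)
        nfree++;                         /* models KMEM_ALLOC of an entry */
    reserved += n;
    return 0;
}

static void queue_retry(const char *why) /* cf. ep4_queue_dma_retry */
{
    if (nfree == 0) {
        fprintf(stderr, "assertion: free list empty\n");
        return;
    }
    nfree--;                             /* entry moves to a retry list */
    printf("queued for retry (%s), free=%d\n", why, nfree);
}

static void retry_done(void)        { nfree++; }     /* back to free list */
static void release_retries(int n)  { reserved -= n; }

int main(void)
{
    reserve_retries(2);          /* before issuing two DMAs */
    queue_retry("packet nacked");
    queue_retry("output timeout");
    retry_done();
    retry_done();
    release_retries(2);          /* reservation returned */
    return 0;
}
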
-+void
-+ep4_queue_dma_stalled (EP4_RAIL *rail, E4_DMA *dma)
-+{
-+    EP_NODE_RAIL  *nodeRail = &rail->r_generic.Nodes[EP_VP_TO_NODE(dma->dma_vproc)];
-+    EP4_DMA_RETRY *retry;
-+    unsigned long  flags;
-+
-+    spin_lock_irqsave (&rail->r_dma_lock, flags);
-+
-+    EP4_ASSERT (rail, !list_empty (&rail->r_dma_freelist));
-+
-+    /* take an item off the free list */
-+    retry = list_entry (rail->r_dma_freelist.next, EP4_DMA_RETRY, retry_link);
-+
-+    list_del (&retry->retry_link);
-+
-+    EPRINTF5 (DBG_RETRY, "%s: ep4_queue_dma_stalled: %016llx %016llx %016llx %016llx\n", rail->r_generic.Name,
-+	      (long long)dma->dma_typeSize, (long long)dma->dma_cookie, (long long)dma->dma_vproc, (long long)dma->dma_srcAddr);
-+    EPRINTF4 (DBG_RETRY, "%s: %016llx %016llx %016llx\n", rail->r_generic.Name,
-+	      (long long)dma->dma_dstAddr, (long long)dma->dma_srcEvent, (long long)dma->dma_dstEvent);
-+
-+    retry->retry_dma.dma_typeSize = dma->dma_typeSize;
-+    retry->retry_dma.dma_cookie   = dma->dma_cookie;
-+    retry->retry_dma.dma_vproc    = dma->dma_vproc;
-+    retry->retry_dma.dma_srcAddr  = dma->dma_srcAddr;
-+    retry->retry_dma.dma_dstAddr  = dma->dma_dstAddr;
-+    retry->retry_dma.dma_srcEvent = dma->dma_srcEvent;
-+    retry->retry_dma.dma_dstEvent = dma->dma_dstEvent;
-+
-+    /* chain onto the node cancelled dma list */
-+    list_add_tail (&retry->retry_link, &nodeRail->StalledDmas);
-+
-+    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
-+}
-+
-+void
-+ep4_free_stalled_dmas (EP4_RAIL *rail, unsigned int nodeId)
-+{
-+    EP_NODE_RAIL     *nodeRail = &rail->r_generic.Nodes[nodeId];
-+    struct list_head *el, *nel;
-+    unsigned long     flags;
-+
-+    spin_lock_irqsave (&rail->r_dma_lock, flags);
-+    list_for_each_safe (el, nel, &nodeRail->StalledDmas) {
-+	list_del (el);
-+	list_add (el, &rail->r_dma_freelist);
-+    }
-+    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
-+}
-+
-+void
-+ep4_display_rail (EP4_RAIL *rail)
-+{
-+    ELAN4_DEV        *dev = rail->r_ctxt.ctxt_dev;
-+    struct list_head *el;
-+    register int      i;
-+    unsigned long     flags;
-+
-+    ep_debugf (DBG_DEBUG, "%s: vendorid=%x deviceid=%x\n", rail->r_generic.Name,
-+	       rail->r_generic.Devinfo.dev_vendor_id, rail->r_generic.Devinfo.dev_device_id);
-+
-+    spin_lock_irqsave (&rail->r_ecq_lock, flags);
-+    for (i = 0; i < EP4_NUM_ECQ; i++)
-+    {
-+	list_for_each (el, &rail->r_ecq_list[i]) {
-+	    EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
-+
-+	    if (i == EP4_ECQ_EVENT)
-+		ep_debugf (DBG_DEBUG, " ECQ[%d] ecq=%p cqnum=%d addr=%llx avail=%d event=%llx,%llx,%llx\n",
-+			   i, ecq, elan4_cq2num (ecq->ecq_cq), (long long)ecq->ecq_addr, ecq->ecq_avail,
-+			   elan4_sdram_readq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_CountAndType)),
-+			   elan4_sdram_readq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_WriteValue)),
-+			   elan4_sdram_readq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_WritePtr)));
-+
-+	    else
-+		ep_debugf (DBG_DEBUG, " ECQ[%d] ecq=%p cqnum=%d addr=%llx avail=%d\n",
-+			   i, ecq, elan4_cq2num (ecq->ecq_cq), (long long)ecq->ecq_addr, ecq->ecq_avail);
-+	}
-+    }
-+    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
-+
-+    ep_debugf (DBG_DEBUG, " flush count=%ld mcq=%p ecq=%p event %llx.%llx.%llx\n",
-+	       rail->r_flush_count, rail->r_flush_mcq, rail->r_flush_ecq,
-+	       elan4_sdram_readq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event.ev_CountAndType)),
-+	       elan4_sdram_readq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event.ev_WritePtr)),
-+	       elan4_sdram_readq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event.ev_WriteValue)));
-+
-+    spin_lock_irqsave (&rail->r_dma_lock, flags);
-+    for (i = 0; i < EP_NUM_RETRIES; i++)
-+    {
-+	list_for_each (el, &rail->r_dma_retrylist[i]) {
-+	    EP4_DMA_RETRY *retry = list_entry (el, EP4_DMA_RETRY, retry_link);
-+
-+	    ep_debugf (DBG_DEBUG, " RETRY[%d] typeSize %llx cookie %llx vproc %llx events %llx %llx\n",
-+		       i, (long long)retry->retry_dma.dma_typeSize, (long long)retry->retry_dma.dma_cookie,
-+		       (long long)retry->retry_dma.dma_vproc, (long long)retry->retry_dma.dma_srcEvent, (long long)retry->retry_dma.dma_dstEvent);
-+	}
-+    }
-+    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
-+}
-diff -urN clean/drivers/net/qsnet/ep/threadcode.c linux-2.6.9/drivers/net/qsnet/ep/threadcode.c
---- clean/drivers/net/qsnet/ep/threadcode.c	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/ep/threadcode.c	2003-10-07 09:22:38.000000000 -0400
-@@ -0,0 +1,146 @@
-+/*
-+ *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ *    Copyright (c) 2002-2003 by Quadrics Ltd.
-+ *
-+ *    For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ident "@(#)$Id: threadcode.c,v 1.11 2003/10/07 13:22:38 david Exp $"
-+/* $Source: /cvs/master/quadrics/epmod/threadcode.c,v $ */
-+
-+#include
-+
-+#include
-+
-+EP_ADDR
-+ep_symbol (EP_CODE *code, char *name)
-+{
-+    EP_SYMBOL *s = code->symbols;
-+
-+    while (s->name && strcmp (s->name, name))
-+	s++;
-+
-+    return (s->name ? s->value : (EP_ADDR) 0);
-+}
-+
-+int
-+ep_loadcode (EP_RAIL *rail, EP_CODE *code)
-+{
-+    register int i;
-+
-+    EP_ADDR _stext  = ep_symbol (code, "_stext");
-+    EP_ADDR _etext  = ep_symbol (code, "_etext");
-+    EP_ADDR _sdata  = ep_symbol (code, "_sdata");
-+    EP_ADDR _edata  = ep_symbol (code, "_edata");
-+    EP_ADDR _end    = ep_symbol (code, "_end");
-+    EP_ADDR _rodata = roundup (_etext, sizeof (uint64_t));
-+
-+    if (_stext == (EP_ADDR) 0 || _etext == (EP_ADDR) 0 ||
-+	_sdata == (EP_ADDR) 0 || _edata == (EP_ADDR) 0 ||
-+	_end   == (EP_ADDR) 0)
-+    {
-+	printk ("ep_loadcode: symbols not defined correctly for code at %p\n", code);
-+	return (EINVAL);
-+    }
-+
-+    /*
-+     * Include the rodata in the text segment
-+     */
-+    _etext = _rodata + code->rodata_size;
-+
-+    /*
-+     * If _etext is in the same page as _sdata, then allocate a contiguous
-+     * chunk of memory and map it as read/write.  Otherwise allocate two chunks
-+     * and map the code in as read-only.
-+     */
-+ */ -+ if ((_etext & PAGEMASK) == (_sdata & PAGEMASK)) -+ { -+ code->ntext = btopr (_end - (_stext & PAGEMASK)); -+ code->pptext = ep_alloc_memory_elan (rail, _stext & PAGEMASK, ptob (code->ntext), EP_PERM_EXECUTE, 0); -+ -+ if (code->pptext == (sdramaddr_t) 0) -+ return (ENOMEM); -+ -+ code->_stext = code->pptext + (_stext & PAGEOFFSET); -+ code->_rodata = code->_stext + (_rodata - _stext); -+ code->_sdata = code->_stext + (_sdata - _stext); -+ } -+ else -+ { -+ code->ntext = btopr (_etext - (_stext & PAGEMASK)); -+ code->ndata = btopr (_end - (_sdata & PAGEMASK)); -+ -+ if (code->ntext) -+ { -+ code->pptext = ep_alloc_memory_elan (rail, _stext & PAGEMASK, ptob (code->ntext), EP_PERM_EXECUTE, 0); -+ -+ if (code->pptext == (sdramaddr_t) 0) -+ return (ENOMEM); -+ -+ code->_stext = code->pptext + (_stext & PAGEOFFSET); -+ code->_rodata = code->_stext + (_rodata - _stext); -+ } -+ -+ if (code->ndata) -+ { -+ code->ppdata = ep_alloc_memory_elan (rail, _sdata & PAGEMASK, ptob (code->ndata), EP_PERM_WRITE, 0); -+ -+ if (code->ppdata == (sdramaddr_t) 0) -+ { -+ if (code->ntext) ep_free_memory_elan (rail, _sdata & PAGEMASK); -+ code->ntext = 0; -+ -+ return (ENOMEM); -+ } -+ -+ code->_sdata = code->ppdata + (_sdata & PAGEOFFSET); -+ } -+ } -+ -+#ifdef __LITTLE_ENDIAN__ -+# define Flip 3 -+#else -+# define Flip 0 -+#endif -+ -+ /* -+ * Now copy the text and rodata into the SDRAM -+ * this is linked into the module to be byte -+ * copied to the SDRAM, since we want to copy -+ * with word accesses we have to do the byte -+ * assembly correctly. -+ */ -+ for (i = 0; i < code->text_size; i++) -+ rail->Operations.SdramWriteb (rail, code->_stext + i, code->text[i^Flip]); -+ -+ for (i = 0; i < code->rodata_size; i++) -+ rail->Operations.SdramWriteb (rail, code->_rodata + i, code->rodata[i^Flip]); -+ -+ /* -+ * And the initialised data segment. -+ */ -+ for (i = 0; i < code->data_size; i++) -+ rail->Operations.SdramWriteb (rail, code->_sdata + i, code->data[i^Flip]); -+ -+ return (ESUCCESS); -+} -+ -+void -+ep_unloadcode (EP_RAIL *rail, EP_CODE *code) -+{ -+ EP_ADDR _stext = ep_symbol (code, "_stext"); -+ EP_ADDR _sdata = ep_symbol (code, "_sdata"); -+ -+ if (code->pptext) -+ ep_free_memory_elan (rail, _stext & PAGEMASK); -+ if (code->ppdata) -+ ep_free_memory_elan (rail, _sdata & PAGEMASK); -+ code->pptext = code->ppdata = 0; -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/threadcode_elan3.c linux-2.6.9/drivers/net/qsnet/ep/threadcode_elan3.c ---- clean/drivers/net/qsnet/ep/threadcode_elan3.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/threadcode_elan3.c 2003-10-07 09:22:38.000000000 -0400 -@@ -0,0 +1,85 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: threadcode_elan3.c,v 1.11 2003/10/07 13:22:38 david Exp $" -+/* $Source: /cvs/master/quadrics/epmod/threadcode_elan3.c,v $ */ -+ -+#include -+ -+#include -+ -+#include "kcomm_elan3.h" -+#include "debug.h" -+ -+#include -+ -+E3_Addr -+ep3_init_thread (ELAN3_DEV *dev, -+ E3_Addr fn, /* Elan address of function */ -+ E3_Addr addr, /* Elan address of stack */ -+ sdramaddr_t stack, /* sdram address of stack */ -+ int stackSize, /* stack size (in bytes) */ -+ int nargs, -+ ...) 
-+{ -+ sdramaddr_t frame; -+ sdramaddr_t regs; -+ sdramaddr_t argsp; -+ int i; -+ va_list ap; -+ -+ /* -+ * Align the stack pointer at the top of the stack and leave space for a stack frame -+ */ -+ stack = ((stack + stackSize) & ~(E3_STACK_ALIGN-1)) - sizeof (E3_Frame); -+ addr = ((addr + stackSize) & ~(E3_STACK_ALIGN-1)) - sizeof (E3_Frame); -+ -+ va_start (ap, nargs); -+ -+ if (nargs > 6) -+ { -+ stack -= (((nargs*sizeof (E3_uint32))+E3_STACK_ALIGN-1) & ~(E3_STACK_ALIGN-1)); -+ addr -= (((nargs*sizeof (E3_uint32))+E3_STACK_ALIGN-1) & ~(E3_STACK_ALIGN-1)); -+ } -+ -+ frame = stack; -+ regs = stack - sizeof (E3_OutsRegs); -+ -+ /* -+ * Initialise the registers, and stack frame. -+ */ -+ elan3_sdram_writel (dev, regs + offsetof (E3_OutsRegs, o[6]), fn); -+ elan3_sdram_writel (dev, regs + offsetof (E3_OutsRegs, o[7]), 0); -+ -+ if (nargs <= 6) -+ { -+ for (i = 0; i < nargs; i++) -+ elan3_sdram_writel (dev, regs + offsetof (E3_OutsRegs, o[i]), va_arg (ap, E3_uint32)); -+ } -+ else -+ { -+ for (i = 0; i < 6; i++) -+ elan3_sdram_writel (dev, regs + offsetof (E3_OutsRegs, o[i]), va_arg (ap, E3_uint32)); -+ -+ for (argsp = frame + offsetof (E3_Frame, fr_argx[0]); i < nargs; i++, argsp += sizeof (E3_uint32)) -+ elan3_sdram_writel (dev, argsp, va_arg (ap, int)); -+ } -+ -+ elan3_sdram_writel (dev, frame + offsetof (E3_Frame, fr_savefp), 0); -+ elan3_sdram_writel (dev, frame + offsetof (E3_Frame, fr_savepc), 0); -+ -+ va_end (ap); -+ -+ return (addr); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/ep/threadcode_elan3_Linux.c linux-2.6.9/drivers/net/qsnet/ep/threadcode_elan3_Linux.c ---- clean/drivers/net/qsnet/ep/threadcode_elan3_Linux.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/threadcode_elan3_Linux.c 2005-09-07 10:39:44.000000000 -0400 -@@ -0,0 +1,112 @@ -+/* --------------------------------------------------------*/ -+/* MACHINE GENERATED ELAN CODE */ -+#include -+#include -+#include "kcomm_elan3.h" -+static uint32_t threadcode_elan3_text[] = { -+0x80a0239c, 0x00001082, 0x00e0a280, 0x47008002, 0x0020a380, 0x20600288, 0x20200286, 0x43008002, -+0x00000001, 0x0a006081, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, -+0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, -+0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, -+0x00000001, 0x00000001, 0xa800c613, 0xa300c609, 0x0020108a, 0x0080900b, 0x00006885, 0x0580a080, -+0x06008002, 0x02a0a080, 0x06008022, 0xffff0296, 0x04008010, 0xff3f0398, 0x1f008010, 0x00201090, -+0x00007081, 0x1600801c, 0x00000001, 0x60a0239c, 0x00a0a3c0, 0x20a0a3f0, 0x40a0a3e0, 0x00c03f3f, -+0xf8e017be, 0x04e08f80, 0x06008012, 0x00000001, 0x00c01ffc, 0x0000a081, 0x06008010, 0x40a083e0, -+0x14e007be, 0x00c01ffc, 0x0000a081, 0x40a083e0, 0x20a083f0, 0x00a083c0, 0x60a0039c, 0x00e0a280, -+0xbfffbf12, 0x0020a380, 0x03008012, 0x02201090, 0x03201090, 0x08e0c381, 0x80a0039c, 0xe0a0239c, -+0x60a023de, 0x80a0a3e0, 0xa0a0a3f0, 0x080010b8, 0x090010b0, 0x0a0010b2, 0x04000037, 0x402006b4, -+0x50200690, 0x01201092, 0x20a0239c, 0x00a0a3f0, 0x00c03f3f, 0x8ce117be, 0x04e08f80, 0x06008012, -+0x00000001, 0x00c01ff8, 0x0000b081, 0x06008010, 0x00a083f0, 0x14e007be, 0x00c01ff8, 0x0000b081, -+0x00a083f0, 0x20a0039c, 0x582006d0, 0x0020a280, 0x05008002, 0x0900a280, 0x10008002, 0x50200690, -+0xeaffbf30, 0x5c2006d4, 0x18001090, 0x19001092, 0x1b800294, 0x0a201096, 0x8affff7f, 0x05201098, 
-+0x446026d0, 0x302027f4, 0xdfffbf10, 0x50200690, 0xfdffbf10, 0x446026c0, 0x5c2006e0, 0x0020a480, -+0xf9ffbf06, 0x18001090, 0x19001092, 0x1b000494, 0x14201096, 0x7bffff7f, 0x0a201098, 0x0020a280, -+0xf4ffbf22, 0x486026e0, 0x00007081, 0x1600801c, 0x00000001, 0x60a0239c, 0x00a0a3c0, 0x20a0a3f0, -+0x40a0a3e0, 0x00c03f3f, 0x60e217be, 0x04e08f80, 0x06008012, 0x00000001, 0x00c01ffc, 0x0000a081, -+0x06008010, 0x40a083e0, 0x14e007be, 0x00c01ffc, 0x0000a081, 0x40a083e0, 0x20a083f0, 0x00a083c0, -+0x60a0039c, 0xff3f84a0, 0xe0ffbf1c, 0x18001090, 0xd5ffbf30, 0x60a003de, 0x80a083e0, 0xa0a083f0, -+0x08e0c381, 0xe0a0039c, 0x00a1239c, 0x60a023de, 0x80a0a3e0, 0xa0a0a3f0, 0x44a123d0, 0x090010b0, -+0x0a0010b6, 0x0b0010b8, 0x0c0010b4, 0x012010ba, 0xdca023fa, 0x142007d2, 0x082007d0, 0x084002b2, -+0x000027c0, 0xf42006d0, 0x0020a280, 0x15008032, 0xf42006d0, 0x18200790, 0xdca003d2, 0x20a0239c, -+0x00a0a3f0, 0x00c03f3f, 0x20e317be, 0x04e08f80, 0x06008012, 0x00000001, 0x00c01ff8, 0x0000b081, -+0x06008010, 0x00a083f0, 0x14e007be, 0x00c01ff8, 0x0000b081, 0x00a083f0, 0x20a0039c, 0xf42006d0, -+0x0020a280, 0x0a008022, 0xdca023c0, 0x042007d0, 0x0840a680, 0x06008032, 0xdca023c0, 0x18001082, -+0x0220d091, 0xe1ffbf10, 0xf42006d0, 0x06008010, 0x190010a2, 0x042006d0, 0x00c026d0, 0x18001082, -+0x0020d091, 0x042006d0, 0x01200290, 0x042026d0, 0x000006d0, 0x0020a280, 0x04008002, 0x18001090, -+0x4f010040, 0x1b001092, 0xf02006e0, 0x0020a480, 0xf1ffbf02, 0x40b03611, 0x004004d2, 0x01201290, -+0x0840a280, 0x0e018012, 0x10001096, 0x046004d0, 0x01208a80, 0x33008002, 0xa0200484, 0x0c2610ba, -+0x000024fa, 0x00211090, 0x042024d0, 0x246004d0, 0x80200290, 0x082024d0, 0xec2004d0, 0x00210290, -+0x0c2024d0, 0x102024c4, 0x186004d2, 0x02602a93, 0x098006d0, 0x0001003b, 0x1d000290, 0x098026d0, -+0xc0ff3f3b, 0x1d000a90, 0x44a103fa, 0x606007d2, 0x00680292, 0x09001290, 0x4000003b, 0x1d001290, -+0x142024d0, 0x206004d0, 0x10210290, 0x182024d0, 0x186004d0, 0x02202a91, 0x088006d2, 0x0001003b, -+0x1d400292, 0x088026d2, 0xc0ff3f3b, 0x1d400a92, 0x186004d0, 0x00280290, 0x80000015, 0x0a001290, -+0x08401292, 0x4000003b, 0x1d401292, 0x1c2024d2, 0x01201090, 0xa02024d0, 0x20200496, 0xa8200484, -+0x306004d0, 0x0020a280, 0x2b008012, 0x00201098, 0x0c2610ba, 0x00c022fa, 0x04e022c0, 0xc0200490, -+0x10e022d0, 0x186004d2, 0x02602a93, 0x098006d0, 0x0001003b, 0x1d000290, 0x098026d0, 0xc0ff3f3b, -+0x1d000a90, 0x44a103fa, 0x606007d2, 0x00680292, 0x09001290, 0x4000003b, 0x1d001290, 0x14e022d0, -+0x206004d0, 0x10210290, 0x18e022d0, 0x186004d0, 0x02202a91, 0x088006d2, 0x0001003b, 0x1d400292, -+0x088026d2, 0xc0ff3f3b, 0x1d400a92, 0x186004d0, 0x00280290, 0x80000015, 0x0a001290, 0x08401292, -+0x4000003b, 0x1d401292, 0x1ce022d2, 0x4f008010, 0x0020109a, 0x0c00109a, 0x306004d0, 0x0840a380, -+0x3b00801a, 0xe02004c6, 0x0c2610ba, 0x00c022fa, 0x01202b91, 0x0c000290, 0x02202a91, 0x08400490, -+0x382002d2, 0x04e022d2, 0x342002d0, 0x08e022d0, 0x0ce022c6, 0x10e022c4, 0x186004d0, 0x02202a91, -+0x088006d2, 0x0001003b, 0x1d400292, 0x088026d2, 0xc0ff3f3b, 0x1d400a92, 0x44a103fa, 0x606007d0, -+0x00280290, 0x08401292, 0x4000003b, 0x1d401292, 0x14e022d2, 0x206004d0, 0x10210290, 0x18e022d0, -+0x186004d0, 0x02202a91, 0x088006d4, 0x0001003b, 0x1d800294, 0x088026d4, 0xc0ff3f3b, 0x1d800a94, -+0x186004d0, 0x00280290, 0x80000013, 0x09001290, 0x08801294, 0x4000003b, 0x1d801294, 0x1ce022d4, -+0x01201090, 0x008020d0, 0x04e002d0, 0x08c00086, 0x0840039a, 0x01200398, 0x20e00296, 0x306004d0, -+0x0800a380, 0xc9ffbf0a, 0x08a00084, 0xc0200490, 0xf0ff22d0, 0xe42004d0, 0x0d00a280, 0x0b00801a, -+0x00201098, 0x04008010, 
0x10001096, 0x01200398, 0x20e00296, 0x306004d0, 0x0800a380, 0xfcffbf2a, -+0x04e022c0, 0xfc3f109a, 0xe42024da, 0x10001082, 0x186004d0, 0x00280290, 0x08006081, 0x00000001, -+0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, -+0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, -+0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00201098, -+0x0c00109a, 0x142004fa, 0xec00823b, 0x3080d61b, 0x00006891, 0x0420a280, 0x3b008002, 0x0c00a280, -+0x04008002, 0x00000001, 0x0120d091, 0x36008030, 0x7c2006d0, 0x01200290, 0x7c2026d0, 0x782006d0, -+0x0020a280, 0x04008002, 0x78200690, 0x64000040, 0x40e00692, 0xf02004d0, 0x0020a280, 0x03008012, -+0xf02026d0, 0x80e026c0, 0x7c2006d0, 0x40e026d0, 0x046004d0, 0x04208a80, 0x13008002, 0x1100108a, -+0xec2004cc, 0x3fa00b8e, 0x40e0018e, 0x0780239c, 0x0080bbe0, 0x006099e0, 0x00a0b9e0, 0x406099e0, -+0x40a0b9e0, 0x806099e0, 0x80a0b9e0, 0xc06099e0, 0xc0a0b9e0, 0x00809be0, 0x0780039c, 0x0e008010, -+0xec2004d2, 0xec2004cc, 0x3fa00b8e, 0x40e0018e, 0x0780239c, 0x0080bbe0, 0x006099e0, 0x00a0b9e0, -+0x406099e0, 0x40a0b9e0, 0x00809be0, 0x0780039c, 0xec2004d2, 0xe42004d0, 0x886222d0, 0x042006d0, -+0x00c026d0, 0x000007d0, 0x01208a80, 0x05008012, 0x00000001, 0x142027f2, 0x06008010, 0xdca003fa, -+0x142027f2, 0xfe3f0a90, 0x000027d0, 0xdca003fa, 0x016007ba, 0xdca023fa, 0x0c2007d0, 0x0840a680, -+0x04008032, 0x082007d0, 0x03008010, 0x102007f2, 0x084006b2, 0x00007081, 0x1600801c, 0x00000001, -+0x60a0239c, 0x00a0a3c0, 0x20a0a3f0, 0x40a0a3e0, 0x02c03f3f, 0x8ce017be, 0x04e08f80, 0x06008012, -+0x00000001, 0x00c01ffc, 0x0000a081, 0x06008010, 0x40a083e0, 0x14e007be, 0x00c01ffc, 0x0000a081, -+0x40a083e0, 0x20a083f0, 0x00a083c0, 0x60a0039c, 0x042007d0, 0x0840a680, 0xb3febf12, 0x190010a2, -+0x8afebf10, 0xf42006d0, 0x60a003de, 0x80a083e0, 0xa0a083f0, 0x08e0c381, 0x00a1039c, 0x80a0239c, -+0x042002c4, 0x004022c4, 0x18008030, 0x00007081, 0x16008012, 0x00000001, 0x60a0239c, 0x00a0a3c0, -+0x20a0a3f0, 0x40a0a3e0, 0x02c03f3f, 0x24e117be, 0x04e08f80, 0x06008012, 0x00000001, 0x00c01ffc, -+0x0000a081, 0x06008010, 0x40a083e0, 0x14e007be, 0x00c01ffc, 0x0000a081, 0x40a083e0, 0x20a083f0, -+0x00a083c0, 0x60a0039c, 0x000002c4, 0x00a0a080, 0xe7ffbf12, 0x00000001, 0x042002c4, 0x01a00084, -+0x042022c4, 0x000002c4, 0x00a0a080, 0xddffbf12, 0x00000001, 0x08e0c381, 0x80a0039c, }; -+#define threadcode_elan3_text_size 0x97c -+static uint32_t threadcode_elan3_data[] = { -+0}; -+#define threadcode_elan3_data_size 0x0 -+static uint32_t threadcode_elan3_rodata[] = { -+0}; -+#define threadcode_elan3_rodata_size 0x0 -+static EP_SYMBOL threadcode_elan3_symbols[] = { -+ {"__bss_start", 0xff00297c}, -+ {"_edata", 0xff00297c}, -+ {"_end", 0xff002988}, -+ {"_etext", 0xff00097c}, -+ {"_sdata", 0xff00297c}, -+ {"_stext", 0xff000000}, -+ {"ep3_spinblock", 0xff0008dc}, -+ {"ep3comms_rcvr", 0xff0002a8}, -+ {"kcomm_probe", 0xff00013c}, -+ {"r", 0xff00297c}, -+ {"rail", 0xff002984}, -+ {"rm", 0xff002980}, -+ {0, 0}}; -+EP_CODE threadcode_elan3 = { -+ (unsigned char *) threadcode_elan3_text, -+ threadcode_elan3_text_size, -+ (unsigned char *) threadcode_elan3_data, -+ threadcode_elan3_data_size, -+ (unsigned char *) threadcode_elan3_rodata, -+ threadcode_elan3_rodata_size, -+ threadcode_elan3_symbols, -+}; -diff -urN clean/drivers/net/qsnet/ep/threadcode_elan3_Linux.code.dis linux-2.6.9/drivers/net/qsnet/ep/threadcode_elan3_Linux.code.dis ---- clean/drivers/net/qsnet/ep/threadcode_elan3_Linux.code.dis 1969-12-31 
19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/threadcode_elan3_Linux.code.dis 2005-09-07 10:39:44.000000000 -0400 -@@ -0,0 +1,620 @@ -+ -+threadcode_elan3_Linux.code: file format elf32-elan -+ -+Disassembly of section .text: -+ -+ff000000 <_stext>: -+ff000000: 80 a0 23 9c sub %sp, 0x80, %sp -+ff000004: 00 00 10 82 mov %g0, %g1 -+ff000008: 00 e0 a2 80 cmp %o3, 0 -+ff00000c: 47 00 80 02 be ff000128 <_stext+0x128> -+ff000010: 00 20 a3 80 cmp %o4, 0 -+ff000014: 20 60 02 88 add %o1, 0x20, %g4 -+ff000018: 20 20 02 86 add %o0, 0x20, %g3 -+ff00001c: 43 00 80 02 be ff000128 <_stext+0x128> -+ff000020: 00 00 00 01 nop -+ff000024: 0a 00 60 81 open %o2 -+ff000028: 00 00 00 01 nop -+ff00002c: 00 00 00 01 nop -+ff000030: 00 00 00 01 nop -+ff000034: 00 00 00 01 nop -+ff000038: 00 00 00 01 nop -+ff00003c: 00 00 00 01 nop -+ff000040: 00 00 00 01 nop -+ff000044: 00 00 00 01 nop -+ff000048: 00 00 00 01 nop -+ff00004c: 00 00 00 01 nop -+ff000050: 00 00 00 01 nop -+ff000054: 00 00 00 01 nop -+ff000058: 00 00 00 01 nop -+ff00005c: 00 00 00 01 nop -+ff000060: 00 00 00 01 nop -+ff000064: 00 00 00 01 nop -+ff000068: 00 00 00 01 nop -+ff00006c: 00 00 00 01 nop -+ff000070: 00 00 00 01 nop -+ff000074: 00 00 00 01 nop -+ff000078: 00 00 00 01 nop -+ff00007c: 00 00 00 01 nop -+ff000080: 00 00 00 01 nop -+ff000084: 00 00 00 01 nop -+ff000088: a8 00 c6 13 sendtrans 0x3005, [ %o0 ], %o1 -+ff00008c: a3 00 c6 09 sendtrans 0x3005, [ %g3 ], %g4 -+ff000090: 00 20 10 8a clr %g5 -+ff000094: 00 80 90 0b sendtrans 0x8400, %g0, %g5 -+ff000098: 00 00 68 85 close %g2 -+ff00009c: 05 80 a0 80 cmp %g2, %g5 -+ff0000a0: 06 00 80 02 be ff0000b8 <_stext+0xb8> -+ff0000a4: 02 a0 a0 80 cmp %g2, 2 -+ff0000a8: 06 00 80 22 be,a ff0000c0 <_stext+0xc0> -+ff0000ac: ff ff 02 96 add %o3, -1, %o3 -+ff0000b0: 04 00 80 10 b ff0000c0 <_stext+0xc0> -+ff0000b4: ff 3f 03 98 add %o4, -1, %o4 -+ff0000b8: 1f 00 80 10 b ff000134 <_stext+0x134> -+ff0000bc: 00 20 10 90 clr %o0 -+ff0000c0: 00 00 70 81 breaktest -+ff0000c4: 16 00 80 1c bpos ff00011c <_stext+0x11c> -+ff0000c8: 00 00 00 01 nop -+ff0000cc: 60 a0 23 9c sub %sp, 0x60, %sp -+ff0000d0: 00 a0 a3 c0 stblock32 %g0, [ %sp ] -+ff0000d4: 20 a0 a3 f0 stblock32 %i0, [ %sp + 0x20 ] -+ff0000d8: 40 a0 a3 e0 stblock32 %l0, [ %sp + 0x40 ] -+ff0000dc: 00 c0 3f 3f sethi %hi(0xff000000), %i7 -+ff0000e0: f8 e0 17 be or %i7, 0xf8, %i7 ! 
ff0000f8 <_stext+0xf8> -+ff0000e4: 04 e0 8f 80 btst 4, %i7 -+ff0000e8: 06 00 80 12 bne ff000100 <_stext+0x100> -+ff0000ec: 00 00 00 01 nop -+ff0000f0: 00 c0 1f fc ldd [ %i7 ], %fp -+ff0000f4: 00 00 a0 81 break -+ff0000f8: 06 00 80 10 b ff000110 <_stext+0x110> -+ff0000fc: 40 a0 83 e0 ldblock32 [ %sp + 0x40 ], %l0 -+ff000100: 14 e0 07 be add %i7, 0x14, %i7 -+ff000104: 00 c0 1f fc ldd [ %i7 ], %fp -+ff000108: 00 00 a0 81 break -+ff00010c: 40 a0 83 e0 ldblock32 [ %sp + 0x40 ], %l0 -+ff000110: 20 a0 83 f0 ldblock32 [ %sp + 0x20 ], %i0 -+ff000114: 00 a0 83 c0 ldblock32 [ %sp ], %g0 -+ff000118: 60 a0 03 9c add %sp, 0x60, %sp -+ff00011c: 00 e0 a2 80 cmp %o3, 0 -+ff000120: bf ff bf 12 bne ff00001c <_stext+0x1c> -+ff000124: 00 20 a3 80 cmp %o4, 0 -+ff000128: 03 00 80 12 bne ff000134 <_stext+0x134> -+ff00012c: 02 20 10 90 mov 2, %o0 -+ff000130: 03 20 10 90 mov 3, %o0 -+ff000134: 08 e0 c3 81 retl -+ff000138: 80 a0 03 9c add %sp, 0x80, %sp -+ -+ff00013c : -+ff00013c: e0 a0 23 9c sub %sp, 0xe0, %sp -+ff000140: 60 a0 23 de st %o7, [ %sp + 0x60 ] -+ff000144: 80 a0 a3 e0 stblock32 %l0, [ %sp + 0x80 ] -+ff000148: a0 a0 a3 f0 stblock32 %i0, [ %sp + 0xa0 ] -+ff00014c: 08 00 10 b8 mov %o0, %i4 -+ff000150: 09 00 10 b0 mov %o1, %i0 -+ff000154: 0a 00 10 b2 mov %o2, %i1 -+ff000158: 04 00 00 37 sethi %hi(0x1000), %i3 -+ff00015c: 40 20 06 b4 add %i0, 0x40, %i2 -+ff000160: 50 20 06 90 add %i0, 0x50, %o0 -+ff000164: 01 20 10 92 mov 1, %o1 -+ff000168: 20 a0 23 9c sub %sp, 0x20, %sp -+ff00016c: 00 a0 a3 f0 stblock32 %i0, [ %sp ] -+ff000170: 00 c0 3f 3f sethi %hi(0xff000000), %i7 -+ff000174: 8c e1 17 be or %i7, 0x18c, %i7 ! ff00018c -+ff000178: 04 e0 8f 80 btst 4, %i7 -+ff00017c: 06 00 80 12 bne ff000194 -+ff000180: 00 00 00 01 nop -+ff000184: 00 c0 1f f8 ldd [ %i7 ], %i4 -+ff000188: 00 00 b0 81 waitevent -+ff00018c: 06 00 80 10 b ff0001a4 -+ff000190: 00 a0 83 f0 ldblock32 [ %sp ], %i0 -+ff000194: 14 e0 07 be add %i7, 0x14, %i7 -+ff000198: 00 c0 1f f8 ldd [ %i7 ], %i4 -+ff00019c: 00 00 b0 81 waitevent -+ff0001a0: 00 a0 83 f0 ldblock32 [ %sp ], %i0 -+ff0001a4: 20 a0 03 9c add %sp, 0x20, %sp -+ff0001a8: 58 20 06 d0 ld [ %i0 + 0x58 ], %o0 -+ff0001ac: 00 20 a2 80 cmp %o0, 0 -+ff0001b0: 05 00 80 02 be ff0001c4 -+ff0001b4: 09 00 a2 80 cmp %o0, %o1 -+ff0001b8: 10 00 80 02 be ff0001f8 -+ff0001bc: 50 20 06 90 add %i0, 0x50, %o0 -+ff0001c0: ea ff bf 30 b,a ff000168 -+ff0001c4: 5c 20 06 d4 ld [ %i0 + 0x5c ], %o2 -+ff0001c8: 18 00 10 90 mov %i0, %o0 -+ff0001cc: 19 00 10 92 mov %i1, %o1 -+ff0001d0: 1b 80 02 94 add %o2, %i3, %o2 -+ff0001d4: 0a 20 10 96 mov 0xa, %o3 -+ff0001d8: 8a ff ff 7f call ff000000 <_stext> -+ff0001dc: 05 20 10 98 mov 5, %o4 -+ff0001e0: 44 60 26 d0 st %o0, [ %i1 + 0x44 ] -+ff0001e4: 30 20 27 f4 st %i2, [ %i4 + 0x30 ] -+ff0001e8: df ff bf 10 b ff000164 -+ff0001ec: 50 20 06 90 add %i0, 0x50, %o0 -+ff0001f0: fd ff bf 10 b ff0001e4 -+ff0001f4: 44 60 26 c0 clr [ %i1 + 0x44 ] -+ff0001f8: 5c 20 06 e0 ld [ %i0 + 0x5c ], %l0 -+ff0001fc: 00 20 a4 80 cmp %l0, 0 -+ff000200: f9 ff bf 06 bl ff0001e4 -+ff000204: 18 00 10 90 mov %i0, %o0 -+ff000208: 19 00 10 92 mov %i1, %o1 -+ff00020c: 1b 00 04 94 add %l0, %i3, %o2 -+ff000210: 14 20 10 96 mov 0x14, %o3 -+ff000214: 7b ff ff 7f call ff000000 <_stext> -+ff000218: 0a 20 10 98 mov 0xa, %o4 -+ff00021c: 00 20 a2 80 cmp %o0, 0 -+ff000220: f4 ff bf 22 be,a ff0001f0 -+ff000224: 48 60 26 e0 st %l0, [ %i1 + 0x48 ] -+ff000228: 00 00 70 81 breaktest -+ff00022c: 16 00 80 1c bpos ff000284 -+ff000230: 00 00 00 01 nop -+ff000234: 60 a0 23 9c sub %sp, 0x60, %sp -+ff000238: 00 a0 a3 c0 stblock32 
%g0, [ %sp ] -+ff00023c: 20 a0 a3 f0 stblock32 %i0, [ %sp + 0x20 ] -+ff000240: 40 a0 a3 e0 stblock32 %l0, [ %sp + 0x40 ] -+ff000244: 00 c0 3f 3f sethi %hi(0xff000000), %i7 -+ff000248: 60 e2 17 be or %i7, 0x260, %i7 ! ff000260 -+ff00024c: 04 e0 8f 80 btst 4, %i7 -+ff000250: 06 00 80 12 bne ff000268 -+ff000254: 00 00 00 01 nop -+ff000258: 00 c0 1f fc ldd [ %i7 ], %fp -+ff00025c: 00 00 a0 81 break -+ff000260: 06 00 80 10 b ff000278 -+ff000264: 40 a0 83 e0 ldblock32 [ %sp + 0x40 ], %l0 -+ff000268: 14 e0 07 be add %i7, 0x14, %i7 -+ff00026c: 00 c0 1f fc ldd [ %i7 ], %fp -+ff000270: 00 00 a0 81 break -+ff000274: 40 a0 83 e0 ldblock32 [ %sp + 0x40 ], %l0 -+ff000278: 20 a0 83 f0 ldblock32 [ %sp + 0x20 ], %i0 -+ff00027c: 00 a0 83 c0 ldblock32 [ %sp ], %g0 -+ff000280: 60 a0 03 9c add %sp, 0x60, %sp -+ff000284: ff 3f 84 a0 addcc %l0, -1, %l0 -+ff000288: e0 ff bf 1c bpos ff000208 -+ff00028c: 18 00 10 90 mov %i0, %o0 -+ff000290: d5 ff bf 30 b,a ff0001e4 -+ff000294: 60 a0 03 de ld [ %sp + 0x60 ], %o7 -+ff000298: 80 a0 83 e0 ldblock32 [ %sp + 0x80 ], %l0 -+ff00029c: a0 a0 83 f0 ldblock32 [ %sp + 0xa0 ], %i0 -+ff0002a0: 08 e0 c3 81 retl -+ff0002a4: e0 a0 03 9c add %sp, 0xe0, %sp -+ -+ff0002a8 : -+ff0002a8: 00 a1 23 9c sub %sp, 0x100, %sp -+ff0002ac: 60 a0 23 de st %o7, [ %sp + 0x60 ] -+ff0002b0: 80 a0 a3 e0 stblock32 %l0, [ %sp + 0x80 ] -+ff0002b4: a0 a0 a3 f0 stblock32 %i0, [ %sp + 0xa0 ] -+ff0002b8: 44 a1 23 d0 st %o0, [ %sp + 0x144 ] -+ff0002bc: 09 00 10 b0 mov %o1, %i0 -+ff0002c0: 0a 00 10 b6 mov %o2, %i3 -+ff0002c4: 0b 00 10 b8 mov %o3, %i4 -+ff0002c8: 0c 00 10 b4 mov %o4, %i2 -+ff0002cc: 01 20 10 ba mov 1, %i5 -+ff0002d0: dc a0 23 fa st %i5, [ %sp + 0xdc ] -+ff0002d4: 14 20 07 d2 ld [ %i4 + 0x14 ], %o1 -+ff0002d8: 08 20 07 d0 ld [ %i4 + 8 ], %o0 -+ff0002dc: 08 40 02 b2 add %o1, %o0, %i1 -+ff0002e0: 00 00 27 c0 clr [ %i4 ] -+ff0002e4: f4 20 06 d0 ld [ %i0 + 0xf4 ], %o0 -+ff0002e8: 00 20 a2 80 cmp %o0, 0 -+ff0002ec: 15 00 80 32 bne,a ff000340 -+ff0002f0: f4 20 06 d0 ld [ %i0 + 0xf4 ], %o0 -+ff0002f4: 18 20 07 90 add %i4, 0x18, %o0 -+ff0002f8: dc a0 03 d2 ld [ %sp + 0xdc ], %o1 -+ff0002fc: 20 a0 23 9c sub %sp, 0x20, %sp -+ff000300: 00 a0 a3 f0 stblock32 %i0, [ %sp ] -+ff000304: 00 c0 3f 3f sethi %hi(0xff000000), %i7 -+ff000308: 20 e3 17 be or %i7, 0x320, %i7 ! 
ff000320 -+ff00030c: 04 e0 8f 80 btst 4, %i7 -+ff000310: 06 00 80 12 bne ff000328 -+ff000314: 00 00 00 01 nop -+ff000318: 00 c0 1f f8 ldd [ %i7 ], %i4 -+ff00031c: 00 00 b0 81 waitevent -+ff000320: 06 00 80 10 b ff000338 -+ff000324: 00 a0 83 f0 ldblock32 [ %sp ], %i0 -+ff000328: 14 e0 07 be add %i7, 0x14, %i7 -+ff00032c: 00 c0 1f f8 ldd [ %i7 ], %i4 -+ff000330: 00 00 b0 81 waitevent -+ff000334: 00 a0 83 f0 ldblock32 [ %sp ], %i0 -+ff000338: 20 a0 03 9c add %sp, 0x20, %sp -+ff00033c: f4 20 06 d0 ld [ %i0 + 0xf4 ], %o0 -+ff000340: 00 20 a2 80 cmp %o0, 0 -+ff000344: 0a 00 80 22 be,a ff00036c -+ff000348: dc a0 23 c0 clr [ %sp + 0xdc ] -+ff00034c: 04 20 07 d0 ld [ %i4 + 4 ], %o0 -+ff000350: 08 40 a6 80 cmp %i1, %o0 -+ff000354: 06 00 80 32 bne,a ff00036c -+ff000358: dc a0 23 c0 clr [ %sp + 0xdc ] -+ff00035c: 18 00 10 82 mov %i0, %g1 -+ff000360: 02 20 d0 91 ta 2 -+ff000364: e1 ff bf 10 b ff0002e8 -+ff000368: f4 20 06 d0 ld [ %i0 + 0xf4 ], %o0 -+ff00036c: 06 00 80 10 b ff000384 -+ff000370: 19 00 10 a2 mov %i1, %l1 -+ff000374: 04 20 06 d0 ld [ %i0 + 4 ], %o0 -+ff000378: 00 c0 26 d0 st %o0, [ %i3 ] -+ff00037c: 18 00 10 82 mov %i0, %g1 -+ff000380: 00 20 d0 91 ta 0 -+ff000384: 04 20 06 d0 ld [ %i0 + 4 ], %o0 -+ff000388: 01 20 02 90 inc %o0 -+ff00038c: 04 20 26 d0 st %o0, [ %i0 + 4 ] -+ff000390: 00 00 06 d0 ld [ %i0 ], %o0 -+ff000394: 00 20 a2 80 cmp %o0, 0 -+ff000398: 04 00 80 02 be ff0003a8 -+ff00039c: 18 00 10 90 mov %i0, %o0 -+ff0003a0: 4f 01 00 40 call ff0008dc -+ff0003a4: 1b 00 10 92 mov %i3, %o1 -+ff0003a8: f0 20 06 e0 ld [ %i0 + 0xf0 ], %l0 -+ff0003ac: 00 20 a4 80 cmp %l0, 0 -+ff0003b0: f1 ff bf 02 be ff000374 -+ff0003b4: 40 b0 36 11 sethi %hi(0xdac10000), %o0 -+ff0003b8: 00 40 04 d2 ld [ %l1 ], %o1 -+ff0003bc: 01 20 12 90 or %o0, 1, %o0 -+ff0003c0: 08 40 a2 80 cmp %o1, %o0 -+ff0003c4: 0e 01 80 12 bne ff0007fc -+ff0003c8: 10 00 10 96 mov %l0, %o3 -+ff0003cc: 04 60 04 d0 ld [ %l1 + 4 ], %o0 -+ff0003d0: 01 20 8a 80 btst 1, %o0 -+ff0003d4: 33 00 80 02 be ff0004a0 -+ff0003d8: a0 20 04 84 add %l0, 0xa0, %g2 -+ff0003dc: 0c 26 10 ba mov 0x60c, %i5 -+ff0003e0: 00 00 24 fa st %i5, [ %l0 ] -+ff0003e4: 00 21 10 90 mov 0x100, %o0 -+ff0003e8: 04 20 24 d0 st %o0, [ %l0 + 4 ] -+ff0003ec: 24 60 04 d0 ld [ %l1 + 0x24 ], %o0 -+ff0003f0: 80 20 02 90 add %o0, 0x80, %o0 -+ff0003f4: 08 20 24 d0 st %o0, [ %l0 + 8 ] -+ff0003f8: ec 20 04 d0 ld [ %l0 + 0xec ], %o0 -+ff0003fc: 00 21 02 90 add %o0, 0x100, %o0 -+ff000400: 0c 20 24 d0 st %o0, [ %l0 + 0xc ] -+ff000404: 10 20 24 c4 st %g2, [ %l0 + 0x10 ] -+ff000408: 18 60 04 d2 ld [ %l1 + 0x18 ], %o1 -+ff00040c: 02 60 2a 93 sll %o1, 2, %o1 -+ff000410: 09 80 06 d0 ld [ %i2 + %o1 ], %o0 -+ff000414: 00 01 00 3b sethi %hi(0x40000), %i5 -+ff000418: 1d 00 02 90 add %o0, %i5, %o0 -+ff00041c: 09 80 26 d0 st %o0, [ %i2 + %o1 ] -+ff000420: c0 ff 3f 3b sethi %hi(0xffff0000), %i5 -+ff000424: 1d 00 0a 90 and %o0, %i5, %o0 -+ff000428: 44 a1 03 fa ld [ %sp + 0x144 ], %i5 -+ff00042c: 60 60 07 d2 ld [ %i5 + 0x60 ], %o1 -+ff000430: 00 68 02 92 add %o1, 0x800, %o1 -+ff000434: 09 00 12 90 or %o0, %o1, %o0 -+ff000438: 40 00 00 3b sethi %hi(0x10000), %i5 -+ff00043c: 1d 00 12 90 or %o0, %i5, %o0 -+ff000440: 14 20 24 d0 st %o0, [ %l0 + 0x14 ] -+ff000444: 20 60 04 d0 ld [ %l1 + 0x20 ], %o0 -+ff000448: 10 21 02 90 add %o0, 0x110, %o0 -+ff00044c: 18 20 24 d0 st %o0, [ %l0 + 0x18 ] -+ff000450: 18 60 04 d0 ld [ %l1 + 0x18 ], %o0 -+ff000454: 02 20 2a 91 sll %o0, 2, %o0 -+ff000458: 08 80 06 d2 ld [ %i2 + %o0 ], %o1 -+ff00045c: 00 01 00 3b sethi %hi(0x40000), %i5 -+ff000460: 1d 40 02 92 add %o1, %i5, %o1 
-+ff000464: 08 80 26 d2 st %o1, [ %i2 + %o0 ] -+ff000468: c0 ff 3f 3b sethi %hi(0xffff0000), %i5 -+ff00046c: 1d 40 0a 92 and %o1, %i5, %o1 -+ff000470: 18 60 04 d0 ld [ %l1 + 0x18 ], %o0 -+ff000474: 00 28 02 90 add %o0, 0x800, %o0 -+ff000478: 80 00 00 15 sethi %hi(0x20000), %o2 -+ff00047c: 0a 00 12 90 or %o0, %o2, %o0 -+ff000480: 08 40 12 92 or %o1, %o0, %o1 -+ff000484: 40 00 00 3b sethi %hi(0x10000), %i5 -+ff000488: 1d 40 12 92 or %o1, %i5, %o1 -+ff00048c: 1c 20 24 d2 st %o1, [ %l0 + 0x1c ] -+ff000490: 01 20 10 90 mov 1, %o0 -+ff000494: a0 20 24 d0 st %o0, [ %l0 + 0xa0 ] -+ff000498: 20 20 04 96 add %l0, 0x20, %o3 -+ff00049c: a8 20 04 84 add %l0, 0xa8, %g2 -+ff0004a0: 30 60 04 d0 ld [ %l1 + 0x30 ], %o0 -+ff0004a4: 00 20 a2 80 cmp %o0, 0 -+ff0004a8: 2b 00 80 12 bne ff000554 -+ff0004ac: 00 20 10 98 clr %o4 -+ff0004b0: 0c 26 10 ba mov 0x60c, %i5 -+ff0004b4: 00 c0 22 fa st %i5, [ %o3 ] -+ff0004b8: 04 e0 22 c0 clr [ %o3 + 4 ] -+ff0004bc: c0 20 04 90 add %l0, 0xc0, %o0 -+ff0004c0: 10 e0 22 d0 st %o0, [ %o3 + 0x10 ] -+ff0004c4: 18 60 04 d2 ld [ %l1 + 0x18 ], %o1 -+ff0004c8: 02 60 2a 93 sll %o1, 2, %o1 -+ff0004cc: 09 80 06 d0 ld [ %i2 + %o1 ], %o0 -+ff0004d0: 00 01 00 3b sethi %hi(0x40000), %i5 -+ff0004d4: 1d 00 02 90 add %o0, %i5, %o0 -+ff0004d8: 09 80 26 d0 st %o0, [ %i2 + %o1 ] -+ff0004dc: c0 ff 3f 3b sethi %hi(0xffff0000), %i5 -+ff0004e0: 1d 00 0a 90 and %o0, %i5, %o0 -+ff0004e4: 44 a1 03 fa ld [ %sp + 0x144 ], %i5 -+ff0004e8: 60 60 07 d2 ld [ %i5 + 0x60 ], %o1 -+ff0004ec: 00 68 02 92 add %o1, 0x800, %o1 -+ff0004f0: 09 00 12 90 or %o0, %o1, %o0 -+ff0004f4: 40 00 00 3b sethi %hi(0x10000), %i5 -+ff0004f8: 1d 00 12 90 or %o0, %i5, %o0 -+ff0004fc: 14 e0 22 d0 st %o0, [ %o3 + 0x14 ] -+ff000500: 20 60 04 d0 ld [ %l1 + 0x20 ], %o0 -+ff000504: 10 21 02 90 add %o0, 0x110, %o0 -+ff000508: 18 e0 22 d0 st %o0, [ %o3 + 0x18 ] -+ff00050c: 18 60 04 d0 ld [ %l1 + 0x18 ], %o0 -+ff000510: 02 20 2a 91 sll %o0, 2, %o0 -+ff000514: 08 80 06 d2 ld [ %i2 + %o0 ], %o1 -+ff000518: 00 01 00 3b sethi %hi(0x40000), %i5 -+ff00051c: 1d 40 02 92 add %o1, %i5, %o1 -+ff000520: 08 80 26 d2 st %o1, [ %i2 + %o0 ] -+ff000524: c0 ff 3f 3b sethi %hi(0xffff0000), %i5 -+ff000528: 1d 40 0a 92 and %o1, %i5, %o1 -+ff00052c: 18 60 04 d0 ld [ %l1 + 0x18 ], %o0 -+ff000530: 00 28 02 90 add %o0, 0x800, %o0 -+ff000534: 80 00 00 15 sethi %hi(0x20000), %o2 -+ff000538: 0a 00 12 90 or %o0, %o2, %o0 -+ff00053c: 08 40 12 92 or %o1, %o0, %o1 -+ff000540: 40 00 00 3b sethi %hi(0x10000), %i5 -+ff000544: 1d 40 12 92 or %o1, %i5, %o1 -+ff000548: 1c e0 22 d2 st %o1, [ %o3 + 0x1c ] -+ff00054c: 4f 00 80 10 b ff000688 -+ff000550: 00 20 10 9a clr %o5 -+ff000554: 0c 00 10 9a mov %o4, %o5 -+ff000558: 30 60 04 d0 ld [ %l1 + 0x30 ], %o0 -+ff00055c: 08 40 a3 80 cmp %o5, %o0 -+ff000560: 3b 00 80 1a bcc ff00064c -+ff000564: e0 20 04 c6 ld [ %l0 + 0xe0 ], %g3 -+ff000568: 0c 26 10 ba mov 0x60c, %i5 -+ff00056c: 00 c0 22 fa st %i5, [ %o3 ] -+ff000570: 01 20 2b 91 sll %o4, 1, %o0 -+ff000574: 0c 00 02 90 add %o0, %o4, %o0 -+ff000578: 02 20 2a 91 sll %o0, 2, %o0 -+ff00057c: 08 40 04 90 add %l1, %o0, %o0 -+ff000580: 38 20 02 d2 ld [ %o0 + 0x38 ], %o1 -+ff000584: 04 e0 22 d2 st %o1, [ %o3 + 4 ] -+ff000588: 34 20 02 d0 ld [ %o0 + 0x34 ], %o0 -+ff00058c: 08 e0 22 d0 st %o0, [ %o3 + 8 ] -+ff000590: 0c e0 22 c6 st %g3, [ %o3 + 0xc ] -+ff000594: 10 e0 22 c4 st %g2, [ %o3 + 0x10 ] -+ff000598: 18 60 04 d0 ld [ %l1 + 0x18 ], %o0 -+ff00059c: 02 20 2a 91 sll %o0, 2, %o0 -+ff0005a0: 08 80 06 d2 ld [ %i2 + %o0 ], %o1 -+ff0005a4: 00 01 00 3b sethi %hi(0x40000), %i5 -+ff0005a8: 1d 40 02 92 
add %o1, %i5, %o1 -+ff0005ac: 08 80 26 d2 st %o1, [ %i2 + %o0 ] -+ff0005b0: c0 ff 3f 3b sethi %hi(0xffff0000), %i5 -+ff0005b4: 1d 40 0a 92 and %o1, %i5, %o1 -+ff0005b8: 44 a1 03 fa ld [ %sp + 0x144 ], %i5 -+ff0005bc: 60 60 07 d0 ld [ %i5 + 0x60 ], %o0 -+ff0005c0: 00 28 02 90 add %o0, 0x800, %o0 -+ff0005c4: 08 40 12 92 or %o1, %o0, %o1 -+ff0005c8: 40 00 00 3b sethi %hi(0x10000), %i5 -+ff0005cc: 1d 40 12 92 or %o1, %i5, %o1 -+ff0005d0: 14 e0 22 d2 st %o1, [ %o3 + 0x14 ] -+ff0005d4: 20 60 04 d0 ld [ %l1 + 0x20 ], %o0 -+ff0005d8: 10 21 02 90 add %o0, 0x110, %o0 -+ff0005dc: 18 e0 22 d0 st %o0, [ %o3 + 0x18 ] -+ff0005e0: 18 60 04 d0 ld [ %l1 + 0x18 ], %o0 -+ff0005e4: 02 20 2a 91 sll %o0, 2, %o0 -+ff0005e8: 08 80 06 d4 ld [ %i2 + %o0 ], %o2 -+ff0005ec: 00 01 00 3b sethi %hi(0x40000), %i5 -+ff0005f0: 1d 80 02 94 add %o2, %i5, %o2 -+ff0005f4: 08 80 26 d4 st %o2, [ %i2 + %o0 ] -+ff0005f8: c0 ff 3f 3b sethi %hi(0xffff0000), %i5 -+ff0005fc: 1d 80 0a 94 and %o2, %i5, %o2 -+ff000600: 18 60 04 d0 ld [ %l1 + 0x18 ], %o0 -+ff000604: 00 28 02 90 add %o0, 0x800, %o0 -+ff000608: 80 00 00 13 sethi %hi(0x20000), %o1 -+ff00060c: 09 00 12 90 or %o0, %o1, %o0 -+ff000610: 08 80 12 94 or %o2, %o0, %o2 -+ff000614: 40 00 00 3b sethi %hi(0x10000), %i5 -+ff000618: 1d 80 12 94 or %o2, %i5, %o2 -+ff00061c: 1c e0 22 d4 st %o2, [ %o3 + 0x1c ] -+ff000620: 01 20 10 90 mov 1, %o0 -+ff000624: 00 80 20 d0 st %o0, [ %g2 ] -+ff000628: 04 e0 02 d0 ld [ %o3 + 4 ], %o0 -+ff00062c: 08 c0 00 86 add %g3, %o0, %g3 -+ff000630: 08 40 03 9a add %o5, %o0, %o5 -+ff000634: 01 20 03 98 inc %o4 -+ff000638: 20 e0 02 96 add %o3, 0x20, %o3 -+ff00063c: 30 60 04 d0 ld [ %l1 + 0x30 ], %o0 -+ff000640: 08 00 a3 80 cmp %o4, %o0 -+ff000644: c9 ff bf 0a bcs ff000568 -+ff000648: 08 a0 00 84 add %g2, 8, %g2 -+ff00064c: c0 20 04 90 add %l0, 0xc0, %o0 -+ff000650: f0 ff 22 d0 st %o0, [ %o3 + -16 ] -+ff000654: e4 20 04 d0 ld [ %l0 + 0xe4 ], %o0 -+ff000658: 0d 00 a2 80 cmp %o0, %o5 -+ff00065c: 0b 00 80 1a bcc ff000688 -+ff000660: 00 20 10 98 clr %o4 -+ff000664: 04 00 80 10 b ff000674 -+ff000668: 10 00 10 96 mov %l0, %o3 -+ff00066c: 01 20 03 98 inc %o4 -+ff000670: 20 e0 02 96 add %o3, 0x20, %o3 -+ff000674: 30 60 04 d0 ld [ %l1 + 0x30 ], %o0 -+ff000678: 08 00 a3 80 cmp %o4, %o0 -+ff00067c: fc ff bf 2a bcs,a ff00066c -+ff000680: 04 e0 22 c0 clr [ %o3 + 4 ] -+ff000684: fc 3f 10 9a mov -4, %o5 -+ff000688: e4 20 24 da st %o5, [ %l0 + 0xe4 ] -+ff00068c: 10 00 10 82 mov %l0, %g1 -+ff000690: 18 60 04 d0 ld [ %l1 + 0x18 ], %o0 -+ff000694: 00 28 02 90 add %o0, 0x800, %o0 -+ff000698: 08 00 60 81 open %o0 -+ff00069c: 00 00 00 01 nop -+ff0006a0: 00 00 00 01 nop -+ff0006a4: 00 00 00 01 nop -+ff0006a8: 00 00 00 01 nop -+ff0006ac: 00 00 00 01 nop -+ff0006b0: 00 00 00 01 nop -+ff0006b4: 00 00 00 01 nop -+ff0006b8: 00 00 00 01 nop -+ff0006bc: 00 00 00 01 nop -+ff0006c0: 00 00 00 01 nop -+ff0006c4: 00 00 00 01 nop -+ff0006c8: 00 00 00 01 nop -+ff0006cc: 00 00 00 01 nop -+ff0006d0: 00 00 00 01 nop -+ff0006d4: 00 00 00 01 nop -+ff0006d8: 00 00 00 01 nop -+ff0006dc: 00 00 00 01 nop -+ff0006e0: 00 00 00 01 nop -+ff0006e4: 00 00 00 01 nop -+ff0006e8: 00 00 00 01 nop -+ff0006ec: 00 00 00 01 nop -+ff0006f0: 00 00 00 01 nop -+ff0006f4: 00 00 00 01 nop -+ff0006f8: 00 00 00 01 nop -+ff0006fc: 00 20 10 98 clr %o4 ! 
0 <*ABS*> -+ff000700: 0c 00 10 9a mov %o4, %o5 -+ff000704: 14 20 04 fa ld [ %l0 + 0x14 ], %i5 -+ff000708: ec 00 82 3b sendtrans 0x1007, %o4, %i5 -+ff00070c: 30 80 d6 1b sendtrans 0xb401, [ %l0 ], %o5 -+ff000710: 00 00 68 91 close %o0 -+ff000714: 04 20 a2 80 cmp %o0, 4 -+ff000718: 3b 00 80 02 be ff000804 -+ff00071c: 0c 00 a2 80 cmp %o0, %o4 -+ff000720: 04 00 80 02 be ff000730 -+ff000724: 00 00 00 01 nop -+ff000728: 01 20 d0 91 ta 1 -+ff00072c: 36 00 80 30 b,a ff000804 -+ff000730: 7c 20 06 d0 ld [ %i0 + 0x7c ], %o0 -+ff000734: 01 20 02 90 inc %o0 -+ff000738: 7c 20 26 d0 st %o0, [ %i0 + 0x7c ] -+ff00073c: 78 20 06 d0 ld [ %i0 + 0x78 ], %o0 -+ff000740: 00 20 a2 80 cmp %o0, 0 -+ff000744: 04 00 80 02 be ff000754 -+ff000748: 78 20 06 90 add %i0, 0x78, %o0 -+ff00074c: 64 00 00 40 call ff0008dc -+ff000750: 40 e0 06 92 add %i3, 0x40, %o1 -+ff000754: f0 20 04 d0 ld [ %l0 + 0xf0 ], %o0 -+ff000758: 00 20 a2 80 cmp %o0, 0 -+ff00075c: 03 00 80 12 bne ff000768 -+ff000760: f0 20 26 d0 st %o0, [ %i0 + 0xf0 ] -+ff000764: 80 e0 26 c0 clr [ %i3 + 0x80 ] -+ff000768: 7c 20 06 d0 ld [ %i0 + 0x7c ], %o0 -+ff00076c: 40 e0 26 d0 st %o0, [ %i3 + 0x40 ] -+ff000770: 04 60 04 d0 ld [ %l1 + 4 ], %o0 -+ff000774: 04 20 8a 80 btst 4, %o0 -+ff000778: 13 00 80 02 be ff0007c4 -+ff00077c: 11 00 10 8a mov %l1, %g5 -+ff000780: ec 20 04 cc ld [ %l0 + 0xec ], %g6 -+ff000784: 3f a0 0b 8e and %sp, 0x3f, %g7 -+ff000788: 40 e0 01 8e add %g7, 0x40, %g7 -+ff00078c: 07 80 23 9c sub %sp, %g7, %sp -+ff000790: 00 80 bb e0 stblock64 %l0, [ %sp ] -+ff000794: 00 60 99 e0 ldblock64 [ %g5 ], %l0 -+ff000798: 00 a0 b9 e0 stblock64 %l0, [ %g6 ] -+ff00079c: 40 60 99 e0 ldblock64 [ %g5 + 0x40 ], %l0 -+ff0007a0: 40 a0 b9 e0 stblock64 %l0, [ %g6 + 0x40 ] -+ff0007a4: 80 60 99 e0 ldblock64 [ %g5 + 0x80 ], %l0 -+ff0007a8: 80 a0 b9 e0 stblock64 %l0, [ %g6 + 0x80 ] -+ff0007ac: c0 60 99 e0 ldblock64 [ %g5 + 0xc0 ], %l0 -+ff0007b0: c0 a0 b9 e0 stblock64 %l0, [ %g6 + 0xc0 ] -+ff0007b4: 00 80 9b e0 ldblock64 [ %sp ], %l0 -+ff0007b8: 07 80 03 9c add %sp, %g7, %sp -+ff0007bc: 0e 00 80 10 b ff0007f4 -+ff0007c0: ec 20 04 d2 ld [ %l0 + 0xec ], %o1 -+ff0007c4: ec 20 04 cc ld [ %l0 + 0xec ], %g6 -+ff0007c8: 3f a0 0b 8e and %sp, 0x3f, %g7 -+ff0007cc: 40 e0 01 8e add %g7, 0x40, %g7 -+ff0007d0: 07 80 23 9c sub %sp, %g7, %sp -+ff0007d4: 00 80 bb e0 stblock64 %l0, [ %sp ] -+ff0007d8: 00 60 99 e0 ldblock64 [ %g5 ], %l0 -+ff0007dc: 00 a0 b9 e0 stblock64 %l0, [ %g6 ] -+ff0007e0: 40 60 99 e0 ldblock64 [ %g5 + 0x40 ], %l0 -+ff0007e4: 40 a0 b9 e0 stblock64 %l0, [ %g6 + 0x40 ] -+ff0007e8: 00 80 9b e0 ldblock64 [ %sp ], %l0 -+ff0007ec: 07 80 03 9c add %sp, %g7, %sp -+ff0007f0: ec 20 04 d2 ld [ %l0 + 0xec ], %o1 -+ff0007f4: e4 20 04 d0 ld [ %l0 + 0xe4 ], %o0 -+ff0007f8: 88 62 22 d0 st %o0, [ %o1 + 0x288 ] -+ff0007fc: 04 20 06 d0 ld [ %i0 + 4 ], %o0 -+ff000800: 00 c0 26 d0 st %o0, [ %i3 ] -+ff000804: 00 00 07 d0 ld [ %i4 ], %o0 -+ff000808: 01 20 8a 80 btst 1, %o0 -+ff00080c: 05 00 80 12 bne ff000820 -+ff000810: 00 00 00 01 nop -+ff000814: 14 20 27 f2 st %i1, [ %i4 + 0x14 ] -+ff000818: 06 00 80 10 b ff000830 -+ff00081c: dc a0 03 fa ld [ %sp + 0xdc ], %i5 -+ff000820: 14 20 27 f2 st %i1, [ %i4 + 0x14 ] -+ff000824: fe 3f 0a 90 and %o0, -2, %o0 -+ff000828: 00 00 27 d0 st %o0, [ %i4 ] -+ff00082c: dc a0 03 fa ld [ %sp + 0xdc ], %i5 -+ff000830: 01 60 07 ba inc %i5 -+ff000834: dc a0 23 fa st %i5, [ %sp + 0xdc ] -+ff000838: 0c 20 07 d0 ld [ %i4 + 0xc ], %o0 -+ff00083c: 08 40 a6 80 cmp %i1, %o0 -+ff000840: 04 00 80 32 bne,a ff000850 -+ff000844: 08 20 07 d0 ld [ %i4 + 8 ], %o0 -+ff000848: 03 00 
80 10 b ff000854 -+ff00084c: 10 20 07 f2 ld [ %i4 + 0x10 ], %i1 -+ff000850: 08 40 06 b2 add %i1, %o0, %i1 -+ff000854: 00 00 70 81 breaktest -+ff000858: 16 00 80 1c bpos ff0008b0 -+ff00085c: 00 00 00 01 nop -+ff000860: 60 a0 23 9c sub %sp, 0x60, %sp -+ff000864: 00 a0 a3 c0 stblock32 %g0, [ %sp ] -+ff000868: 20 a0 a3 f0 stblock32 %i0, [ %sp + 0x20 ] -+ff00086c: 40 a0 a3 e0 stblock32 %l0, [ %sp + 0x40 ] -+ff000870: 02 c0 3f 3f sethi %hi(0xff000800), %i7 -+ff000874: 8c e0 17 be or %i7, 0x8c, %i7 ! ff00088c -+ff000878: 04 e0 8f 80 btst 4, %i7 -+ff00087c: 06 00 80 12 bne ff000894 -+ff000880: 00 00 00 01 nop -+ff000884: 00 c0 1f fc ldd [ %i7 ], %fp -+ff000888: 00 00 a0 81 break -+ff00088c: 06 00 80 10 b ff0008a4 -+ff000890: 40 a0 83 e0 ldblock32 [ %sp + 0x40 ], %l0 -+ff000894: 14 e0 07 be add %i7, 0x14, %i7 -+ff000898: 00 c0 1f fc ldd [ %i7 ], %fp -+ff00089c: 00 00 a0 81 break -+ff0008a0: 40 a0 83 e0 ldblock32 [ %sp + 0x40 ], %l0 -+ff0008a4: 20 a0 83 f0 ldblock32 [ %sp + 0x20 ], %i0 -+ff0008a8: 00 a0 83 c0 ldblock32 [ %sp ], %g0 -+ff0008ac: 60 a0 03 9c add %sp, 0x60, %sp -+ff0008b0: 04 20 07 d0 ld [ %i4 + 4 ], %o0 -+ff0008b4: 08 40 a6 80 cmp %i1, %o0 -+ff0008b8: b3 fe bf 12 bne ff000384 -+ff0008bc: 19 00 10 a2 mov %i1, %l1 -+ff0008c0: 8a fe bf 10 b ff0002e8 -+ff0008c4: f4 20 06 d0 ld [ %i0 + 0xf4 ], %o0 -+ff0008c8: 60 a0 03 de ld [ %sp + 0x60 ], %o7 -+ff0008cc: 80 a0 83 e0 ldblock32 [ %sp + 0x80 ], %l0 -+ff0008d0: a0 a0 83 f0 ldblock32 [ %sp + 0xa0 ], %i0 -+ff0008d4: 08 e0 c3 81 retl -+ff0008d8: 00 a1 03 9c add %sp, 0x100, %sp -+ -+ff0008dc : -+ff0008dc: 80 a0 23 9c sub %sp, 0x80, %sp -+ff0008e0: 04 20 02 c4 ld [ %o0 + 4 ], %g2 -+ff0008e4: 00 40 22 c4 st %g2, [ %o1 ] -+ff0008e8: 18 00 80 30 b,a ff000948 -+ff0008ec: 00 00 70 81 breaktest -+ff0008f0: 16 00 80 12 bne ff000948 -+ff0008f4: 00 00 00 01 nop -+ff0008f8: 60 a0 23 9c sub %sp, 0x60, %sp -+ff0008fc: 00 a0 a3 c0 stblock32 %g0, [ %sp ] -+ff000900: 20 a0 a3 f0 stblock32 %i0, [ %sp + 0x20 ] -+ff000904: 40 a0 a3 e0 stblock32 %l0, [ %sp + 0x40 ] -+ff000908: 02 c0 3f 3f sethi %hi(0xff000800), %i7 -+ff00090c: 24 e1 17 be or %i7, 0x124, %i7 ! 
ff000924 -+ff000910: 04 e0 8f 80 btst 4, %i7 -+ff000914: 06 00 80 12 bne ff00092c -+ff000918: 00 00 00 01 nop -+ff00091c: 00 c0 1f fc ldd [ %i7 ], %fp -+ff000920: 00 00 a0 81 break -+ff000924: 06 00 80 10 b ff00093c -+ff000928: 40 a0 83 e0 ldblock32 [ %sp + 0x40 ], %l0 -+ff00092c: 14 e0 07 be add %i7, 0x14, %i7 -+ff000930: 00 c0 1f fc ldd [ %i7 ], %fp -+ff000934: 00 00 a0 81 break -+ff000938: 40 a0 83 e0 ldblock32 [ %sp + 0x40 ], %l0 -+ff00093c: 20 a0 83 f0 ldblock32 [ %sp + 0x20 ], %i0 -+ff000940: 00 a0 83 c0 ldblock32 [ %sp ], %g0 -+ff000944: 60 a0 03 9c add %sp, 0x60, %sp -+ff000948: 00 00 02 c4 ld [ %o0 ], %g2 -+ff00094c: 00 a0 a0 80 cmp %g2, 0 -+ff000950: e7 ff bf 12 bne ff0008ec -+ff000954: 00 00 00 01 nop -+ff000958: 04 20 02 c4 ld [ %o0 + 4 ], %g2 -+ff00095c: 01 a0 00 84 inc %g2 -+ff000960: 04 20 22 c4 st %g2, [ %o0 + 4 ] -+ff000964: 00 00 02 c4 ld [ %o0 ], %g2 -+ff000968: 00 a0 a0 80 cmp %g2, 0 -+ff00096c: dd ff bf 12 bne ff0008e0 -+ff000970: 00 00 00 01 nop -+ff000974: 08 e0 c3 81 retl -+ff000978: 80 a0 03 9c add %sp, 0x80, %sp -+Disassembly of section .data: -diff -urN clean/drivers/net/qsnet/ep/threadcode_elan4_Linux.c linux-2.6.9/drivers/net/qsnet/ep/threadcode_elan4_Linux.c ---- clean/drivers/net/qsnet/ep/threadcode_elan4_Linux.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/threadcode_elan4_Linux.c 2005-09-07 10:39:44.000000000 -0400 -@@ -0,0 +1,107 @@ -+/* --------------------------------------------------------*/ -+/* MACHINE GENERATED ELAN CODE */ -+#include -+#include -+#include "kcomm_elan4.h" -+static uint32_t threadcode_elan4_text[] = { -+0x00a00087, 0xc04060cb, 0x00003080, 0x80001080, 0x02606180, 0x02004032, 0x807f60cb, 0x04606180, -+0x02004032, 0x407f60d3, 0x08606180, 0x02004032, 0x007f60db, 0x10606180, 0x02004032, 0xc07e60e3, -+0x20606180, 0x02004032, 0x807e60eb, 0x40606180, 0x02004032, 0x407e60f3, 0x80606180, 0x02004032, -+0x007e60fb, 0x40001180, 0xc3801080, 0xc07f60c3, 0x20002000, 0x20002000, 0x20002000, 0x20002000, -+0x407f8001, 0x6860c0c7, 0x4060e0d1, 0x00208490, 0x00208080, 0x00208080, 0x6060c0d4, 0x00208292, -+0x00608290, 0x00a08294, 0xff3f8088, 0x1c381293, 0xc00044c8, 0x13004291, 0xc000c5d1, 0xc00044c8, -+0x20381288, 0x0020b200, 0x0e004003, 0x01608408, 0x00001088, 0x04204288, 0x0020b200, 0x04004003, -+0x00208080, 0x74010040, 0x00a08488, 0xc00044c8, 0x20381288, 0x0020b200, 0xf6ff7f13, 0x01608408, -+0x10161282, 0x800094c2, 0xc00044c8, 0x20381288, 0x0020b200, 0xe7ff7f13, 0x00208080, 0x686040c7, -+0x406060d1, 0x606040d4, 0x08e00180, 0xc0608001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, -+0xc07e8001, 0xc060c0c7, 0x4060e0d3, 0x00208490, 0x00208080, 0x00208080, 0x8060e0db, 0x00208698, -+0x00208080, 0x00208080, 0x4061c0c8, 0x00608295, 0x00a0829b, 0x5861c0cb, 0x6061c0cc, 0x6861c0cd, -+0x0120809c, 0x08e042d1, 0x1c00900a, 0x05b4128a, 0x606140c8, 0x586140cb, 0x58010040, 0x18e042c9, -+0x0020809c, 0x586140cd, 0xc04043c8, 0x0840b400, 0x30014003, 0xffff3f08, 0xe023829f, 0x20f4179f, -+0x10e3879f, 0xe023829e, 0x20b4179e, 0x03a3879e, 0x00a0879d, 0x00608493, 0x18608408, 0x800012c2, -+0x089a109a, 0x20b4169a, 0x20b8169a, 0x00a88609, 0x20741289, 0x01120008, 0x0a381288, 0x08408297, -+0x45208088, 0x06341288, 0x406140c9, 0xc84042c8, 0x00288218, 0x04608408, 0x800012c2, 0x089a1088, -+0x20341288, 0x20381288, 0x00208299, 0x20608408, 0x800012c2, 0x089a1089, 0x20741289, 0x20781289, -+0x30608408, 0x800012c2, 0x089a1094, 0x20341594, 0x20381594, 0x02604688, 0x0020b200, 0x03004012, -+0x80608216, 0x60608216, 0x90608509, 0x804012c8, 0x01208208, 0x804092c8, 
0x046012c8, 0x043a1288, -+0x0020b200, 0x04004003, 0x686140c8, 0x7dffff7f, 0x00e0868a, 0x886045d0, 0x0020b400, 0x12004013, -+0x90608512, 0x808014c8, 0x80c096c8, 0x64010040, 0x00608588, 0x00208290, 0x808014c8, 0x01208208, -+0x808094c8, 0x04a014c8, 0x043a1288, 0x0020b200, 0x05004003, 0x686140c8, 0x00a08489, 0x69ffff7f, -+0x00e0868a, 0x80c014c2, 0x139a1089, 0x20741289, 0x20781289, 0x40b03608, 0x01208288, 0x0840b200, -+0x06004003, 0x90608508, 0x800012c8, 0x80c096c8, 0xbf004010, 0xa86045c8, 0xa02344c3, 0x01604688, -+0x0020b500, 0x03004013, 0x14008209, 0x01208209, 0x05208088, 0x09009221, 0x0774188a, 0x0a00840b, -+0x05741888, 0x0800840c, 0x406140cd, 0x29228088, 0x03341288, 0xc84043c9, 0x03b41688, 0xc84042cf, -+0x01604688, 0x0020b200, 0x1d004002, 0x0023830c, 0xca00c4d7, 0x40c40f09, 0x09608289, 0x08e0c2c9, -+0x0ae08388, 0x10e0c2c8, 0x81001008, 0x0a341288, 0x18e0c2c8, 0x1de08388, 0x20e0c2c8, 0x28e0c2d8, -+0x24e08408, 0x800012c2, 0x089a1088, 0x20341288, 0x20381288, 0x80208208, 0x30e0c2c8, 0x00e18008, -+0x38e0c2c8, 0x40e0c2d6, 0x48e0c2cc, 0xc000c3df, 0x20e0830f, 0x80e0820b, 0x2020830c, 0x0020b500, -+0x13004033, 0x0020808d, 0xc0c0c2d7, 0x40c40f0a, 0x09a0828a, 0x08e0c2ca, 0x0ae08388, 0x10e0c2c8, -+0x00040008, 0x18e0c2c8, 0x1de08388, 0x20e0c2c8, 0x28e0c2d8, 0x40e0c2d6, 0x48e0c2cc, 0xc000c3de, -+0x00208092, 0x4b004010, 0x20e0830f, 0xb8238408, 0x800012c2, 0x089a108e, 0x20b4138e, 0x20b8138e, -+0x00208092, 0x1480b400, 0x2d00401b, 0x40c40f08, 0x092082a3, 0x00040022, 0xffff3f08, 0xe02382a0, -+0x203418a0, 0x102388a0, 0x0d408309, 0x0d408209, 0x02741289, 0x09c08409, 0x3860820a, 0x808012c2, -+0x0a9a108a, 0x20b4128a, 0x20b8128a, 0xc0c0c2d7, 0x08e0c2e3, 0x0ae08388, 0x10e0c2c8, 0x20b41288, -+0x22008288, 0x18e0c2c8, 0x1de08388, 0x20e0c2c8, 0x28e0c2d8, 0x34608209, 0x804012c2, 0x099a1089, -+0x20741289, 0x20781289, 0x30e0c2c9, 0x38e0c2ce, 0x40e0c2d6, 0x48e0c2cc, 0xc000c3e0, 0x0a80830e, -+0x0a808412, 0x20e0830f, 0x80e0820b, 0x0160830d, 0x1440b300, 0xddff7f0b, 0x2020830c, 0xe03f830c, -+0xc000c3dd, 0xbc238408, 0x800012c2, 0x089a1088, 0x20341288, 0x20381288, 0x1200b200, 0x0e00401b, -+0x07741888, 0x0060888d, 0x0460b800, 0x08004019, 0x0800840b, 0x00040008, 0x18e0c2c8, 0x0160830d, -+0x0460b300, 0xfdff7f09, 0x80e0820b, 0xfc3f8092, 0x07741888, 0x08008408, 0x606140cb, 0xc00062e3, -+0x402062f3, 0xc0c0e2e3, 0xc0c0e2f3, 0x982244c8, 0x8860c5c8, 0x886045c8, 0x0020b200, 0x05004033, -+0xd82294c0, 0x88608508, 0x8060c5c8, 0xd82294c0, 0x04604688, 0x0020b200, 0x0c004002, 0xdc2294c0, -+0xc0c064e3, 0x40e064f3, 0xc0c0e0e3, 0x80e064e3, 0x40e0e0f3, 0xc0e064f3, 0x80e0e0e3, 0xc0e0e0f3, -+0x07004010, 0x88e28008, 0xc0c064e3, 0x40e064f3, 0xc0c0e0e3, 0x40e0e0f3, 0x88e28008, 0x08961482, -+0x800092c2, 0x406140cd, 0x29228088, 0x03341288, 0xc84043c9, 0x03b41688, 0xc840c2cf, 0x90608508, -+0x800012c8, 0x80c096c8, 0xa86045c8, 0x0840b400, 0x03004013, 0x00618411, 0xa06045d1, 0x986045c8, -+0x0020b200, 0x04004013, 0x0120871c, 0x586140c9, 0x0860c2d1, 0xfe21b700, 0x0f004035, 0x986045cb, -+0x00001088, 0x02204288, 0x0020b200, 0x05004003, 0x586140ca, 0x18000040, 0x606140c8, 0x586140ca, -+0xc08042c8, 0x0840b400, 0xdcfe7f13, 0x00608493, 0x986045cb, 0x00e0b200, 0xc5fe7f03, 0x1c00900a, -+0x606140c8, 0x60608509, 0x38000040, 0xe03f808a, 0x586140cb, 0x08e0c2d1, 0xbcfe7f10, 0x0120871c, -+0xc06040c7, 0x406060d3, 0x806060db, 0x08e00180, 0x40618001, 0x807f8001, 0xc040e0d3, 0x4060e0db, -+0x00208490, 0x00208698, 0x00208080, 0x00208080, 0x00e08192, 0x02000040, 0x00608091, 0x14e08110, -+0x17208097, 0xc000f2d3, 0xc04060d3, 0x406060db, 0x08a00080, 0x80608001, 0x407f8001, 0x4060e0d3, 
-+0x8060e0db, 0x00208490, 0x00208698, 0x00208080, 0x00208080, 0x00e08192, 0x02000040, 0x00608091, -+0x40e08110, 0xc040e0d1, 0x37208097, 0x3860c0d7, 0x00208490, 0x00e08597, 0x00208080, 0x00208080, -+0x1f608290, 0x20b41291, 0x08638491, 0x00608092, 0x00208293, 0xc000f2d1, 0x406060d3, 0x806060db, -+0x08a00080, 0xc0608001, 0x407f8001, 0x4060e0d3, 0x8060e0db, 0x00208490, 0x00208698, 0x00208080, -+0x00208080, 0x00e08192, 0x02000040, 0x00608091, 0x54e08110, 0xc040e0d1, 0x37208097, 0x3860c0d7, -+0x00208490, 0x00e08597, 0x00208080, 0x00208080, 0x1f608290, 0x20b41291, 0x08638491, 0x00608092, -+0x00208293, 0x0ef41294, 0x0d208594, 0x17208095, 0x17208096, 0x17208097, 0xc000f2d3, 0x406060d3, -+0x806060db, 0x08a00080, 0xc0608001, 0x01208097, 0xb0e3c0d7, 0x80a060d2, 0x98e28004, 0x98e2c0c0, -+0x80a0c0c4, 0xc080c4c3, 0x01e0b400, 0x06004002, 0x00a08490, 0x00e08097, 0x02208097, 0xb0e3c0d7, -+0xd8e2d0d0, 0xd8e2c0d0, 0x03208097, 0xb0e3c0d7, 0x00e08088, 0x0e004010, 0x00a060c3, 0x407f8001, -+0x4060e0d3, 0x8060e0db, 0x00208490, 0x00208698, 0x00208080, 0x00208080, 0x01208089, 0x8820c2c9, -+0x00608091, 0x00e08197, 0x0020f2d3, 0x406060d3, 0x806060db, 0x08e00180, 0xc0608001, }; -+#define threadcode_elan4_text_size 0x87c -+static uint32_t threadcode_elan4_data[] = { -+0}; -+#define threadcode_elan4_data_size 0x0 -+static uint32_t threadcode_elan4_rodata[] = { -+0}; -+#define threadcode_elan4_rodata_size 0x0 -+static EP_SYMBOL threadcode_elan4_symbols[] = { -+ {".thread_restart", 0x00000000f800000c}, -+ {".thread_start", 0x00000000f8000000}, -+ {"__bss_start", 0x00000000f810087c}, -+ {"_edata", 0x00000000f810087c}, -+ {"_end", 0x00000000f8100880}, -+ {"_etext", 0x00000000f800087c}, -+ {"_sdata", 0x00000000f810087c}, -+ {"_stext", 0x00000000f8000000}, -+ {"c_queue_rxd", 0x00000000f80007ec}, -+ {"c_reschedule", 0x00000000f80006b4}, -+ {"c_stall_thread", 0x00000000f800083c}, -+ {"c_waitevent", 0x00000000f80006f8}, -+ {"c_waitevent_interrupt", 0x00000000f8000768}, -+ {"ep4_spinblock", 0x00000000f8000080}, -+ {"ep4comms_rcvr", 0x00000000f8000140}, -+ {0, 0}}; -+EP_CODE threadcode_elan4 = { -+ (unsigned char *) threadcode_elan4_text, -+ threadcode_elan4_text_size, -+ (unsigned char *) threadcode_elan4_data, -+ threadcode_elan4_data_size, -+ (unsigned char *) threadcode_elan4_rodata, -+ threadcode_elan4_rodata_size, -+ threadcode_elan4_symbols, -+}; -diff -urN clean/drivers/net/qsnet/ep/threadcode_elan4_Linux.code.dis linux-2.6.9/drivers/net/qsnet/ep/threadcode_elan4_Linux.code.dis ---- clean/drivers/net/qsnet/ep/threadcode_elan4_Linux.code.dis 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/ep/threadcode_elan4_Linux.code.dis 2005-09-07 10:39:44.000000000 -0400 -@@ -0,0 +1,628 @@ -+ -+threadcode_elan4_Linux.code: file format elf64-elan -+ -+Disassembly of section .text: -+ -+00000000f8000000 <_stext>: -+ f8000000: 00 a0 00 87 call %r2 -+ f8000004: c0 40 60 cb ld64 [ %sp ], %r8 -+ f8000008: 00 00 30 80 unimp -+ -+00000000f800000c <.thread_restart>: -+ f800000c: 80 00 10 80 setflg -+ f8000010: 02 60 61 80 btst 2, %r5 -+ f8000014: 02 00 40 32 bne,a f800001c <.thread_restart+0x10> -+ f8000018: 80 7f 60 cb ld64 [ %sp + -128 ], %r8 -+ f800001c: 04 60 61 80 btst 4, %r5 -+ f8000020: 02 00 40 32 bne,a f8000028 <.thread_restart+0x1c> -+ f8000024: 40 7f 60 d3 ld64 [ %sp + -192 ], %r16 -+ f8000028: 08 60 61 80 btst 8, %r5 -+ f800002c: 02 00 40 32 bne,a f8000034 <.thread_restart+0x28> -+ f8000030: 00 7f 60 db ld64 [ %sp + -256 ], %r24 -+ f8000034: 10 60 61 80 btst 0x10, %r5 -+ f8000038: 02 00 40 32 bne,a f8000040 
<.thread_restart+0x34> -+ f800003c: c0 7e 60 e3 ld64 [ %sp + -320 ], %r32 -+ f8000040: 20 60 61 80 btst 0x20, %r5 -+ f8000044: 02 00 40 32 bne,a f800004c <.thread_restart+0x40> -+ f8000048: 80 7e 60 eb ld64 [ %sp + -384 ], %r40 -+ f800004c: 40 60 61 80 btst 0x40, %r5 -+ f8000050: 02 00 40 32 bne,a f8000058 <.thread_restart+0x4c> -+ f8000054: 40 7e 60 f3 ld64 [ %sp + -448 ], %r48 -+ f8000058: 80 60 61 80 btst 0x80, %r5 -+ f800005c: 02 00 40 32 bne,a f8000064 <.thread_restart+0x58> -+ f8000060: 00 7e 60 fb ld64 [ %sp + -512 ], %r56 -+ f8000064: 40 00 11 80 ldcc %r4 -+ f8000068: c3 80 10 80 rti %r2, %r3 -+ f800006c: c0 7f 60 c3 ld64 [ %sp + -64 ], %r0 -+ f8000070: 20 00 20 00 sethi %hi(0x80008000), %r0 -+ f8000074: 20 00 20 00 sethi %hi(0x80008000), %r0 -+ f8000078: 20 00 20 00 sethi %hi(0x80008000), %r0 -+ f800007c: 20 00 20 00 sethi %hi(0x80008000), %r0 -+ -+00000000f8000080 : -+ f8000080: 40 7f 80 01 add %sp, -192, %sp -+ f8000084: 68 60 c0 c7 st8 %r7, [ %sp + 0x68 ] -+ f8000088: 40 60 e0 d1 st32 %r16, [ %sp + 0x40 ] -+ f800008c: 00 20 84 90 mov %r16, %r16 -+ f8000090: 00 20 80 80 nop -+ f8000094: 00 20 80 80 nop -+ f8000098: 60 60 c0 d4 st8 %r20, [ %sp + 0x60 ] -+ f800009c: 00 20 82 92 mov %r8, %r18 -+ f80000a0: 00 60 82 90 mov %r9, %r16 -+ f80000a4: 00 a0 82 94 mov %r10, %r20 -+ f80000a8: ff 3f 80 88 mov -1, %r8 -+ f80000ac: 1c 38 12 93 srl8 %r8, 0x1c, %r19 -+ -+00000000f80000b0 : -+ f80000b0: c0 00 44 c8 ld8 [ %r16 ], %r8 -+ f80000b4: 13 00 42 91 and %r8, %r19, %r17 -+ f80000b8: c0 00 c5 d1 st8 %r17, [ %r20 ] -+ f80000bc: c0 00 44 c8 ld8 [ %r16 ], %r8 -+ f80000c0: 20 38 12 88 srl8 %r8, 0x20, %r8 -+ f80000c4: 00 20 b2 00 cmp %r8, 0 -+ f80000c8: 0e 00 40 03 be %xcc, f8000100 -+ f80000cc: 01 60 84 08 add %r17, 1, %r8 -+ -+00000000f80000d0 : -+ f80000d0: 00 00 10 88 breaktest %r8 -+ f80000d4: 04 20 42 88 and %r8, 4, %r8 -+ f80000d8: 00 20 b2 00 cmp %r8, 0 -+ f80000dc: 04 00 40 03 be %xcc, f80000ec -+ f80000e0: 00 20 80 80 nop -+ f80000e4: 74 01 00 40 call f80006b4 -+ f80000e8: 00 a0 84 88 mov %r18, %r8 -+ -+00000000f80000ec : -+ f80000ec: c0 00 44 c8 ld8 [ %r16 ], %r8 -+ f80000f0: 20 38 12 88 srl8 %r8, 0x20, %r8 -+ f80000f4: 00 20 b2 00 cmp %r8, 0 -+ f80000f8: f6 ff 7f 13 bne %xcc, f80000d0 -+ f80000fc: 01 60 84 08 add %r17, 1, %r8 -+ -+00000000f8000100 : -+ f8000100: 10 16 12 82 sll8, byte %r8, %r16, %r2 -+ f8000104: 80 00 94 c2 st4 %r2, [ %r16 ] -+ f8000108: c0 00 44 c8 ld8 [ %r16 ], %r8 -+ f800010c: 20 38 12 88 srl8 %r8, 0x20, %r8 -+ f8000110: 00 20 b2 00 cmp %r8, 0 -+ f8000114: e7 ff 7f 13 bne %xcc, f80000b0 -+ f8000118: 00 20 80 80 nop -+ f800011c: 68 60 40 c7 ld8 [ %sp + 0x68 ], %r7 -+ f8000120: 40 60 60 d1 ld32 [ %sp + 0x40 ], %r16 -+ f8000124: 60 60 40 d4 ld8 [ %sp + 0x60 ], %r20 -+ f8000128: 08 e0 01 80 retl -+ f800012c: c0 60 80 01 add %sp, 0xc0, %sp -+ f8000130: 00 00 00 01 sethi %hi(0), %sp -+ f8000134: 00 00 00 01 sethi %hi(0), %sp -+ f8000138: 00 00 00 01 sethi %hi(0), %sp -+ f800013c: 00 00 00 01 sethi %hi(0), %sp -+ -+00000000f8000140 : -+ f8000140: c0 7e 80 01 add %sp, -320, %sp -+ f8000144: c0 60 c0 c7 st8 %r7, [ %sp + 0xc0 ] -+ f8000148: 40 60 e0 d3 st64 %r16, [ %sp + 0x40 ] -+ f800014c: 00 20 84 90 mov %r16, %r16 -+ f8000150: 00 20 80 80 nop -+ f8000154: 00 20 80 80 nop -+ f8000158: 80 60 e0 db st64 %r24, [ %sp + 0x80 ] -+ f800015c: 00 20 86 98 mov %r24, %r24 -+ f8000160: 00 20 80 80 nop -+ f8000164: 00 20 80 80 nop -+ f8000168: 40 61 c0 c8 st8 %r8, [ %sp + 0x140 ] -+ f800016c: 00 60 82 95 mov %r9, %r21 -+ f8000170: 00 a0 82 9b mov %r10, %r27 -+ f8000174: 58 61 c0 cb st8 
%r11, [ %sp + 0x158 ] -+ f8000178: 60 61 c0 cc st8 %r12, [ %sp + 0x160 ] -+ f800017c: 68 61 c0 cd st8 %r13, [ %sp + 0x168 ] -+ f8000180: 01 20 80 9c mov 1, %r28 -+ f8000184: 08 e0 42 d1 ld8 [ %r11 + 8 ], %r17 -+ -+00000000f8000188 : -+ f8000188: 1c 00 90 0a neg %r28, %r10 -+ -+00000000f800018c : -+ f800018c: 05 b4 12 8a sll8 %r10, 5, %r10 -+ f8000190: 60 61 40 c8 ld8 [ %sp + 0x160 ], %r8 -+ f8000194: 58 61 40 cb ld8 [ %sp + 0x158 ], %r11 -+ f8000198: 58 01 00 40 call f80006f8 -+ f800019c: 18 e0 42 c9 ld8 [ %r11 + 0x18 ], %r9 -+ f80001a0: 00 20 80 9c mov %r0, %r28 -+ f80001a4: 58 61 40 cd ld8 [ %sp + 0x158 ], %r13 -+ f80001a8: c0 40 43 c8 ld8 [ %r13 ], %r8 -+ f80001ac: 08 40 b4 00 cmp %r17, %r8 -+ f80001b0: 30 01 40 03 be %xcc, f8000670 -+ f80001b4: ff ff 3f 08 sethi %hi(0xfffffc00), %r8 -+ f80001b8: e0 23 82 9f or %r8, 0x3e0, %r31 -+ f80001bc: 20 f4 17 9f sll8 %r31, 0x20, %r31 -+ f80001c0: 10 e3 87 9f or %r31, 0x310, %r31 -+ f80001c4: e0 23 82 9e or %r8, 0x3e0, %r30 -+ f80001c8: 20 b4 17 9e sll8 %r30, 0x20, %r30 -+ f80001cc: 03 a3 87 9e or %r30, 0x303, %r30 -+ f80001d0: 00 a0 87 9d mov %r30, %r29 -+ f80001d4: 00 60 84 93 mov %r17, %r19 -+ -+00000000f80001d8 : -+ f80001d8: 18 60 84 08 add %r17, 0x18, %r8 -+ f80001dc: 80 00 12 c2 ld4 [ %r8 ], %r2 -+ f80001e0: 08 9a 10 9a srl8, byte %r2, %r8, %r26 -+ f80001e4: 20 b4 16 9a sll8 %r26, 0x20, %r26 -+ f80001e8: 20 b8 16 9a srl8 %r26, 0x20, %r26 -+ f80001ec: 00 a8 86 09 add %r26, 0x800, %r9 -+ f80001f0: 20 74 12 89 sll8 %r9, 0x20, %r9 -+ f80001f4: 01 12 00 08 sethi %hi(0x480400), %r8 -+ f80001f8: 0a 38 12 88 srl8 %r8, 0xa, %r8 -+ f80001fc: 08 40 82 97 or %r9, %r8, %r23 -+ f8000200: 45 20 80 88 mov 0x45, %r8 -+ f8000204: 06 34 12 88 sll8 %r8, 6, %r8 -+ f8000208: 40 61 40 c9 ld8 [ %sp + 0x140 ], %r9 -+ f800020c: c8 40 42 c8 ld8 [ %r9 + %r8 ], %r8 -+ f8000210: 00 28 82 18 add %r8, 0x800, %r24 -+ f8000214: 04 60 84 08 add %r17, 4, %r8 -+ f8000218: 80 00 12 c2 ld4 [ %r8 ], %r2 -+ f800021c: 08 9a 10 88 srl8, byte %r2, %r8, %r8 -+ f8000220: 20 34 12 88 sll8 %r8, 0x20, %r8 -+ f8000224: 20 38 12 88 srl8 %r8, 0x20, %r8 -+ f8000228: 00 20 82 99 mov %r8, %r25 -+ f800022c: 20 60 84 08 add %r17, 0x20, %r8 -+ f8000230: 80 00 12 c2 ld4 [ %r8 ], %r2 -+ f8000234: 08 9a 10 89 srl8, byte %r2, %r8, %r9 -+ f8000238: 20 74 12 89 sll8 %r9, 0x20, %r9 -+ f800023c: 20 78 12 89 srl8 %r9, 0x20, %r9 -+ f8000240: 30 60 84 08 add %r17, 0x30, %r8 -+ f8000244: 80 00 12 c2 ld4 [ %r8 ], %r2 -+ f8000248: 08 9a 10 94 srl8, byte %r2, %r8, %r20 -+ f800024c: 20 34 15 94 sll8 %r20, 0x20, %r20 -+ f8000250: 20 38 15 94 srl8 %r20, 0x20, %r20 -+ f8000254: 02 60 46 88 and %r25, 2, %r8 -+ f8000258: 00 20 b2 00 cmp %r8, 0 -+ f800025c: 03 00 40 12 bne f8000268 -+ f8000260: 80 60 82 16 add %r9, 0x80, %r22 -+ f8000264: 60 60 82 16 add %r9, 0x60, %r22 -+ -+00000000f8000268 : -+ f8000268: 90 60 85 09 add %r21, 0x90, %r9 -+ f800026c: 80 40 12 c8 ld4 [ %r9 ], %r8 -+ f8000270: 01 20 82 08 inc %r8 -+ f8000274: 80 40 92 c8 st4 %r8, [ %r9 ] -+ f8000278: 04 60 12 c8 ld4 [ %r9 + 4 ], %r8 -+ f800027c: 04 3a 12 88 srl8, byte %r8, 4, %r8 -+ f8000280: 00 20 b2 00 cmp %r8, 0 -+ f8000284: 04 00 40 03 be %xcc, f8000294 -+ f8000288: 68 61 40 c8 ld8 [ %sp + 0x168 ], %r8 -+ f800028c: 7d ff ff 7f call f8000080 -+ f8000290: 00 e0 86 8a mov %r27, %r10 -+ -+00000000f8000294 : -+ f8000294: 88 60 45 d0 ld8 [ %r21 + 0x88 ], %r16 -+ f8000298: 00 20 b4 00 cmp %r16, 0 -+ f800029c: 12 00 40 13 bne %xcc, f80002e4 -+ f80002a0: 90 60 85 12 add %r21, 0x90, %r18 -+ f80002a4: 80 80 14 c8 ld4 [ %r18 ], %r8 -+ f80002a8: 80 c0 96 c8 st4 
%r8, [ %r27 ] -+ f80002ac: 64 01 00 40 call f800083c -+ f80002b0: 00 60 85 88 mov %r21, %r8 -+ f80002b4: 00 20 82 90 mov %r8, %r16 -+ f80002b8: 80 80 14 c8 ld4 [ %r18 ], %r8 -+ f80002bc: 01 20 82 08 inc %r8 -+ f80002c0: 80 80 94 c8 st4 %r8, [ %r18 ] -+ f80002c4: 04 a0 14 c8 ld4 [ %r18 + 4 ], %r8 -+ f80002c8: 04 3a 12 88 srl8, byte %r8, 4, %r8 -+ f80002cc: 00 20 b2 00 cmp %r8, 0 -+ f80002d0: 05 00 40 03 be %xcc, f80002e4 -+ f80002d4: 68 61 40 c8 ld8 [ %sp + 0x168 ], %r8 -+ f80002d8: 00 a0 84 89 mov %r18, %r9 -+ f80002dc: 69 ff ff 7f call f8000080 -+ f80002e0: 00 e0 86 8a mov %r27, %r10 -+ -+00000000f80002e4 : -+ f80002e4: 80 c0 14 c2 ld4 [ %r19 ], %r2 -+ f80002e8: 13 9a 10 89 srl8, byte %r2, %r19, %r9 -+ f80002ec: 20 74 12 89 sll8 %r9, 0x20, %r9 -+ f80002f0: 20 78 12 89 srl8 %r9, 0x20, %r9 -+ f80002f4: 40 b0 36 08 sethi %hi(0xdac10000), %r8 -+ f80002f8: 01 20 82 88 or %r8, 1, %r8 -+ f80002fc: 08 40 b2 00 cmp %r9, %r8 -+ f8000300: 06 00 40 03 be %xcc, f8000318 -+ f8000304: 90 60 85 08 add %r21, 0x90, %r8 -+ f8000308: 80 00 12 c8 ld4 [ %r8 ], %r8 -+ f800030c: 80 c0 96 c8 st4 %r8, [ %r27 ] -+ f8000310: bf 00 40 10 b f800060c -+ f8000314: a8 60 45 c8 ld8 [ %r21 + 0xa8 ], %r8 -+ -+00000000f8000318 : -+ f8000318: a0 23 44 c3 ld8 [ %r16 + 0x3a0 ], %r3 -+ f800031c: 01 60 46 88 and %r25, 1, %r8 -+ f8000320: 00 20 b5 00 cmp %r20, 0 -+ f8000324: 03 00 40 13 bne %xcc, f8000330 -+ f8000328: 14 00 82 09 add %r8, %r20, %r9 -+ f800032c: 01 20 82 09 add %r8, 1, %r9 -+ -+00000000f8000330 : -+ f8000330: 05 20 80 88 mov 5, %r8 -+ f8000334: 09 00 92 21 sub %r8, %r9, %r33 -+ f8000338: 07 74 18 8a sll8 %r33, 7, %r10 -+ f800033c: 0a 00 84 0b add %r16, %r10, %r11 -+ f8000340: 05 74 18 88 sll8 %r33, 5, %r8 -+ f8000344: 08 00 84 0c add %r16, %r8, %r12 -+ f8000348: 40 61 40 cd ld8 [ %sp + 0x140 ], %r13 -+ f800034c: 29 22 80 88 mov 0x229, %r8 -+ f8000350: 03 34 12 88 sll8 %r8, 3, %r8 -+ f8000354: c8 40 43 c9 ld8 [ %r13 + %r8 ], %r9 -+ f8000358: 03 b4 16 88 sll8 %r26, 3, %r8 -+ f800035c: c8 40 42 cf ld8 [ %r9 + %r8 ], %r15 -+ f8000360: 01 60 46 88 and %r25, 1, %r8 -+ f8000364: 00 20 b2 00 cmp %r8, 0 -+ f8000368: 1d 00 40 02 be f80003dc -+ f800036c: 00 23 83 0c add %r12, 0x300, %r12 -+ f8000370: ca 00 c4 d7 st8 %r23, [ %r16 + %r10 ] -+ f8000374: 40 c4 0f 09 sethi %hi(0x3f110000), %r9 -+ f8000378: 09 60 82 89 or %r9, 9, %r9 ! 
3f110009 <*ABS*+0x3f110009> -+ f800037c: 08 e0 c2 c9 st8 %r9, [ %r11 + 8 ] -+ f8000380: 0a e0 83 88 or %r15, 0xa, %r8 -+ f8000384: 10 e0 c2 c8 st8 %r8, [ %r11 + 0x10 ] -+ f8000388: 81 00 10 08 sethi %hi(0x40020400), %r8 -+ f800038c: 0a 34 12 88 sll8 %r8, 0xa, %r8 -+ f8000390: 18 e0 c2 c8 st8 %r8, [ %r11 + 0x18 ] -+ f8000394: 1d e0 83 88 or %r15, 0x1d, %r8 -+ f8000398: 20 e0 c2 c8 st8 %r8, [ %r11 + 0x20 ] -+ f800039c: 28 e0 c2 d8 st8 %r24, [ %r11 + 0x28 ] -+ f80003a0: 24 e0 84 08 add %r19, 0x24, %r8 -+ f80003a4: 80 00 12 c2 ld4 [ %r8 ], %r2 -+ f80003a8: 08 9a 10 88 srl8, byte %r2, %r8, %r8 -+ f80003ac: 20 34 12 88 sll8 %r8, 0x20, %r8 -+ f80003b0: 20 38 12 88 srl8 %r8, 0x20, %r8 -+ f80003b4: 80 20 82 08 add %r8, 0x80, %r8 -+ f80003b8: 30 e0 c2 c8 st8 %r8, [ %r11 + 0x30 ] -+ f80003bc: 00 e1 80 08 add %r3, 0x100, %r8 -+ f80003c0: 38 e0 c2 c8 st8 %r8, [ %r11 + 0x38 ] -+ f80003c4: 40 e0 c2 d6 st8 %r22, [ %r11 + 0x40 ] -+ f80003c8: 48 e0 c2 cc st8 %r12, [ %r11 + 0x48 ] -+ f80003cc: c0 00 c3 df st8 %r31, [ %r12 ] -+ f80003d0: 20 e0 83 0f add %r15, 0x20, %r15 -+ f80003d4: 80 e0 82 0b add %r11, 0x80, %r11 -+ f80003d8: 20 20 83 0c add %r12, 0x20, %r12 -+ -+00000000f80003dc : -+ f80003dc: 00 20 b5 00 cmp %r20, 0 -+ f80003e0: 13 00 40 33 bne,a %xcc, f800042c -+ f80003e4: 00 20 80 8d mov %r0, %r13 -+ f80003e8: c0 c0 c2 d7 st8 %r23, [ %r11 ] -+ f80003ec: 40 c4 0f 0a sethi %hi(0x3f110000), %r10 -+ f80003f0: 09 a0 82 8a or %r10, 9, %r10 -+ f80003f4: 08 e0 c2 ca st8 %r10, [ %r11 + 8 ] -+ f80003f8: 0a e0 83 88 or %r15, 0xa, %r8 -+ f80003fc: 10 e0 c2 c8 st8 %r8, [ %r11 + 0x10 ] -+ f8000400: 00 04 00 08 sethi %hi(0x100000), %r8 -+ f8000404: 18 e0 c2 c8 st8 %r8, [ %r11 + 0x18 ] -+ f8000408: 1d e0 83 88 or %r15, 0x1d, %r8 -+ f800040c: 20 e0 c2 c8 st8 %r8, [ %r11 + 0x20 ] -+ f8000410: 28 e0 c2 d8 st8 %r24, [ %r11 + 0x28 ] -+ f8000414: 40 e0 c2 d6 st8 %r22, [ %r11 + 0x40 ] -+ f8000418: 48 e0 c2 cc st8 %r12, [ %r11 + 0x48 ] -+ f800041c: c0 00 c3 de st8 %r30, [ %r12 ] -+ f8000420: 00 20 80 92 mov %r0, %r18 -+ f8000424: 4b 00 40 10 b f8000550 -+ f8000428: 20 e0 83 0f add %r15, 0x20, %r15 -+ -+00000000f800042c : -+ f800042c: b8 23 84 08 add %r16, 0x3b8, %r8 -+ f8000430: 80 00 12 c2 ld4 [ %r8 ], %r2 -+ f8000434: 08 9a 10 8e srl8, byte %r2, %r8, %r14 -+ f8000438: 20 b4 13 8e sll8 %r14, 0x20, %r14 -+ f800043c: 20 b8 13 8e srl8 %r14, 0x20, %r14 -+ f8000440: 00 20 80 92 mov %r0, %r18 -+ f8000444: 14 80 b4 00 cmp %r18, %r20 -+ f8000448: 2d 00 40 1b bcc %xcc, f80004fc -+ f800044c: 40 c4 0f 08 sethi %hi(0x3f110000), %r8 -+ f8000450: 09 20 82 a3 or %r8, 9, %r35 -+ f8000454: 00 04 00 22 sethi %hi(0x100000), %r34 -+ f8000458: ff ff 3f 08 sethi %hi(0xfffffc00), %r8 -+ f800045c: e0 23 82 a0 or %r8, 0x3e0, %r32 -+ f8000460: 20 34 18 a0 sll8 %r32, 0x20, %r32 -+ f8000464: 10 23 88 a0 or %r32, 0x310, %r32 -+ -+00000000f8000468 : -+ f8000468: 0d 40 83 09 add %r13, %r13, %r9 -+ f800046c: 0d 40 82 09 add %r9, %r13, %r9 -+ f8000470: 02 74 12 89 sll8 %r9, 2, %r9 -+ f8000474: 09 c0 84 09 add %r19, %r9, %r9 -+ f8000478: 38 60 82 0a add %r9, 0x38, %r10 -+ f800047c: 80 80 12 c2 ld4 [ %r10 ], %r2 -+ f8000480: 0a 9a 10 8a srl8, byte %r2, %r10, %r10 -+ f8000484: 20 b4 12 8a sll8 %r10, 0x20, %r10 -+ f8000488: 20 b8 12 8a srl8 %r10, 0x20, %r10 -+ f800048c: c0 c0 c2 d7 st8 %r23, [ %r11 ] -+ f8000490: 08 e0 c2 e3 st8 %r35, [ %r11 + 8 ] -+ f8000494: 0a e0 83 88 or %r15, 0xa, %r8 -+ f8000498: 10 e0 c2 c8 st8 %r8, [ %r11 + 0x10 ] -+ f800049c: 20 b4 12 88 sll8 %r10, 0x20, %r8 -+ f80004a0: 22 00 82 88 or %r8, %r34, %r8 -+ f80004a4: 18 e0 c2 c8 st8 %r8, [ 
%r11 + 0x18 ] -+ f80004a8: 1d e0 83 88 or %r15, 0x1d, %r8 -+ f80004ac: 20 e0 c2 c8 st8 %r8, [ %r11 + 0x20 ] -+ f80004b0: 28 e0 c2 d8 st8 %r24, [ %r11 + 0x28 ] -+ f80004b4: 34 60 82 09 add %r9, 0x34, %r9 -+ f80004b8: 80 40 12 c2 ld4 [ %r9 ], %r2 -+ f80004bc: 09 9a 10 89 srl8, byte %r2, %r9, %r9 -+ f80004c0: 20 74 12 89 sll8 %r9, 0x20, %r9 -+ f80004c4: 20 78 12 89 srl8 %r9, 0x20, %r9 -+ f80004c8: 30 e0 c2 c9 st8 %r9, [ %r11 + 0x30 ] -+ f80004cc: 38 e0 c2 ce st8 %r14, [ %r11 + 0x38 ] -+ f80004d0: 40 e0 c2 d6 st8 %r22, [ %r11 + 0x40 ] -+ f80004d4: 48 e0 c2 cc st8 %r12, [ %r11 + 0x48 ] -+ f80004d8: c0 00 c3 e0 st8 %r32, [ %r12 ] -+ f80004dc: 0a 80 83 0e add %r14, %r10, %r14 -+ f80004e0: 0a 80 84 12 add %r18, %r10, %r18 -+ f80004e4: 20 e0 83 0f add %r15, 0x20, %r15 -+ f80004e8: 80 e0 82 0b add %r11, 0x80, %r11 -+ f80004ec: 01 60 83 0d inc %r13 -+ f80004f0: 14 40 b3 00 cmp %r13, %r20 -+ f80004f4: dd ff 7f 0b bcs %xcc, f8000468 -+ f80004f8: 20 20 83 0c add %r12, 0x20, %r12 -+ -+00000000f80004fc : -+ f80004fc: e0 3f 83 0c add %r12, -32, %r12 -+ f8000500: c0 00 c3 dd st8 %r29, [ %r12 ] -+ f8000504: bc 23 84 08 add %r16, 0x3bc, %r8 -+ f8000508: 80 00 12 c2 ld4 [ %r8 ], %r2 -+ f800050c: 08 9a 10 88 srl8, byte %r2, %r8, %r8 -+ f8000510: 20 34 12 88 sll8 %r8, 0x20, %r8 -+ f8000514: 20 38 12 88 srl8 %r8, 0x20, %r8 -+ f8000518: 12 00 b2 00 cmp %r8, %r18 -+ f800051c: 0e 00 40 1b bcc %xcc, f8000554 -+ f8000520: 07 74 18 88 sll8 %r33, 7, %r8 -+ f8000524: 00 60 88 8d mov %r33, %r13 -+ f8000528: 04 60 b8 00 cmp %r33, 4 -+ f800052c: 08 00 40 19 bgu %xcc, f800054c -+ f8000530: 08 00 84 0b add %r16, %r8, %r11 -+ f8000534: 00 04 00 08 sethi %hi(0x100000), %r8 -+ -+00000000f8000538 : -+ f8000538: 18 e0 c2 c8 st8 %r8, [ %r11 + 0x18 ] -+ f800053c: 01 60 83 0d inc %r13 -+ f8000540: 04 60 b3 00 cmp %r13, 4 -+ f8000544: fd ff 7f 09 bleu %xcc, f8000538 -+ f8000548: 80 e0 82 0b add %r11, 0x80, %r11 -+ -+00000000f800054c : -+ f800054c: fc 3f 80 92 mov -4, %r18 -+ -+00000000f8000550 : -+ f8000550: 07 74 18 88 sll8 %r33, 7, %r8 -+ -+00000000f8000554 : -+ f8000554: 08 00 84 08 add %r16, %r8, %r8 -+ f8000558: 60 61 40 cb ld8 [ %sp + 0x160 ], %r11 -+ f800055c: c0 00 62 e3 ld64 [ %r8 ], %r32 -+ f8000560: 40 20 62 f3 ld64 [ %r8 + 0x40 ], %r48 -+ f8000564: c0 c0 e2 e3 st64 %r32, [ %r11 ] -+ f8000568: c0 c0 e2 f3 st64 %r48, [ %r11 ] -+ f800056c: 98 22 44 c8 ld8 [ %r16 + 0x298 ], %r8 -+ f8000570: 88 60 c5 c8 st8 %r8, [ %r21 + 0x88 ] -+ f8000574: 88 60 45 c8 ld8 [ %r21 + 0x88 ], %r8 -+ f8000578: 00 20 b2 00 cmp %r8, 0 -+ f800057c: 05 00 40 33 bne,a %xcc, f8000590 -+ f8000580: d8 22 94 c0 st4 %r0, [ %r16 + 0x2d8 ] -+ f8000584: 88 60 85 08 add %r21, 0x88, %r8 -+ f8000588: 80 60 c5 c8 st8 %r8, [ %r21 + 0x80 ] -+ f800058c: d8 22 94 c0 st4 %r0, [ %r16 + 0x2d8 ] -+ -+00000000f8000590 : -+ f8000590: 04 60 46 88 and %r25, 4, %r8 -+ f8000594: 00 20 b2 00 cmp %r8, 0 -+ f8000598: 0c 00 40 02 be f80005c8 -+ f800059c: dc 22 94 c0 st4 %r0, [ %r16 + 0x2dc ] -+ f80005a0: c0 c0 64 e3 ld64 [ %r19 ], %r32 -+ f80005a4: 40 e0 64 f3 ld64 [ %r19 + 0x40 ], %r48 -+ f80005a8: c0 c0 e0 e3 st64 %r32, [ %r3 ] -+ f80005ac: 80 e0 64 e3 ld64 [ %r19 + 0x80 ], %r32 -+ f80005b0: 40 e0 e0 f3 st64 %r48, [ %r3 + 0x40 ] -+ f80005b4: c0 e0 64 f3 ld64 [ %r19 + 0xc0 ], %r48 -+ f80005b8: 80 e0 e0 e3 st64 %r32, [ %r3 + 0x80 ] -+ f80005bc: c0 e0 e0 f3 st64 %r48, [ %r3 + 0xc0 ] -+ f80005c0: 07 00 40 10 b f80005dc -+ f80005c4: 88 e2 80 08 add %r3, 0x288, %r8 -+ -+00000000f80005c8 : -+ f80005c8: c0 c0 64 e3 ld64 [ %r19 ], %r32 -+ f80005cc: 40 e0 64 f3 ld64 [ %r19 + 0x40 ], %r48 -+ 
f80005d0: c0 c0 e0 e3 st64 %r32, [ %r3 ] -+ f80005d4: 40 e0 e0 f3 st64 %r48, [ %r3 + 0x40 ] -+ f80005d8: 88 e2 80 08 add %r3, 0x288, %r8 -+ -+00000000f80005dc : -+ f80005dc: 08 96 14 82 sll8, byte %r18, %r8, %r2 -+ f80005e0: 80 00 92 c2 st4 %r2, [ %r8 ] -+ f80005e4: 40 61 40 cd ld8 [ %sp + 0x140 ], %r13 -+ f80005e8: 29 22 80 88 mov 0x229, %r8 -+ f80005ec: 03 34 12 88 sll8 %r8, 3, %r8 -+ f80005f0: c8 40 43 c9 ld8 [ %r13 + %r8 ], %r9 -+ f80005f4: 03 b4 16 88 sll8 %r26, 3, %r8 -+ f80005f8: c8 40 c2 cf st8 %r15, [ %r9 + %r8 ] -+ f80005fc: 90 60 85 08 add %r21, 0x90, %r8 -+ f8000600: 80 00 12 c8 ld4 [ %r8 ], %r8 -+ f8000604: 80 c0 96 c8 st4 %r8, [ %r27 ] -+ -+00000000f8000608 : -+ f8000608: a8 60 45 c8 ld8 [ %r21 + 0xa8 ], %r8 -+ -+00000000f800060c : -+ f800060c: 08 40 b4 00 cmp %r17, %r8 -+ f8000610: 03 00 40 13 bne %xcc, f800061c -+ f8000614: 00 61 84 11 add %r17, 0x100, %r17 -+ f8000618: a0 60 45 d1 ld8 [ %r21 + 0xa0 ], %r17 -+ -+00000000f800061c : -+ f800061c: 98 60 45 c8 ld8 [ %r21 + 0x98 ], %r8 -+ f8000620: 00 20 b2 00 cmp %r8, 0 -+ f8000624: 04 00 40 13 bne %xcc, f8000634 -+ f8000628: 01 20 87 1c inc %r28 -+ f800062c: 58 61 40 c9 ld8 [ %sp + 0x158 ], %r9 -+ f8000630: 08 60 c2 d1 st8 %r17, [ %r9 + 8 ] -+ -+00000000f8000634 : -+ f8000634: fe 21 b7 00 cmp %r28, 0x1fe -+ f8000638: 0f 00 40 35 bg,a %xcc, f8000674 -+ f800063c: 98 60 45 cb ld8 [ %r21 + 0x98 ], %r11 -+ f8000640: 00 00 10 88 breaktest %r8 -+ f8000644: 02 20 42 88 and %r8, 2, %r8 -+ f8000648: 00 20 b2 00 cmp %r8, 0 -+ f800064c: 05 00 40 03 be %xcc, f8000660 -+ f8000650: 58 61 40 ca ld8 [ %sp + 0x158 ], %r10 -+ f8000654: 18 00 00 40 call f80006b4 -+ f8000658: 60 61 40 c8 ld8 [ %sp + 0x160 ], %r8 -+ f800065c: 58 61 40 ca ld8 [ %sp + 0x158 ], %r10 -+ -+00000000f8000660 : -+ f8000660: c0 80 42 c8 ld8 [ %r10 ], %r8 -+ f8000664: 08 40 b4 00 cmp %r17, %r8 -+ f8000668: dc fe 7f 13 bne %xcc, f80001d8 -+ f800066c: 00 60 84 93 mov %r17, %r19 -+ -+00000000f8000670 : -+ f8000670: 98 60 45 cb ld8 [ %r21 + 0x98 ], %r11 -+ -+00000000f8000674 : -+ f8000674: 00 e0 b2 00 cmp %r11, 0 -+ f8000678: c5 fe 7f 03 be %xcc, f800018c -+ f800067c: 1c 00 90 0a neg %r28, %r10 -+ f8000680: 60 61 40 c8 ld8 [ %sp + 0x160 ], %r8 -+ f8000684: 60 60 85 09 add %r21, 0x60, %r9 -+ f8000688: 38 00 00 40 call f8000768 -+ f800068c: e0 3f 80 8a mov -32, %r10 -+ f8000690: 58 61 40 cb ld8 [ %sp + 0x158 ], %r11 -+ f8000694: 08 e0 c2 d1 st8 %r17, [ %r11 + 8 ] -+ f8000698: bc fe 7f 10 b f8000188 -+ f800069c: 01 20 87 1c inc %r28 -+ f80006a0: c0 60 40 c7 ld8 [ %sp + 0xc0 ], %r7 -+ f80006a4: 40 60 60 d3 ld64 [ %sp + 0x40 ], %r16 -+ f80006a8: 80 60 60 db ld64 [ %sp + 0x80 ], %r24 -+ f80006ac: 08 e0 01 80 retl -+ f80006b0: 40 61 80 01 add %sp, 0x140, %sp -+ -+00000000f80006b4 : -+ f80006b4: 80 7f 80 01 add %sp, -128, %sp -+ f80006b8: c0 40 e0 d3 st64 %r16, [ %sp ] -+ f80006bc: 40 60 e0 db st64 %r24, [ %sp + 0x40 ] -+ f80006c0: 00 20 84 90 mov %r16, %r16 -+ f80006c4: 00 20 86 98 mov %r24, %r24 -+ f80006c8: 00 20 80 80 nop -+ f80006cc: 00 20 80 80 nop -+ f80006d0: 00 e0 81 92 mov %r7, %r18 -+ f80006d4: 02 00 00 40 call f80006dc -+ f80006d8: 00 60 80 91 mov %sp, %r17 -+ f80006dc: 14 e0 81 10 add %r7, 0x14, %r16 -+ f80006e0: 17 20 80 97 mov 0x17, %r23 -+ f80006e4: c0 00 f2 d3 st64suspend %r16, [ %r8 ] -+ f80006e8: c0 40 60 d3 ld64 [ %sp ], %r16 -+ f80006ec: 40 60 60 db ld64 [ %sp + 0x40 ], %r24 -+ f80006f0: 08 a0 00 80 jmp %r2 + 8 -+ f80006f4: 80 60 80 01 add %sp, 0x80, %sp -+ -+00000000f80006f8 : -+ f80006f8: 40 7f 80 01 add %sp, -192, %sp -+ f80006fc: 40 60 e0 d3 st64 %r16, [ %sp + 
0x40 ] -+ f8000700: 80 60 e0 db st64 %r24, [ %sp + 0x80 ] -+ f8000704: 00 20 84 90 mov %r16, %r16 -+ f8000708: 00 20 86 98 mov %r24, %r24 -+ f800070c: 00 20 80 80 nop -+ f8000710: 00 20 80 80 nop -+ f8000714: 00 e0 81 92 mov %r7, %r18 -+ f8000718: 02 00 00 40 call f8000720 -+ f800071c: 00 60 80 91 mov %sp, %r17 -+ f8000720: 40 e0 81 10 add %r7, 0x40, %r16 -+ f8000724: c0 40 e0 d1 st32 %r16, [ %sp ] -+ f8000728: 37 20 80 97 mov 0x37, %r23 -+ f800072c: 38 60 c0 d7 st8 %r23, [ %sp + 0x38 ] -+ f8000730: 00 20 84 90 mov %r16, %r16 -+ f8000734: 00 e0 85 97 mov %r23, %r23 -+ f8000738: 00 20 80 80 nop -+ f800073c: 00 20 80 80 nop -+ f8000740: 1f 60 82 90 or %r9, 0x1f, %r16 -+ f8000744: 20 b4 12 91 sll8 %r10, 0x20, %r17 -+ f8000748: 08 63 84 91 or %r17, 0x308, %r17 -+ f800074c: 00 60 80 92 mov %sp, %r18 -+ f8000750: 00 20 82 93 mov %r8, %r19 -+ f8000754: c0 00 f2 d1 st32suspend %r16, [ %r8 ] -+ f8000758: 40 60 60 d3 ld64 [ %sp + 0x40 ], %r16 -+ f800075c: 80 60 60 db ld64 [ %sp + 0x80 ], %r24 -+ f8000760: 08 a0 00 80 jmp %r2 + 8 -+ f8000764: c0 60 80 01 add %sp, 0xc0, %sp -+ -+00000000f8000768 : -+ f8000768: 40 7f 80 01 add %sp, -192, %sp -+ f800076c: 40 60 e0 d3 st64 %r16, [ %sp + 0x40 ] -+ f8000770: 80 60 e0 db st64 %r24, [ %sp + 0x80 ] -+ f8000774: 00 20 84 90 mov %r16, %r16 -+ f8000778: 00 20 86 98 mov %r24, %r24 -+ f800077c: 00 20 80 80 nop -+ f8000780: 00 20 80 80 nop -+ f8000784: 00 e0 81 92 mov %r7, %r18 -+ f8000788: 02 00 00 40 call f8000790 -+ f800078c: 00 60 80 91 mov %sp, %r17 -+ f8000790: 54 e0 81 10 add %r7, 0x54, %r16 -+ f8000794: c0 40 e0 d1 st32 %r16, [ %sp ] -+ f8000798: 37 20 80 97 mov 0x37, %r23 -+ f800079c: 38 60 c0 d7 st8 %r23, [ %sp + 0x38 ] -+ f80007a0: 00 20 84 90 mov %r16, %r16 -+ f80007a4: 00 e0 85 97 mov %r23, %r23 -+ f80007a8: 00 20 80 80 nop -+ f80007ac: 00 20 80 80 nop -+ f80007b0: 1f 60 82 90 or %r9, 0x1f, %r16 -+ f80007b4: 20 b4 12 91 sll8 %r10, 0x20, %r17 -+ f80007b8: 08 63 84 91 or %r17, 0x308, %r17 -+ f80007bc: 00 60 80 92 mov %sp, %r18 -+ f80007c0: 00 20 82 93 mov %r8, %r19 -+ f80007c4: 0e f4 12 94 sll8 %r11, 0xe, %r20 -+ f80007c8: 0d 20 85 94 or %r20, 0xd, %r20 -+ f80007cc: 17 20 80 95 mov 0x17, %r21 -+ f80007d0: 17 20 80 96 mov 0x17, %r22 -+ f80007d4: 17 20 80 97 mov 0x17, %r23 -+ f80007d8: c0 00 f2 d3 st64suspend %r16, [ %r8 ] -+ f80007dc: 40 60 60 d3 ld64 [ %sp + 0x40 ], %r16 -+ f80007e0: 80 60 60 db ld64 [ %sp + 0x80 ], %r24 -+ f80007e4: 08 a0 00 80 jmp %r2 + 8 -+ f80007e8: c0 60 80 01 add %sp, 0xc0, %sp -+ -+00000000f80007ec : -+ f80007ec: 01 20 80 97 mov 1, %r23 -+ f80007f0: b0 e3 c0 d7 st8 %r23, [ %r3 + 0x3b0 ] -+ f80007f4: 80 a0 60 d2 ld16 [ %r2 + 0x80 ], %r18 -+ f80007f8: 98 e2 80 04 add %r3, 0x298, %r4 -+ f80007fc: 98 e2 c0 c0 st8 %r0, [ %r3 + 0x298 ] -+ f8000800: 80 a0 c0 c4 st8 %r4, [ %r2 + 0x80 ] -+ f8000804: c0 80 c4 c3 st8 %r3, [ %r18 ] -+ f8000808: 01 e0 b4 00 cmp %r19, 1 -+ f800080c: 06 00 40 02 be f8000824 -+ f8000810: 00 a0 84 90 mov %r18, %r16 -+ f8000814: 00 e0 80 97 mov %r3, %r23 -+ f8000818: 02 20 80 97 mov 2, %r23 -+ f800081c: b0 e3 c0 d7 st8 %r23, [ %r3 + 0x3b0 ] -+ f8000820: d8 e2 d0 d0 st8suspend %r16, [ %r3 + 0x2d8 ] -+ f8000824: d8 e2 c0 d0 st8 %r16, [ %r3 + 0x2d8 ] -+ f8000828: 03 20 80 97 mov 3, %r23 -+ f800082c: b0 e3 c0 d7 st8 %r23, [ %r3 + 0x3b0 ] -+ f8000830: 00 e0 80 88 mov %r3, %r8 -+ f8000834: 0e 00 40 10 b f800086c <.epcomms_resume_thread> -+ f8000838: 00 a0 60 c3 ld64 [ %r2 ], %r0 -+ -+00000000f800083c : -+ f800083c: 40 7f 80 01 add %sp, -192, %sp -+ f8000840: 40 60 e0 d3 st64 %r16, [ %sp + 0x40 ] -+ f8000844: 80 60 e0 db 
st64 %r24, [ %sp + 0x80 ] -+ f8000848: 00 20 84 90 mov %r16, %r16 -+ f800084c: 00 20 86 98 mov %r24, %r24 -+ f8000850: 00 20 80 80 nop -+ f8000854: 00 20 80 80 nop -+ f8000858: 01 20 80 89 mov 1, %r9 -+ f800085c: 88 20 c2 c9 st8 %r9, [ %r8 + 0x88 ] -+ f8000860: 00 60 80 91 mov %sp, %r17 -+ f8000864: 00 e0 81 97 mov %r7, %r23 -+ f8000868: 00 20 f2 d3 st64suspend %r16, [ %r8 ] -+ -+00000000f800086c <.epcomms_resume_thread>: -+ f800086c: 40 60 60 d3 ld64 [ %sp + 0x40 ], %r16 -+ f8000870: 80 60 60 db ld64 [ %sp + 0x80 ], %r24 -+ f8000874: 08 e0 01 80 retl -+ f8000878: c0 60 80 01 add %sp, 0xc0, %sp -+Disassembly of section .data: -diff -urN clean/drivers/net/qsnet/jtag/jtagdrv.c linux-2.6.9/drivers/net/qsnet/jtag/jtagdrv.c ---- clean/drivers/net/qsnet/jtag/jtagdrv.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/jtag/jtagdrv.c 2003-06-07 12:02:35.000000000 -0400 -@@ -0,0 +1,451 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: jtagdrv.c,v 1.12 2003/06/07 16:02:35 david Exp $" -+/* $Source: /cvs/master/quadrics/jtagmod/jtagdrv.c,v $*/ -+ -+#include -+ -+#include "jtagdrv.h" -+#include -+ -+int -+jtagdrv_strobe_data (JTAG_DEV *dev, u_char data) -+{ -+ u_char dsr; -+ -+ PRINTF (DBG_ECPP, ("jtagdrv_strobe_data: %s %s %s -> ", (data & LPT_DATA_TRST) ? "TRST" : "trst", -+ (data & LPT_DATA_TDI) ? "TDI" : "tdi", (data & LPT_DATA_TMS) ? "TMS" : "tms")); -+ -+ -+ LPT_WRITE_DATA (dev, data); DELAY(5); /* Drive NEW values on data wires */ -+ LPT_WRITE_CTRL (dev, LPT_CTRL_TCLK); DELAY(5); /* Drive strobe low */ -+ LPT_READ_STAT (dev, dsr); DELAY(5); /* Sample TDI from ring */ -+ LPT_WRITE_CTRL (dev, 0); DELAY(5); /* Drive strobe high */ -+ -+ PRINTF (DBG_ECPP, ("%s\n", (dsr & LPT_STAT_PE) ? "TDO" : "tdo")); -+ -+ return ((dsr & LPT_STAT_PE) ? 1 : 0); -+} -+ -+void -+jtagdrv_select_ring (JTAG_DEV *dev, u_int ring) -+{ -+ PRINTF (DBG_ECPP, ("jtagdrv_select_ring: ring=0x%x\n", ring)); -+ -+ LPT_WRITE_CTRL (dev, 0); DELAY(5); /* Drive strobe and TCLK high */ -+ LPT_WRITE_DATA (dev, ring); DELAY(5); /* Drive ring address */ -+ LPT_WRITE_CTRL (dev, LPT_CTRL_RCLK); DELAY(5); /* Drive strobe low */ -+ LPT_WRITE_CTRL (dev, 0); DELAY(5); /* Drive strobe high */ -+} -+ -+void -+jtagdrv_reset (JTAG_DEV *dev) -+{ -+ register int i; -+ -+ for (i = 0; i < 5; i++) -+ jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS); /* 5 clocks to Reset from any state */ -+ jtagdrv_strobe_data (dev, LPT_DATA_TRST); /* to Run-Test/Idle */ -+} -+ -+void -+jtagdrv_shift_ir (JTAG_DEV *dev, u_char *value, int nbits) -+{ -+ register int i; -+ register int bit; -+ -+ jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS); /* to Select DR-Scan */ -+ jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS); /* to Select IR-Scan */ -+ jtagdrv_strobe_data (dev, LPT_DATA_TRST); /* to Capture-IR */ -+ jtagdrv_strobe_data (dev, LPT_DATA_TRST); /* to Shift-IR */ -+ -+ for (i = 0; i < nbits; i++) -+ { -+ /* strobe through the instruction bits, asserting TMS on the last bit */ -+ -+ if (i == (nbits-1)) -+ bit = jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS | (JTAG_BIT(value, i) ? LPT_DATA_TDI : 0)); -+ else -+ bit = jtagdrv_strobe_data (dev, LPT_DATA_TRST | (JTAG_BIT(value, i) ? 
LPT_DATA_TDI : 0));
-+
-+ if (bit)
-+ JTAG_SET_BIT(value, i);
-+ else
-+ JTAG_CLR_BIT(value, i);
-+ }
-+
-+ jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS); /* to Update-IR */
-+ jtagdrv_strobe_data (dev, LPT_DATA_TRST); /* to Run-Test/Idle */
-+}
-+
-+
-+void
-+jtagdrv_shift_dr (JTAG_DEV *dev, u_char *value, int nbits)
-+{
-+ register int i;
-+ register int bit;
-+
-+ jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS); /* to Select DR-Scan */
-+ jtagdrv_strobe_data (dev, LPT_DATA_TRST); /* to Capture-DR */
-+ jtagdrv_strobe_data (dev, LPT_DATA_TRST); /* to Shift-DR */
-+
-+ for (i = 0; i < nbits; i++)
-+ {
-+ /* strobe through the data bits, asserting TMS on the last bit */
-+
-+ if (i == (nbits-1))
-+ bit = jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS | (JTAG_BIT(value, i) ? LPT_DATA_TDI : 0));
-+ else
-+ bit = jtagdrv_strobe_data (dev, LPT_DATA_TRST | (JTAG_BIT(value, i) ? LPT_DATA_TDI : 0));
-+
-+ if (bit)
-+ JTAG_SET_BIT(value, i);
-+ else
-+ JTAG_CLR_BIT(value, i);
-+ }
-+
-+ jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS); /* to Update-DR */
-+ jtagdrv_strobe_data (dev, LPT_DATA_TRST); /* to Run-Test/Idle */
-+}
-+
-+static int
-+jtagdrv_i2c_start (JTAG_DEV *dev)
-+{
-+ u_char dsr;
-+ int i;
-+
-+ PRINTF (DBG_ECPP, ("jtagdrv_i2c_start\n"));
-+
-+ /* Issue a stop sequence */
-+ LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(1); /* SCLK low */
-+ LPT_WRITE_DATA (dev, 0); DELAY(5); /* SDA low */
-+ LPT_WRITE_CTRL (dev, 0); DELAY(5); /* SCLK high */
-+ LPT_WRITE_DATA (dev, LPT_DATA_SDA); DELAY(5); /* SDA high */
-+
-+ /* sample the line to see if we're idle */
-+ LPT_READ_STAT (dev, dsr); /* sample SDA */
-+ if ((dsr & LPT_STAT_SDA) == 0) /* Cannot start if SDA already driven */
-+ {
-+ PRINTF (DBG_ECPP, ("jtagdrv_i2c_start: cannot start - sda driven low\n"));
-+
-+ for (i = 0; i < 16 ; i++)
-+ {
-+ LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(5); /* SCLK low */
-+ LPT_WRITE_CTRL (dev, 0); DELAY(5); /* SCLK high */
-+ LPT_READ_STAT (dev, dsr);
-+
-+ if (dsr & LPT_STAT_SDA)
-+ {
-+ PRINTF (DBG_ECPP, ("jtagdrv_i2c_start - stopped after %d clocks\n", i));
-+ break;
-+ }
-+ }
-+
-+ if ((dsr & LPT_STAT_SDA) == 0)
-+ {
-+ PRINTF (DBG_ECPP, ("jtagdrv_i2c_start - cannot start - not idle\n"));
-+ return (0);
-+ }
-+
-+ /* seen SDA float high, so issue a stop sequence */
-+ LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(1); /* SCLK low */
-+ LPT_WRITE_DATA (dev, 0); DELAY(5); /* SDA low */
-+ LPT_WRITE_CTRL (dev, 0); DELAY(5); /* SCLK high */
-+ LPT_WRITE_DATA (dev, LPT_DATA_SDA); DELAY(5); /* SDA high */
-+ }
-+
-+ LPT_WRITE_DATA (dev, 0); DELAY(4); /* drive SDA low */
-+ return (1);
-+}
-+
-+static void
-+jtagdrv_i2c_stop (JTAG_DEV *dev)
-+{
-+ u_char dsr;
-+ int i;
-+
-+ PRINTF (DBG_ECPP, ("jtagdrv_i2c_stop\n"));
-+
-+ LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(1); /* SCLK low */
-+ LPT_WRITE_DATA (dev, 0); DELAY(5); /* SDA low */
-+ LPT_WRITE_CTRL (dev, 0); DELAY(5); /* SCLK high */
-+ LPT_WRITE_DATA (dev, LPT_DATA_SDA); DELAY(5); /* SDA high */
-+
-+ /*
-+ * bug fix for temperature sensor chip
-+ * if it's still driving SDA, then clock
-+ * it until it stops driving it
-+ */
-+ LPT_READ_STAT (dev, dsr);
-+ if ((dsr & LPT_STAT_SDA) == 0)
-+ {
-+ PRINTF (DBG_ECPP, ("jtagdrv_i2c_stop - slave not stopped\n"));
-+ for (i = 0; i < 16 ; i++)
-+ {
-+ LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(5); /* SCLK low */
-+ LPT_WRITE_CTRL (dev, 0); DELAY(5); /* SCLK high */
-+ LPT_READ_STAT (dev, dsr);
-+
-+ if (dsr & LPT_STAT_SDA)
-+ break;
-+ }
-+ PRINTF (DBG_ECPP, ("jtagdrv_i2c_stop - 
stopped after %d clocks\n", i));
-+ }
-+}
-+
-+static int
-+jtagdrv_i2c_strobe (JTAG_DEV *dev, u_char data)
-+{
-+ u_char dsr;
-+
-+ PRINTF (DBG_ECPP, ("jtagdrv_i2c_strobe : %s", (data & LPT_DATA_SDA) ? "SDA" : "sda"));
-+
-+ LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(1); /* SCLK low */
-+ LPT_WRITE_DATA (dev, data); DELAY(5); /* write data */
-+ LPT_WRITE_CTRL (dev, 0); /* SCLK high */
-+ LPT_READ_STAT (dev, dsr); DELAY(4); /* Sample SDA */
-+
-+ PRINTF (DBG_ECPP, (" -> %s\n", (dsr & LPT_STAT_SDA) ? "SDA" : "sda"));
-+
-+ return ((dsr & LPT_STAT_SDA) ? 1 : 0);
-+}
-+
-+static int
-+jtagdrv_i2c_get_ack (JTAG_DEV *dev)
-+{
-+ u_char dsr;
-+
-+ LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(1); /* SCLK low */
-+ LPT_WRITE_DATA (dev, LPT_DATA_SDA); DELAY(5); /* SDA high */
-+ LPT_WRITE_CTRL (dev, 0); /* SCLK high */
-+ LPT_READ_STAT (dev, dsr); DELAY(4); /* Sample SDA */
-+
-+ PRINTF (DBG_ECPP, ("jtagdrv_i2c_get_ack -> %s\n", (dsr & LPT_STAT_SDA) ? "no ack" : "ack"));
-+
-+ return ((dsr & LPT_STAT_SDA) ? 0 : 1);
-+}
-+
-+static int
-+jtagdrv_i2c_drive_ack (JTAG_DEV *dev, int nack)
-+{
-+ u_char dsr;
-+
-+ LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(1); /* SCLK low */
-+ LPT_WRITE_DATA (dev, nack ? LPT_DATA_SDA : 0); DELAY(5); /* SDA low for ack, high for nack */
-+ LPT_WRITE_CTRL (dev, 0); /* SCLK high */
-+ LPT_READ_STAT (dev, dsr); DELAY(4); /* Sample SDA for ack */
-+
-+ PRINTF (DBG_ECPP, ("jtagdrv_i2c_drive_ack %d -> %s\n", nack, (dsr & LPT_STAT_SDA) ? "done" : "more"));
-+
-+ return ((dsr & LPT_STAT_SDA) ? 1 : 0);
-+}
-+
-+static void
-+jtagdrv_i2c_shift_addr (JTAG_DEV *dev, u_int address, int readNotWrite)
-+{
-+ register int i;
-+
-+ PRINTF (DBG_ECPP, ("jtagdrv_i2c_shift_addr: %x\n", address));
-+
-+ for (i = I2C_ADDR_LEN-1; i >= 0; i--)
-+ jtagdrv_i2c_strobe (dev, (address & (1 << i)) ? LPT_DATA_SDA : 0);
-+
-+ jtagdrv_i2c_strobe (dev, readNotWrite ? LPT_DATA_SDA : 0);
-+}
-+
-+static u_char
-+jtagdrv_i2c_shift_data (JTAG_DEV *dev, u_char data)
-+{
-+ register int i;
-+ u_char val = 0;
-+
-+ PRINTF (DBG_ECPP, ("jtagdrv_i2c_shift_data : %02x\n", data));
-+
-+ for (i = I2C_DATA_LEN-1; i >= 0; i--)
-+ if (jtagdrv_i2c_strobe (dev, data & (1 << i) ? LPT_DATA_SDA : 0))
-+ val |= (1 << i);
-+
-+ PRINTF (DBG_ECPP, ("jtagdrv_i2c_shift_data : -> %02x\n", val));
-+
-+ return (val);
-+}
-+
-+int
-+jtagdrv_i2c_write (JTAG_DEV *dev, u_int address, u_int count, u_char *data)
-+{
-+ register int i;
-+
-+ PRINTF (DBG_FN, ("jtagdrv_i2c_write: address=%x count=%d data=%02x\n", address, count, data[0]));
-+
-+ if (! jtagdrv_i2c_start (dev))
-+ return (I2C_OP_NOT_IDLE);
-+
-+ jtagdrv_i2c_shift_addr (dev, address, 0);
-+
-+ if (! jtagdrv_i2c_get_ack (dev))
-+ {
-+ PRINTF (DBG_FN, ("jtagdrv_i2c_write: no ack on address phase\n"));
-+
-+ jtagdrv_i2c_stop (dev);
-+ return (I2C_OP_NO_DEVICE);
-+ }
-+
-+ for (i = 0; i < count; i++)
-+ {
-+ jtagdrv_i2c_shift_data (dev, data[i]);
-+
-+ if (! jtagdrv_i2c_get_ack (dev))
-+ {
-+ PRINTF (DBG_FN, ("jtagdrv_i2c_write: no ack on data phase %d\n", i));
-+
-+ jtagdrv_i2c_stop (dev);
-+ return (I2C_OP_WRITE_TO_BIG);
-+ }
-+ }
-+
-+ jtagdrv_i2c_stop (dev);
-+ return (I2C_OP_SUCCESS);
-+}
-+
-+int
-+jtagdrv_i2c_read (JTAG_DEV *dev, u_int address, u_int count, u_char *data)
-+{
-+ register int i;
-+
-+ PRINTF (DBG_FN, ("jtagdrv_i2c_read: address=%x count=%d\n", address, count));
-+
-+ if (! jtagdrv_i2c_start (dev))
-+ return (I2C_OP_NOT_IDLE);
-+
-+ jtagdrv_i2c_shift_addr (dev, address, 1);
-+
-+ if (! 
jtagdrv_i2c_get_ack (dev))
-+ {
-+ PRINTF (DBG_FN, ("jtagdrv_i2c_read: no ack on address phase\n"));
-+
-+ jtagdrv_i2c_stop (dev);
-+ return (I2C_OP_NO_DEVICE);
-+ }
-+
-+ for (i = 0; i < count; i++)
-+ {
-+ data[i] = jtagdrv_i2c_shift_data (dev, 0xff);
-+
-+ jtagdrv_i2c_drive_ack (dev, (i == (count-1) ? 1 : 0));
-+ }
-+
-+ jtagdrv_i2c_stop (dev);
-+
-+ return (I2C_OP_SUCCESS);
-+}
-+
-+int
-+jtagdrv_i2c_writereg (JTAG_DEV *dev, u_int address, u_int intaddress, u_int count, u_char *data)
-+{
-+ register int i;
-+
-+ PRINTF (DBG_FN, ("jtagdrv_i2c_writereg: address=%x count=%d\n", address, count));
-+
-+ if (! jtagdrv_i2c_start (dev))
-+ return (I2C_OP_NOT_IDLE);
-+
-+ jtagdrv_i2c_shift_addr (dev, address, 0);
-+
-+ if (! jtagdrv_i2c_get_ack (dev))
-+ {
-+ PRINTF (DBG_FN, ("jtagdrv_i2c_writereg: no ack on address phase\n"));
-+
-+ jtagdrv_i2c_stop (dev);
-+ return (I2C_OP_NO_DEVICE);
-+ }
-+
-+ jtagdrv_i2c_shift_data (dev, intaddress);
-+
-+ if (! jtagdrv_i2c_get_ack (dev))
-+ {
-+ PRINTF (DBG_FN, ("jtagdrv_i2c_writereg: no ack on intaddress phase\n"));
-+ jtagdrv_i2c_stop (dev);
-+ return (I2C_OP_NO_DEVICE);
-+ }
-+
-+ for (i = 0; i < count; i++)
-+ {
-+ jtagdrv_i2c_shift_data (dev, data[i]);
-+ if (! jtagdrv_i2c_get_ack (dev))
-+ {
-+ PRINTF (DBG_FN, ("jtagdrv_i2c_writereg: no ack on byte %d\n", i));
-+ jtagdrv_i2c_stop (dev);
-+ return (I2C_OP_WRITE_TO_BIG);
-+ }
-+ }
-+
-+ jtagdrv_i2c_stop (dev);
-+ return (I2C_OP_SUCCESS);
-+}
-+
-+int
-+jtagdrv_i2c_readreg (JTAG_DEV *dev, u_int address, u_int intaddress, u_int count, u_char *data)
-+{
-+ PRINTF (DBG_FN, ("jtagdrv_i2c_readreg: address=%x count=%d\n", address, count));
-+
-+ if (! jtagdrv_i2c_start (dev))
-+ return (I2C_OP_NOT_IDLE);
-+
-+ jtagdrv_i2c_shift_addr (dev, address, 0);
-+
-+ if (! jtagdrv_i2c_get_ack (dev))
-+ {
-+ PRINTF (DBG_FN, ("jtagdrv_i2c_readreg: no ack on address phase\n"));
-+
-+ jtagdrv_i2c_stop (dev);
-+ return (I2C_OP_NO_DEVICE);
-+ }
-+
-+ jtagdrv_i2c_shift_data (dev, intaddress);
-+
-+ if (! jtagdrv_i2c_get_ack (dev))
-+ {
-+ PRINTF (DBG_FN, ("jtagdrv_i2c_readreg: no ack on intaddress phase\n"));
-+ jtagdrv_i2c_stop (dev);
-+ return (I2C_OP_NO_DEVICE);
-+ }
-+
-+ jtagdrv_i2c_stop (dev);
-+
-+ return (jtagdrv_i2c_read (dev, address, count, data));
-+}
-+
-+void
-+jtagdrv_i2c_clock_shift (JTAG_DEV *dev, u_int t, u_int n, u_int m)
-+{
-+ int i;
-+
-+ for (i = 2; i >= 0; i--)
-+ {
-+ LPT_WRITE_DATA (dev, ((t & (1 << i)) ? LPT_DATA_TDI : 0)); DELAY(1); /* clock low | data */
-+ LPT_WRITE_DATA (dev, ((t & (1 << i)) ? LPT_DATA_TDI : 0) | LPT_DATA_TMS); DELAY(1); /* clock high | data */
-+ }
-+
-+ for (i = 1; i >= 0; i--)
-+ {
-+ LPT_WRITE_DATA (dev, ((n & (1 << i)) ? LPT_DATA_TDI : 0)); DELAY(1); /* clock low | data */
-+ LPT_WRITE_DATA (dev, ((n & (1 << i)) ? LPT_DATA_TDI : 0)| LPT_DATA_TMS); DELAY(1); /* clock high | data */
-+ }
-+
-+ for (i = 6; i >= 0; i--)
-+ {
-+ LPT_WRITE_DATA (dev, ((m & (1 << i)) ? LPT_DATA_TDI : 0)); DELAY(1); /* clock low | data */
-+ LPT_WRITE_DATA (dev, ((m & (1 << i)) ? 
LPT_DATA_TDI : 0) | LPT_DATA_TMS); DELAY(1); /* clock high | data */
-+ }
-+
-+ LPT_WRITE_DATA (dev, 0); DELAY(1); /* clock low | 0 */
-+
-+ LPT_WRITE_CTRL (dev, LPT_CTRL_TCLK); DELAY(1); /* strobe low */
-+ LPT_WRITE_CTRL (dev, 0); DELAY(1); /* strobe high */
-+}
-+
-diff -urN clean/drivers/net/qsnet/jtag/jtagdrv.h linux-2.6.9/drivers/net/qsnet/jtag/jtagdrv.h
---- clean/drivers/net/qsnet/jtag/jtagdrv.h 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/jtag/jtagdrv.h 2002-08-09 07:18:37.000000000 -0400
-@@ -0,0 +1,57 @@
-+/*
-+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ifndef __JTAGDRV_COMMON_H
-+#define __JTAGDRV_COMMON_H
-+
-+#ident "@(#)$Id: jtagdrv.h,v 1.5 2002/08/09 11:18:37 addy Exp $"
-+/* $Source: /cvs/master/quadrics/jtagmod/jtagdrv.h,v $*/
-+
-+#include
-+
-+/* include OS specific header file */
-+#if defined(LINUX)
-+# include "jtagdrv_Linux.h"
-+#elif defined(DIGITAL_UNIX)
-+# include "jtagdrv_OSF1.h"
-+#elif defined(QNX)
-+# include "jtagdrv_QNX.h"
-+#else
-+# error cannot determine os type
-+#endif
-+
-+extern int jtagdebug;
-+
-+#define DBG_CFG (1 << 0)
-+#define DBG_OPEN (1 << 1)
-+#define DBG_IOCTL (1 << 2)
-+#define DBG_ECPP (1 << 3)
-+#define DBG_FN (1 << 4)
-+
-+#define DRIVER_NAME "jtag"
-+
-+#if defined(LINUX)
-+#define PRINTF(n,X) ((n) & jtagdebug ? (void) printk X : (void) 0)
-+#define PRINTMSG(fmt, arg...) printk(KERN_INFO DRIVER_NAME ": " fmt, ##arg)
-+#else
-+#define PRINTF(n,X) ((n) & jtagdebug ? (void) printf X : (void) 0)
-+#define PRINTMSG(M, A) printf ("jtag: " M, A)
-+#endif
-+
-+extern void jtagdrv_select_ring (JTAG_DEV *pp, u_int ring);
-+extern void jtagdrv_reset (JTAG_DEV *pp);
-+extern void jtagdrv_shift_ir (JTAG_DEV *pp, u_char *value, int nbits);
-+extern void jtagdrv_shift_dr (JTAG_DEV *pp, u_char *value, int nbits);
-+
-+extern int jtagdrv_i2c_write (JTAG_DEV *pp, u_int address, u_int count, u_char *data);
-+extern int jtagdrv_i2c_read (JTAG_DEV *pp, u_int address, u_int count, u_char *data);
-+extern int jtagdrv_i2c_writereg (JTAG_DEV *pp, u_int address, u_int intaddress, u_int count, u_char *data);
-+extern int jtagdrv_i2c_readreg (JTAG_DEV *pp, u_int address, u_int intaddress, u_int count, u_char *data);
-+extern void jtagdrv_i2c_clock_shift (JTAG_DEV *pp, u_int t, u_int n, u_int m);
-+
-+
-+#endif /* __JTAGDRV_COMMON_H */
-diff -urN clean/drivers/net/qsnet/jtag/jtagdrv_Linux.c linux-2.6.9/drivers/net/qsnet/jtag/jtagdrv_Linux.c
---- clean/drivers/net/qsnet/jtag/jtagdrv_Linux.c 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/jtag/jtagdrv_Linux.c 2005-09-07 10:35:03.000000000 -0400
-@@ -0,0 +1,326 @@
-+/*
-+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. 
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+/*
-+ * $Id: jtagdrv_Linux.c,v 1.19.2.3 2005/09/07 14:35:03 mike Exp $
-+ * $Source: /cvs/master/quadrics/jtagmod/jtagdrv_Linux.c,v $
-+ */
-+
-+#include "jtagdrv.h"
-+#include
-+#include
-+
-+#include
-+#include
-+
-+MODULE_AUTHOR("Quadrics Ltd.");
-+MODULE_DESCRIPTION("JTAG Parallel port QsNet switch interface");
-+
-+MODULE_LICENSE("GPL");
-+
-+#define MAJOR_INSTANCE 0 /* 0 is dynamic assign of device major */
-+#define MAX_JTAG_DEV 4
-+
-+int jtag_major = MAJOR_INSTANCE;
-+int jtagdebug = 0;
-+module_param(jtag_major, uint, 0);
-+module_param(jtagdebug, uint, 0);
-+
-+JTAG_DEV jtag_devs[MAX_JTAG_DEV];
-+
-+int io[MAX_JTAG_DEV]= { 0, };
-+MODULE_PARM(io, "1-4i");
-+
-+
-+/* The fops functions */
-+int jtag_open(struct inode *, struct file *);
-+int jtag_close(struct inode *, struct file *);
-+int jtag_ioctl(struct inode *, struct file *, unsigned int, unsigned long );
-+
-+struct file_operations jtag_fops = {
-+ ioctl: jtag_ioctl,
-+ open: jtag_open,
-+ release: jtag_close,
-+};
-+
-+int
-+jtag_probe(void)
-+{
-+ int i=0;
-+ int default_io = 1;
-+ JTAG_DEV *dev;
-+ unsigned char value=0xff;
-+
-+
-+ /* see if there are any user supplied io addr */
-+ for ( i = 0; i < MAX_JTAG_DEV; i++) {
-+ if ( io[i] != 0x00)
-+ default_io = 0;
-+ jtag_devs[i].base = io[i];
-+ }
-+
-+ if ( default_io ) {
-+ jtag_devs[0].base = 0x3bc;
-+ jtag_devs[1].base = 0x378;
-+ jtag_devs[2].base = 0x278;
-+ jtag_devs[3].base = 0x268;
-+ }
-+
-+ for ( i = 0 ; i < MAX_JTAG_DEV; i++) {
-+ if ( jtag_devs[i].base == 0x3bc )
-+ jtag_devs[i].region = 3;
-+ else
-+ jtag_devs[i].region = 8;
-+ jtag_devs[i].present = 0;
-+ }
-+
-+
-+ if( default_io )
-+ {
-+ for( i = 0 ; i < MAX_JTAG_DEV; i++) {
-+ dev=&(jtag_devs[i]);
-+ if(dev->base && request_region(dev->base, dev->region, "jtag")) {
-+ LPT_WRITE(dev, 0,0);
-+ LPT_READ(dev, 0,value);
-+ if ( value != 0xff) {
-+ PRINTMSG("(%d , %d) present, io=0x%04lx\n",jtag_major,i,dev->base);
-+
-+ dev->present=1;
-+ }
-+ else
-+ release_region(dev->base, dev->region);
-+ }
-+ else
-+ {
-+ PRINTMSG("failed to request_region (%d , %d), io=0x%04lx\n",jtag_major,i,dev->base);
-+ return -1;
-+ }
-+ }
-+ return 0;
-+ }
-+ else /* Force the region to be present, this makes the PCI parallel cards work */
-+ {
-+ for( i = 0 ; i < MAX_JTAG_DEV; i++)
-+ {
-+ dev=&(jtag_devs[i]);
-+ if(dev->base && request_region(dev->base, dev->region, "jtag") && (dev->base != 0))
-+ {
-+ PRINTMSG("(%d , %d) forced by user, io=0x%04lx\n",jtag_major,i,dev->base);
-+ dev->present=1;
-+ }
-+ else
-+ {
-+ if( dev->base != 0)
-+ release_region(dev->base, dev->region);
-+ }
-+ }
-+ return 0;
-+ }
-+}
-+
-+int init_module(void)
-+{
-+ int result,i;
-+ result = register_chrdev(jtag_major, DRIVER_NAME, &jtag_fops);
-+ if (result < 0) {
-+ PRINTMSG("Couldn't register char device err == %d\n",result);
-+ return -1;
-+ }
-+
-+ if ( jtag_major == 0 )
-+ jtag_major = result;
-+
-+ for ( i = 0; i < MAX_JTAG_DEV; i++) {
-+ jtag_devs[i].base=io[i];
-+ }
-+
-+ jtag_probe();
-+
-+ PRINTMSG("Registered character device, major == %d\n",jtag_major);
-+ return 0;
-+}
-+
-+void cleanup_module(void)
-+{
-+ int i=0;
-+
-+ for( i = 0; i < MAX_JTAG_DEV; i++) {
-+ if( jtag_devs[i].present)
-+ release_region(jtag_devs[i].base, jtag_devs[i].region);
-+ }
-+
-+ unregister_chrdev(jtag_major, DRIVER_NAME);
-+ PRINTMSG("Unloaded char device\n");
-+}
-+
-+
-+int
-+jtag_open (struct inode *inode, struct file *filp)
-+{
-+ int unit = MINOR(inode->i_rdev);
-+ 
JTAG_DEV *dev = &jtag_devs[unit];
-+
-+ if (unit < 0 || unit >= MAX_JTAG_DEV || !dev->present)
-+ return (-ENXIO);
-+
-+ /*
-+ * Only allow a single open at a time
-+ */
-+ if (dev->open)
-+ return (-EBUSY);
-+ dev->open = 1;
-+
-+ /*
-+ * Initialise the hardware registers
-+ */
-+
-+ LPT_WRITE (dev, LPT_CTRL, 0);
-+ DELAY(50);
-+ LPT_WRITE (dev, LPT_CTRL, LPT_CTRL_INIT);
-+
-+ MOD_INC_USE_COUNT;
-+
-+ return (0);
-+}
-+
-+int
-+jtag_close(struct inode *inode, struct file *filp)
-+{
-+
-+ int unit = MINOR(inode->i_rdev);
-+ JTAG_DEV *dev = &jtag_devs[unit];
-+
-+ if (unit < 0 || unit >= MAX_JTAG_DEV || !dev->present)
-+ return (-ENXIO);
-+
-+ dev->open = 0;
-+
-+ MOD_DEC_USE_COUNT;
-+
-+ return (0);
-+}
-+
-+int
-+jtag_ioctl (struct inode *inode, struct file *filp, unsigned int io_cmd, unsigned long io_data)
-+{
-+ int unit = MINOR(inode->i_rdev);
-+ JTAG_DEV *dev = &jtag_devs[unit];
-+ JTAG_RESET_ARGS *resetargs;
-+ JTAG_SHIFT_ARGS *shiftargs;
-+ I2C_ARGS *i2cargs;
-+ I2C_CLOCK_SHIFT_ARGS *clockargs;
-+ u_char *buf;
-+ int freq;
-+
-+ if (unit < 0 || unit >= MAX_JTAG_DEV || !dev->present)
-+ return (-ENXIO);
-+
-+ PRINTF (DBG_IOCTL, ("jtag_ioctl: device %d cmd=%x\n", unit, io_cmd));
-+
-+ switch (io_cmd)
-+ {
-+ case JTAG_RESET:
-+ resetargs = (JTAG_RESET_ARGS *) io_data;
-+
-+ if (! VALID_JTAG_RING (resetargs->ring))
-+ return (-EINVAL);
-+
-+ jtagdrv_select_ring (dev, resetargs->ring);
-+ jtagdrv_reset (dev);
-+ return (0);
-+
-+ case JTAG_SHIFT_IR:
-+ case JTAG_SHIFT_DR:
-+ shiftargs = (JTAG_SHIFT_ARGS *) io_data;
-+
-+ if (! VALID_JTAG_RING (shiftargs->ring) || shiftargs->nbits > (JTAG_MAX_DATA_LEN*JTAG_MAX_CHIPS)) {
-+ return (-EFAULT);
-+ }
-+
-+ buf = (u_char *) kmalloc (JTAG_NBYTES(shiftargs->nbits), GFP_KERNEL);
-+
-+ if (buf == (u_char *) NULL)
-+ return (-ENOMEM);
-+
-+ if (copy_from_user (buf, shiftargs->value, JTAG_NBYTES(shiftargs->nbits)))
-+ {
-+ kfree(buf);
-+ return (-EFAULT);
-+ }
-+
-+
-+ jtagdrv_select_ring (dev, shiftargs->ring);
-+
-+ if (io_cmd == JTAG_SHIFT_IR)
-+ jtagdrv_shift_ir (dev, buf, shiftargs->nbits);
-+ else
-+ jtagdrv_shift_dr (dev, buf, shiftargs->nbits);
-+
-+ if (copy_to_user (shiftargs->value, buf, JTAG_NBYTES (shiftargs->nbits)))
-+ {
-+ kfree (buf);
-+ return (-EFAULT);
-+ }
-+
-+ kfree (buf);
-+ return (0);
-+
-+ case I2C_WRITE:
-+ case I2C_READ:
-+ case I2C_WRITEREG:
-+ case I2C_READREG:
-+ i2cargs = (I2C_ARGS *) io_data;
-+
-+ if (! 
VALID_I2C_RING(i2cargs->ring) || i2cargs->count > I2C_MAX_DATA_LEN) -+ return (-EFAULT); -+ -+ jtagdrv_select_ring (dev, RING_I2C_BIT | i2cargs->ring); -+ switch (io_cmd) -+ { -+ case I2C_WRITE: -+ i2cargs->ok = jtagdrv_i2c_write (dev, i2cargs->device, i2cargs->count, i2cargs->data); -+ break; -+ -+ case I2C_READ: -+ i2cargs->ok = jtagdrv_i2c_read (dev, i2cargs->device, i2cargs->count, i2cargs->data); -+ break; -+ -+ case I2C_WRITEREG: -+ i2cargs->ok = jtagdrv_i2c_writereg (dev, i2cargs->device, i2cargs->reg, i2cargs->count, i2cargs->data); -+ break; -+ -+ case I2C_READREG: -+ i2cargs->ok = jtagdrv_i2c_readreg (dev, i2cargs->device, i2cargs->reg, i2cargs->count, i2cargs->data); -+ break; -+ } -+ return (0); -+ -+ case I2C_CLOCK_SHIFT: -+ clockargs = (I2C_CLOCK_SHIFT_ARGS *) io_data; -+ -+ freq = (10 * clockargs->m / (1 << (((clockargs->n + 1) & 3)))); -+ -+ /* validate the value, and initialise the ring */ -+ if (clockargs->t != 0 || clockargs->n > 3 || clockargs->m > 127) -+ return (-EINVAL); -+ -+ jtagdrv_select_ring (dev, RING_I2C_BIT | RING_CLOCK_SHIFT); -+ jtagdrv_i2c_clock_shift (dev, clockargs->t, clockargs->n, clockargs->m); -+ jtagdrv_select_ring (dev, 0); -+ return (0); -+ -+ default: -+ return (-EINVAL); -+ } -+ return (-EINVAL); -+} -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/jtag/jtagdrv_Linux.h linux-2.6.9/drivers/net/qsnet/jtag/jtagdrv_Linux.h ---- clean/drivers/net/qsnet/jtag/jtagdrv_Linux.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/jtag/jtagdrv_Linux.h 2002-08-09 07:18:37.000000000 -0400 -@@ -0,0 +1,174 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: jtagdrv_Linux.h,v 1.3 2002/08/09 11:18:37 addy Exp $" -+/* $Source: /cvs/master/quadrics/jtagmod/jtagdrv_Linux.h,v $*/ -+ -+#ifndef __JTAGDRV_LINUX_H -+#define __JTAGDRV_LINUX_H -+ -+#include -+#include -+ -+typedef struct jtag_dev -+{ -+ unsigned long base; -+ int region; -+ -+ u_int present:1; -+ u_int open:1; -+} JTAG_DEV; -+ -+/* -+** -+** Hardware Defines -+** -+*/ -+ -+/* -+ * Assume that bit 4 of the Control Register is set to 1 (by default) -+ * to enable the printer port (CS3). -+ * -+ * The default base address is 3BC-3BF. -+ */ -+ -+#define LPT0 0x3BC /* CSR Base Address - note this can -+ * change depending on the setting -+ * in the Control Register 0. -+ * -+ * LPT1 0x378 -+ * LPT2 0x278 -+ * LPT3 0x268 -+ */ -+ -+/* -+ * Register offsets from the port base address -+ */ -+ -+#define LPT_REGISTER_0 0 -+#define LPT_REGISTER_1 1 -+#define LPT_REGISTER_2 2 -+#define LPT_REGISTER_3 0x400 -+#define LPT_REGISTER_4 0x401 -+#define LPT_REGISTER_5 0x402 -+ -+/* -+ * Chip control registers -+ */ -+ /* Base address for Super I/O National*/ -+ -+#define SIO_BASE_ADDR 0x26e /* Semiconductor PC87332VLJ combo-chip*/ -+#define CR4_REG 0x04 /* index 4, printer control reg 4 */ -+ -+#define LPT_EPP 0x01 /* Enable bit for epp */ -+#define LPT_ECP 0x04 /* Enable bit for ecp */ -+ -+/* -+ * Registers for use with centronics, nibble and byte modes. -+ */ -+ -+#define LPT_DATA LPT_REGISTER_0 /* line printer port data */ -+#define LPT_STAT LPT_REGISTER_1 /* LPT port status */ -+#define LPT_CTRL LPT_REGISTER_2 /* LPT port control */ -+ -+/* -+ * Registers for use with ECP mode. 
-+ */
-+
-+#define LPT_DFIFO LPT_REGISTER_3 /* r/w fifo register */
-+#define LPT_CFGB LPT_REGISTER_4 /* Configuration B */
-+#define LPT_ECR LPT_REGISTER_5 /* Extended control */
-+
-+/*
-+ * Bit assignments for ECR register.
-+ */
-+
-+ /* Bits 0-4 */
-+
-+#define LPT_ECR_EMPTY 0x01 /* FIFO is empty */
-+#define LPT_ECR_FULL 0x02 /* FIFO is full */
-+#define LPT_ECR_SERV 0x04 /* Service bit */
-+#define LPT_ECR_DMA 0x08 /* DMA enable */
-+#define LPT_ECR_nINTR 0x10 /* Interrupt disable */
-+
-+ /*
-+ * Bits 5-7 are ECR modes.
-+ */
-+
-+#define LPT_ECR_PAR 0x20 /* Parallel port FIFO mode */
-+#define LPT_ECR_ECP 0x60 /* ECP mode */
-+#define LPT_ECR_CFG 0xE0 /* Configuration mode */
-+#define LPT_ECR_CLEAR ~0xE0 /* Clear mode bits */
-+
-+/*
-+ * Bit assignments for the parallel port STATUS register:
-+ */
-+
-+#define LPT_STAT_BIT0 0X1 /* Reserved. Bit always set. */
-+#define LPT_STAT_BIT1 0X2 /* Reserved. Bit always set. */
-+#define LPT_STAT_IRQ 0x4 /* interrupt status bit */
-+#define LPT_STAT_ERROR 0x8 /* set to 0 to indicate error */
-+#define LPT_STAT_SLCT 0x10 /* status of SLCT lead from printer */
-+#define LPT_STAT_PE 0x20 /* set to 1 when out of paper */
-+#define LPT_STAT_ACK 0x40 /* acknowledge - set to 0 when ready */
-+#define LPT_STAT_nBUSY 0x80 /* busy status bit, 0=busy, 1=ready */
-+
-+/*
-+ * Bit assignments for the parallel port CONTROL register:
-+ */
-+
-+#define LPT_CTRL_nSTROBE 0x1 /* Printer Strobe Control */
-+#define LPT_CTRL_nAUTOFD 0x2 /* Auto Feed Control */
-+#define LPT_CTRL_INIT 0x4 /* Initialize Printer Control */
-+#define LPT_CTRL_nSLCTIN 0x8 /* 0=select printer, 1=not selected */
-+#define LPT_CTRL_IRQ 0x10 /* Interrupt Request Enable Control */
-+#define LPT_CTRL_DIR 0x20 /* Direction control */
-+#define LPT_CTRL_BIT6 0X40 /* Reserved. Bit always set. */
-+#define LPT_CTRL_BIT7 0X80 /* Reserved. Bit always set. 
*/ -+ -+ -+#define LPT_WRITE(dev, regname, value) do { outb(value, (dev)->base + regname); } while (0) -+#define LPT_READ(dev, regname,value) do { value = inb((dev)->base + regname); } while (0) -+ -+ -+ -+/* Standard register access macros */ -+#define LPT_WRITE_CTRL(dev, value) LPT_WRITE(dev, LPT_CTRL, LPT_CTRL_INIT | value) -+#define LPT_WRITE_DATA(dev, value) LPT_WRITE(dev, LPT_DATA, value) -+#define LPT_READ_STAT(dev, value) LPT_READ(dev, LPT_STAT, value) -+ -+/* -+ * The jtag signals are connected to the parallel port as follows : -+ * -+ * TRST bit 0 -+ * TDI bit 1 -+ * TMS bit 2 -+ * TCLK AFX -+ * TDO PE -+ */ -+#define LPT_DATA_TRST 1 -+#define LPT_DATA_TDI 2 -+#define LPT_DATA_TMS 4 -+#define LPT_CTRL_TCLK LPT_CTRL_nAUTOFD -+#define LPT_STAT_TDO LPT_STAT_PE -+ -+/* -+ * The I2C signals are connected as follows : -+ */ -+#define LPT_DATA_SDA 2 -+#define LPT_CTRL_SCLK LPT_CTRL_nAUTOFD -+#define LPT_STAT_SDA LPT_STAT_PE -+ -+/* -+ * The ring selection signals are as follows : -+ * addr bit 0-7 -+ * clock nSLCTIN -+ */ -+#define LPT_CTRL_RCLK LPT_CTRL_nSLCTIN -+ -+ -+#endif /* __JTAGDRV_LINUX_H */ -diff -urN clean/drivers/net/qsnet/jtag/Makefile linux-2.6.9/drivers/net/qsnet/jtag/Makefile ---- clean/drivers/net/qsnet/jtag/Makefile 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/jtag/Makefile 2005-10-10 17:47:31.000000000 -0400 -@@ -0,0 +1,15 @@ -+# -+# Makefile for Quadrics QsNet -+# -+# Copyright (c) 2002-2004 Quadrics Ltd -+# -+# File: drivers/net/qsnet/jtag/Makefile -+# -+ -+ -+# -+ -+obj-$(CONFIG_JTAG) += jtag.o -+jtag-objs := jtagdrv_Linux.o jtagdrv.o -+ -+EXTRA_CFLAGS += -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT -diff -urN clean/drivers/net/qsnet/jtag/Makefile.conf linux-2.6.9/drivers/net/qsnet/jtag/Makefile.conf ---- clean/drivers/net/qsnet/jtag/Makefile.conf 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/jtag/Makefile.conf 2005-09-07 10:39:49.000000000 -0400 -@@ -0,0 +1,10 @@ -+# Flags for generating QsNet Linux Kernel Makefiles -+MODNAME = jtag.o -+MODULENAME = jtag -+KOBJFILES = jtagdrv_Linux.o jtagdrv.o -+EXPORT_KOBJS = -+CONFIG_NAME = CONFIG_JTAG -+SGALFC = -+# EXTRALINES START -+ -+# EXTRALINES END -diff -urN clean/drivers/net/qsnet/jtag/quadrics_version.h linux-2.6.9/drivers/net/qsnet/jtag/quadrics_version.h ---- clean/drivers/net/qsnet/jtag/quadrics_version.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/jtag/quadrics_version.h 2005-09-07 10:39:49.000000000 -0400 -@@ -0,0 +1 @@ -+#define QUADRICS_VERSION "5.11.3qsnet" -diff -urN clean/drivers/net/qsnet/Kconfig linux-2.6.9/drivers/net/qsnet/Kconfig ---- clean/drivers/net/qsnet/Kconfig 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/Kconfig 2005-10-10 17:47:30.000000000 -0400 -@@ -0,0 +1,79 @@ -+# -+# Kconfig for Quadrics QsNet -+# -+# Copyright (c) 2004 Quadrics Ltd -+# -+# File: driver/net/qsnet/Kconfig -+# -+ -+menu "Quadrics QsNet" -+ depends on NETDEVICES -+ -+config QSNET -+ tristate "Quadrics QsNet support" -+ default m -+ depends on PCI -+ ---help--- -+ Quadrics QsNet is a high bandwidth, ultra low latency cluster interconnect -+ which provides both user and kernel programmers with secure, direct access -+ to the Quadrics network. -+ -+config ELAN3 -+ tristate "Elan 3 device driver" -+ default m -+ depends on QSNET -+ ---help--- -+ This is the main device driver for the Quadrics QsNet (Elan3) PCI device. 
-+ This is a high bandwidth, ultra low latency interconnect which provides
-+ both user and kernel programmers with secure, direct access to the
-+ Quadrics network.
-+
-+config ELAN4
-+ tristate "Elan 4 device driver"
-+ default m
-+ depends on QSNET
-+ ---help---
-+ This is the main device driver for the Quadrics QsNetII (Elan4) PCI-X device.
-+ This is a high bandwidth, ultra low latency interconnect which provides
-+ both user and kernel programmers with secure, direct access to the
-+ Quadrics network.
-+
-+config EP
-+ tristate "Elan Kernel Comms"
-+ default m
-+ depends on QSNET && (ELAN4 || ELAN3)
-+ ---help---
-+ This module implements the QsNet kernel communications layer. This
-+ is used to layer kernel level facilities on top of the basic Elan
-+ device drivers. These can be used to implement subsystems such as
-+ TCP/IP and remote filing systems over the QsNet interconnect.
-+
-+config EIP
-+ tristate "Elan IP device driver"
-+ default m
-+ depends on QSNET && EP && NET
-+ ---help---
-+ This is a network IP device driver for the Quadrics QsNet device.
-+ It allows the TCP/IP protocol to be run over the Quadrics interconnect.
-+
-+config RMS
-+ tristate "Resource Management System support"
-+ default m
-+ depends on QSNET
-+ ---help---
-+ This is a support module for the Quadrics RMS resource manager. It provides kernel
-+ services for monitoring and controlling user job execution, termination and cleanup.
-+
-+config JTAG
-+ tristate "Switch monitoring"
-+ default m
-+ depends on QSNET
-+ ---help---
-+ The jtag interface allows processes to send JTAG information to, and
-+ retrieve it from, a Quadrics QsNet Elite switch via the parallel port.
-+ The module requires a /dev/jtag[0-3] device entry (usually only
-+ /dev/jtag0 is present), and each device allows only one process at a
-+ time to access this resource.
-+ For more information about the JTAG interface, please refer to the IEEE
-+ documentation at http://www.ieee.org/
-+endmenu
-diff -urN clean/drivers/net/qsnet/Makefile linux-2.6.9/drivers/net/qsnet/Makefile
---- clean/drivers/net/qsnet/Makefile 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/Makefile 2005-10-10 17:47:30.000000000 -0400
-@@ -0,0 +1,15 @@
-+#
-+# Makefile for Quadrics QsNet
-+#
-+# Copyright (c) 2002-2005 Quadrics Ltd.
-+#
-+# File: driver/net/qsnet/Makefile
-+#
-+
-+obj-$(CONFIG_QSNET) += qsnet/ elan/
-+obj-$(CONFIG_ELAN3) += elan3/
-+obj-$(CONFIG_ELAN4) += elan4/
-+obj-$(CONFIG_EP) += ep/
-+obj-$(CONFIG_EIP) += eip/
-+obj-$(CONFIG_RMS) += rms/
-+obj-$(CONFIG_JTAG) += jtag/
-diff -urN clean/drivers/net/qsnet/qsnet/debug.c linux-2.6.9/drivers/net/qsnet/qsnet/debug.c
---- clean/drivers/net/qsnet/qsnet/debug.c 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/drivers/net/qsnet/qsnet/debug.c 2005-03-23 06:04:54.000000000 -0500
-@@ -0,0 +1,583 @@
-+/*
-+ * Copyright (c) 2003 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: debug.c,v 1.22 2005/03/23 11:04:54 david Exp $" -+/* $Source: /cvs/master/quadrics/qsnet/debug.c,v $ */ -+ -+#include -+#include -+#include -+ -+caddr_t qsnet_debug_buffer_ptr = NULL; -+int qsnet_debug_front = 0; -+int qsnet_debug_back = 0; -+int qsnet_debug_lost_lines = 0; -+int qsnet_debug_disabled = 0; -+ -+int qsnet_debug_line_size = 256; -+int qsnet_debug_num_lines = 8192; -+ -+int qsnet_assfail_mode = 1; /* default to BUG() */ -+ -+int qsnet_debug_running = 0; -+int kqsnet_debug_running = 0; -+ -+static spinlock_t qsnet_debug_lock; -+static kcondvar_t qsnet_debug_wait; -+static char qsnet_debug_buffer_space[8192]; -+ -+#define QSNET_DEBUG_PREFIX_MAX_SIZE 32 -+#define QSNET_DEBUG_MAX_WORDWRAP 15 -+ -+/* must be larger than QSNET_DEBUG_PREFIX_MAX_SIZE + QSNET_DEBUG_MAX_WORDWRAP + 2 */ -+#if defined(DIGITAL_UNIX) -+#define QSNET_DEBUG_CONSOLE_WIDTH 80 -+#elif defined(LINUX) -+#define QSNET_DEBUG_CONSOLE_WIDTH 128 -+#endif -+ -+#define isspace(CH) ((CH==' ') | (CH=='\t') | (CH=='\n')) -+ -+#ifdef LINUX -+#define ALLOC_DEBUG_BUFFER(ptr) do { (ptr) = (void *)__get_free_pages (GFP_KERNEL, get_order (qsnet_debug_num_lines * qsnet_debug_line_size)); } while (0) -+#define FREE_DEBUG_BUFFER(ptr) free_pages ((unsigned long) ptr, get_order (qsnet_debug_num_lines * qsnet_debug_line_size)) -+#else -+#define ALLOC_DEBUG_BUFFER(ptr) KMEM_ALLOC (ptr, caddr_t, qsnet_debug_num_lines * qsnet_debug_line_size, 1) -+#define FREE_DEBUG_BUFFER(ptr) KMEM_FREE (ptr, qsnet_debug_num_lines * qsnet_debug_line_size) -+#endif -+ -+void -+qsnet_debug_init () -+{ -+ spin_lock_init (&qsnet_debug_lock); -+ kcondvar_init (&qsnet_debug_wait); -+ -+ qsnet_debug_front = 0; -+ qsnet_debug_back = 0; -+ qsnet_debug_lost_lines = 0; -+ -+ if (qsnet_debug_line_size < (QSNET_DEBUG_PREFIX_MAX_SIZE + QSNET_DEBUG_MAX_WORDWRAP + 2)) -+ qsnet_debug_line_size = 256; -+ -+ qsnet_debug_running = 1; -+ -+ qsnet_proc_register_int (qsnet_procfs_config, "assfail_mode", &qsnet_assfail_mode, 0); -+} -+ -+void -+qsnet_debug_fini() -+{ -+ if (!qsnet_debug_running) return; -+ -+ remove_proc_entry ("assfail_mode", qsnet_procfs_config); -+ -+ spin_lock_destroy (&qsnet_debug_lock); -+ kcondvar_destroy (&qsnet_debug_wait); -+ -+ if (qsnet_debug_buffer_ptr) -+ FREE_DEBUG_BUFFER (qsnet_debug_buffer_ptr); -+ -+ qsnet_debug_buffer_ptr = NULL; -+ qsnet_debug_lost_lines = 0; -+ qsnet_debug_running = 0; -+} -+ -+void -+qsnet_debug_disable(int val) -+{ -+ qsnet_debug_disabled = val; -+} -+ -+void -+qsnet_debug_alloc() -+{ -+ caddr_t ptr; -+ unsigned long flags; -+ -+ if (!qsnet_debug_running) return; -+ -+ if (qsnet_debug_buffer_ptr == NULL) -+ { -+ ALLOC_DEBUG_BUFFER (ptr); -+ -+ if (ptr != NULL) -+ { -+ spin_lock_irqsave (&qsnet_debug_lock, flags); -+ if (qsnet_debug_buffer_ptr == NULL) -+ { -+ qsnet_debug_buffer_ptr = ptr; -+ spin_unlock_irqrestore (&qsnet_debug_lock, flags); -+ } -+ else -+ { -+ spin_unlock_irqrestore (&qsnet_debug_lock, flags); -+ -+ FREE_DEBUG_BUFFER (ptr); -+ } -+ } -+ } -+ -+} -+ -+static void -+qsnet_prefix_debug(unsigned int mode, const char *prefix, char *buffer) -+{ -+ /* assumes caller has lock */ -+ -+ int prefixlen = strlen(prefix); -+ char pref[QSNET_DEBUG_PREFIX_MAX_SIZE]; -+ int prefix_done = 0; -+ -+ if (!qsnet_debug_running) return; -+ -+ if (qsnet_debug_disabled) -+ return; -+ -+ if (prefixlen >= QSNET_DEBUG_PREFIX_MAX_SIZE) -+ { -+ strncpy(pref,prefix,QSNET_DEBUG_PREFIX_MAX_SIZE -2); -+ strcpy 
(&pref[QSNET_DEBUG_PREFIX_MAX_SIZE-5],"... "); -+ -+ prefix = pref; -+ prefixlen = strlen(prefix); -+ } -+ -+#ifdef CONFIG_MPSAS -+ { -+ char *p; -+#define TRAP_PUTCHAR_B (0x17a - 256) -+#define SAS_PUTCHAR(c) do {\ -+ register int o0 asm ("o0") = (c);\ -+\ -+ asm volatile ("ta %0; nop" \ -+ : /* no outputs */\ -+ : /* inputs */ "i" (TRAP_PUTCHAR_B), "r" (o0)\ -+ : /* clobbered */ "o0");\ -+\ -+ if (o0 == '\n') {\ -+ o0 = '\r';\ -+\ -+ asm volatile ("ta %0; nop" \ -+ : /* no outputs */\ -+ : /* inputs */ "i" (TRAP_PUTCHAR_B), "r" (o0)\ -+ : /* clobbered */ "o0");\ -+ }\ -+ } while(0) -+ -+ for (p = prefix; *p; p++) -+ SAS_PUTCHAR (*p); -+ -+ for (p = buffer; *p; p++) -+ SAS_PUTCHAR (*p); -+ } -+#else -+ if (mode & QSNET_DEBUG_BUFFER) -+ { -+ if (qsnet_debug_buffer_ptr == NULL) -+ qsnet_debug_lost_lines++; -+ else -+ { -+ caddr_t base = &qsnet_debug_buffer_ptr[qsnet_debug_line_size * qsnet_debug_back]; -+ caddr_t lim = base + qsnet_debug_line_size - 2; -+ caddr_t p; -+ -+ p = buffer; -+ prefix_done = 0; -+ while (*p) -+ { -+ /* sort out prefix */ -+ if ( prefix_done++ ) -+ { -+ int i; -+ for(i=0;i 0 ) -+ { -+ int i; -+ for(i=0;i remaining) len = remaining; -+ -+ strncpy(line, p, len); -+ line[len] = 0; -+ p += len; -+ -+ /* word wrap */ -+ if ((len == remaining) && *p && !isspace(*p)) -+ { -+ /* lets see if we can back track and find a white space to break on */ -+ char * ptr = &line[len-1]; -+ int count = 1; -+ -+ while ( ( !isspace(*ptr) ) && ( count < QSNET_DEBUG_MAX_WORDWRAP )) -+ { -+ count++; -+ ptr--; -+ } -+ -+ if ( isspace(*ptr) ) -+ { -+ /* found somewhere to wrap to */ -+ p -= (count-1); /* need to loose the white space */ -+ len -= count; -+ } -+ } -+ -+ if (line[len-1] != '\n' ) -+ { -+ line[len] = '\n'; -+ line[len+1] = 0; -+ } -+ -+ /* we put a \n in so dont need another one next */ -+ if ( *p == '\n') -+ p++; -+ -+#if defined(DIGITAL_UNIX) -+ { -+ char *pr; -+ -+ for (pr = pref; *pr; pr++) -+ cnputc (*pr); -+ -+ for (pr = line; *pr; pr++) -+ cnputc (*pr); -+ } -+#elif defined(LINUX) -+ printk("%s%s",pref,line); -+#endif -+ } -+ } -+#endif /* CONFIG_MPSAS */ -+} -+ -+void -+qsnet_vdebugf (unsigned int mode, const char *prefix, const char *fmt, va_list ap) -+{ -+ unsigned long flags; -+ -+ if (!qsnet_debug_running) return; -+ -+ spin_lock_irqsave (&qsnet_debug_lock, flags); -+ -+ qsnet_debug_buffer_space[0] = '\0'; -+ -+#if defined(DIGITAL_UNIX) -+ prf (qsnet_debug_buffer_space+strlen(qsnet_debug_buffer_space), NULL, fmt, ap); -+#elif defined(LINUX) -+ vsprintf (qsnet_debug_buffer_space+strlen(qsnet_debug_buffer_space), fmt, ap); -+#endif -+ -+ if (prefix == NULL) -+ printk ("qsnet_vdebugf: prefix==NULL\n"); -+ else -+ qsnet_prefix_debug(mode, prefix, qsnet_debug_buffer_space); -+ -+ spin_unlock_irqrestore (&qsnet_debug_lock, flags); -+} -+ -+void kqsnet_debugf(char *fmt,...) -+{ -+ if ( kqsnet_debug_running ) { -+ va_list ap; -+ char string[20]; -+ -+ sprintf (string, "mm=%p:", current->mm); -+ va_start(ap, fmt); -+ qsnet_vdebugf(QSNET_DEBUG_BUFFER, string, fmt, ap); -+ va_end(ap); -+ } -+} -+void -+qsnet_debugf(unsigned int mode, const char *fmt,...) 
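kqsnet_debugf() above and qsnet_debugf(), whose definition follows, are both printf-style and take the debug spinlock with interrupts disabled, so they may be called from interrupt context. Illustrative calls (the message text and arguments are invented for the example):

/* Log a line to the in-memory ring buffer, later retrievable via
 * qsnet_debug_buffer() or qsnet_debug_dump(): */
qsnet_debugf(QSNET_DEBUG_BUFFER, "elan%d: restarting dma queue\n", instance);

/* Wrapper that prefixes each line with the caller's mm pointer whenever
 * kqsnet_debug_running is set: */
kqsnet_debugf("fault at %p on vp %d\n", addr, vp);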
-+{ -+ va_list ap; -+ unsigned long flags; -+ -+ if (!qsnet_debug_running) return; -+ -+ spin_lock_irqsave (&qsnet_debug_lock, flags); -+ -+ qsnet_debug_buffer_space[0] = '\0'; -+ -+ va_start (ap, fmt); -+#if defined(DIGITAL_UNIX) -+ prf (qsnet_debug_buffer_space+strlen(qsnet_debug_buffer_space), NULL, fmt, ap); -+#elif defined(LINUX) -+ vsprintf (qsnet_debug_buffer_space+strlen(qsnet_debug_buffer_space), fmt, ap); -+#endif -+ va_end (ap); -+ -+ qsnet_prefix_debug(mode, "", qsnet_debug_buffer_space); -+ -+ spin_unlock_irqrestore (&qsnet_debug_lock, flags); -+} -+ -+int -+qsnet_debug_buffer (caddr_t ubuffer, int len) -+{ -+ caddr_t buffer, ptr, base; -+ int remain, len1; -+ unsigned long flags; -+ static char qsnet_space[65536]; -+ -+ if (!qsnet_debug_running) return (0); -+ -+ if (len < qsnet_debug_line_size) -+ return (-1); -+ -+ if (len > (qsnet_debug_line_size * qsnet_debug_num_lines)) -+ len = qsnet_debug_line_size * qsnet_debug_num_lines; -+ -+ if ( len > 65536 ) { -+ KMEM_ZALLOC (buffer, caddr_t, len, 1); -+ } else -+ buffer = qsnet_space; -+ -+ if (buffer == NULL) -+ return (-1); -+ -+ if (qsnet_debug_buffer_ptr == NULL) -+ qsnet_debug_alloc(); -+ -+ if (qsnet_debug_buffer_ptr == NULL) -+ { -+ if ( len > 65536 ) -+ KMEM_FREE (buffer, len); -+ return (-1); -+ } -+ -+ spin_lock_irqsave (&qsnet_debug_lock, flags); -+ -+ while (!qsnet_debug_lost_lines && (qsnet_debug_back == qsnet_debug_front)) -+ if (kcondvar_waitsig (&qsnet_debug_wait, &qsnet_debug_lock, &flags) == 0) -+ break; -+ -+ ptr = buffer; -+ remain = len; -+ -+ if (qsnet_debug_lost_lines) -+ { -+ qsnet_debug_lost_lines = 0; -+ strcpy (ptr, "Debug Buffer has overflowed!!\n"); -+ len1 = strlen (ptr); -+ -+ remain -= len1; -+ ptr += len1; -+ } -+ -+ while (qsnet_debug_front != qsnet_debug_back) -+ { -+ /* copy the line from DebugFront */ -+ base = &qsnet_debug_buffer_ptr[qsnet_debug_front*qsnet_debug_line_size]; -+ -+ len1 = strlen (base); -+ -+ if (len1 > remain) -+ break; -+ -+ bcopy (base, ptr, len1); -+ -+ ptr += len1; -+ remain -= len1; -+ -+ qsnet_debug_front = (++qsnet_debug_front == qsnet_debug_num_lines) ? 
0 : qsnet_debug_front; -+ } -+ -+ spin_unlock_irqrestore (&qsnet_debug_lock, flags); -+ -+ len1 = ptr - buffer; -+ -+ if (len1 != 0 && copyout (buffer, ubuffer, len1)) -+ len1 = -1; -+ -+ if ( len > 65536 ) -+ KMEM_FREE (buffer, len); -+ -+ return (len1); -+} -+ -+void -+qsnet_debug_buffer_on() -+{ -+ if (qsnet_debug_buffer_ptr == NULL) -+ qsnet_debug_alloc(); -+} -+ -+void -+qsnet_debug_buffer_clear() -+{ -+ unsigned long flags; -+ -+ qsnet_debug_buffer_on(); -+ -+ if (qsnet_debug_buffer_ptr != NULL){ -+ spin_lock_irqsave (&qsnet_debug_lock, flags); -+ qsnet_debug_front = 0; -+ qsnet_debug_back = 0; -+ qsnet_prefix_debug(QSNET_DEBUG_BUFFER,"Clear",""); -+ spin_unlock_irqrestore (&qsnet_debug_lock, flags); -+ } -+} -+ -+void -+qsnet_debug_buffer_mark(char *str) -+{ -+ unsigned long flags; -+ -+ qsnet_debug_buffer_on(); -+ -+ if (qsnet_debug_buffer_ptr != NULL) { -+ spin_lock_irqsave (&qsnet_debug_lock, flags); -+ qsnet_prefix_debug(QSNET_DEBUG_BUFFER,"Mark",str); -+ spin_unlock_irqrestore (&qsnet_debug_lock, flags); -+ } -+} -+int -+qsnet_debug_dump () -+{ -+ unsigned long flags; -+ -+ if (!qsnet_debug_running) return (0); -+ -+ if (qsnet_debug_buffer_ptr == NULL) -+ qsnet_debug_alloc(); -+ -+ if (qsnet_debug_buffer_ptr == NULL) -+ return (-1); -+ -+ spin_lock_irqsave (&qsnet_debug_lock, flags); -+ -+ while (qsnet_debug_front != qsnet_debug_back) -+ { -+ printk ("%s", &qsnet_debug_buffer_ptr[qsnet_debug_front*qsnet_debug_line_size]); -+ -+ qsnet_debug_front = (++qsnet_debug_front == qsnet_debug_num_lines) ? 0 : qsnet_debug_front; -+ } -+ -+ if (qsnet_debug_lost_lines) -+ printk ("\n**** Debug buffer has lost %d lines\n****\n",qsnet_debug_lost_lines); -+ -+ spin_unlock_irqrestore (&qsnet_debug_lock, flags); -+ -+ return (0); -+} -+ -+int -+qsnet_debug_kmem (void *handle) -+{ -+ if (!qsnet_debug_running) return (0); -+ -+#ifdef KMEM_DEBUG -+ qsnet_kmem_display(handle); -+#endif -+ return (0); -+} -+ -+int -+qsnet_assfail (char *ex, const char *func, char *file, int line) -+{ -+ qsnet_debugf (QSNET_DEBUG_BUFFER, "qsnet: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line); -+ -+ printk (KERN_EMERG "qsnet: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line); -+ -+ if (panicstr) -+ return (0); -+ -+ if (qsnet_assfail_mode & 1) /* return to BUG() */ -+ return 1; -+ -+ if (qsnet_assfail_mode & 2) -+ panic ("qsnet: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line); -+ if (qsnet_assfail_mode & 4) -+ qsnet_debug_disable (1); -+ -+ return 0; -+ -+} -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "linux" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/qsnet/i686_mmx.c linux-2.6.9/drivers/net/qsnet/qsnet/i686_mmx.c ---- clean/drivers/net/qsnet/qsnet/i686_mmx.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/qsnet/i686_mmx.c 2004-01-05 07:08:25.000000000 -0500 -@@ -0,0 +1,99 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: i686_mmx.c,v 1.11 2004/01/05 12:08:25 mike Exp $" -+/* $Source: /cvs/master/quadrics/qsnet/i686_mmx.c,v $*/ -+ -+#include -+ -+#if defined(LINUX_I386) -+ -+#include -+#include -+#include -+#include -+ -+int mmx_disabled = 0; -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) -+/* These functions are lifted from arch/i386/kernel/i387.c -+ * and MUST be kept in step with the kernel (currently 2.4.17) -+ * alternatively we should export the kernel_fpu_begin() function -+ */ -+static inline void __save_init_fpu( struct task_struct *tsk ) -+{ -+ if ( cpu_has_fxsr ) { -+ asm volatile( "fxsave %0 ; fnclex" -+ : "=m" (tsk->thread.i387.fxsave) ); -+ } else { -+ asm volatile( "fnsave %0 ; fwait" -+ : "=m" (tsk->thread.i387.fsave) ); -+ } -+ tsk->flags &= ~PF_USEDFPU; -+} -+#if defined(MODULE) -+void kernel_fpu_begin(void) -+{ -+ struct task_struct *tsk = current; -+ -+ if (tsk->flags & PF_USEDFPU) { -+ __save_init_fpu(tsk); -+ return; -+ } -+ clts(); -+} -+#endif -+#endif -+ -+extern inline int -+mmx_preamble(void) -+{ -+ if (mmx_disabled || in_interrupt()) -+ return (0); -+ -+ kernel_fpu_begin(); -+ -+ return (1); -+} -+ -+extern inline void -+mmx_postamble(void) -+{ -+ kernel_fpu_end(); -+} -+ -+extern u64 -+qsnet_readq (volatile u64 *ptr) -+{ -+ u64 value; -+ -+ if (! mmx_preamble()) -+ value = *ptr; -+ else -+ { -+ asm volatile ("movq (%0), %%mm0\n" -+ "movq %%mm0, (%1)\n" -+ : : "r" (ptr), "r" (&value) : "memory"); -+ mmx_postamble(); -+ } -+ return (value); -+} -+ -+void -+qsnet_writeq(u64 value, volatile u64 *ptr) -+{ -+ if (! mmx_preamble()) -+ *ptr = value; -+ else -+ { -+ asm volatile ("movq (%0), %%mm0\n" -+ "movq %%mm0, (%1)\n" -+ : : "r" (&value), "r" (ptr) : "memory"); -+ mmx_postamble(); -+ } -+} -+#endif -diff -urN clean/drivers/net/qsnet/qsnet/kernel_linux.c linux-2.6.9/drivers/net/qsnet/qsnet/kernel_linux.c ---- clean/drivers/net/qsnet/qsnet/kernel_linux.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/qsnet/kernel_linux.c 2005-09-07 10:35:03.000000000 -0400 -@@ -0,0 +1,902 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: kernel_linux.c,v 1.74.2.5 2005/09/07 14:35:03 mike Exp $" -+/* $Source: /cvs/master/quadrics/qsnet/kernel_linux.c,v $*/ -+ -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include /* for smp_call_function() prototype */ -+#include -+#include -+ -+#include -+ -+extern int mmx_disabled; -+extern int qsnet_debug_line_size; -+extern int qsnet_debug_num_lines; -+ -+gid_t qsnet_procfs_gid; -+struct proc_dir_entry *qsnet_procfs_root; -+struct proc_dir_entry *qsnet_procfs_config; -+ -+MODULE_AUTHOR("Quadrics Ltd."); -+MODULE_DESCRIPTION("QsNet Kernel support code"); -+ -+MODULE_LICENSE("GPL"); -+ -+#if defined(LINUX_I386) -+module_param(mmx_disabled, uint, 0); -+#endif -+ -+module_param(qsnet_debug_line_size, uint, 0); -+module_param(qsnet_debug_num_lines, uint, 0); -+ -+module_param(qsnet_procfs_gid, uint, 0); -+ -+#ifdef KMEM_DEBUG -+EXPORT_SYMBOL(qsnet_kmem_alloc_debug); -+EXPORT_SYMBOL(qsnet_kmem_free_debug); -+#else -+EXPORT_SYMBOL(qsnet_kmem_alloc); -+EXPORT_SYMBOL(qsnet_kmem_free); -+#endif -+ -+EXPORT_SYMBOL(qsnet_kmem_display); -+EXPORT_SYMBOL(kmem_to_phys); -+ -+EXPORT_SYMBOL(cpu_hold_all); -+EXPORT_SYMBOL(cpu_release_all); -+ -+#if defined(LINUX_I386) -+EXPORT_SYMBOL(qsnet_readq); -+EXPORT_SYMBOL(qsnet_writeq); -+#endif -+ -+/* debug.c */ -+EXPORT_SYMBOL(qsnet_debugf); -+EXPORT_SYMBOL(kqsnet_debugf); -+EXPORT_SYMBOL(qsnet_vdebugf); -+EXPORT_SYMBOL(qsnet_debug_buffer); -+EXPORT_SYMBOL(qsnet_debug_alloc); -+EXPORT_SYMBOL(qsnet_debug_dump); -+EXPORT_SYMBOL(qsnet_debug_kmem); -+EXPORT_SYMBOL(qsnet_debug_disable); -+ -+EXPORT_SYMBOL(qsnet_assfail); -+ -+EXPORT_SYMBOL(qsnet_procfs_gid); -+EXPORT_SYMBOL(qsnet_procfs_root); -+ -+static int qsnet_open (struct inode *ino, struct file *fp); -+static int qsnet_release (struct inode *ino, struct file *fp); -+static int qsnet_ioctl (struct inode *ino, struct file *fp, unsigned int cmd, unsigned long arg); -+ -+static struct file_operations qsnet_ioctl_fops = -+{ -+ ioctl: qsnet_ioctl, -+ open: qsnet_open, -+ release: qsnet_release, -+}; -+ -+static int -+qsnet_open (struct inode *inode, struct file *fp) -+{ -+ MOD_INC_USE_COUNT; -+ fp->private_data = NULL; -+ return (0); -+} -+ -+static int -+qsnet_release (struct inode *inode, struct file *fp) -+{ -+ MOD_DEC_USE_COUNT; -+ return (0); -+} -+ -+static int -+qsnet_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, unsigned long arg) -+{ -+ int res=0; -+ -+ switch (cmd) -+ { -+ case QSNETIO_DEBUG_KMEM: -+ { -+ QSNETIO_DEBUG_KMEM_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (QSNETIO_DEBUG_KMEM_STRUCT))) -+ return (-EFAULT); -+ -+ /* doesnt use handle as a pointer */ -+ qsnet_kmem_display(args.handle); -+ break; -+ } -+ -+ case QSNETIO_DEBUG_DUMP : -+ { -+ res = qsnet_debug_dump(); -+ break; -+ } -+ -+ case QSNETIO_DEBUG_BUFFER : -+ { -+ QSNETIO_DEBUG_BUFFER_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (QSNETIO_DEBUG_BUFFER_STRUCT))) -+ return (-EFAULT); -+ -+ /* qsnet_debug_buffer uses copyout */ -+ if ((res = qsnet_debug_buffer (args.addr, args.len)) != -1) -+ { -+ args.len = res; -+ if (copy_to_user ((void *) arg, &args, sizeof (QSNETIO_DEBUG_BUFFER_STRUCT))) -+ return (-EFAULT); -+ res = 0; -+ } -+ break; -+ } -+ default: -+ res = EINVAL; -+ break; -+ } -+ -+ return ((res == 0) ? 
0 : -res); -+} -+ -+#ifdef KMEM_DEBUG -+static int qsnet_kmem_open (struct inode *ino, struct file *fp); -+static int qsnet_kmem_release (struct inode *ino, struct file *fp); -+static ssize_t qsnet_kmem_read (struct file *file, char *buf, size_t count, loff_t *ppos); -+ -+static struct file_operations qsnet_kmem_fops = -+{ -+ open: qsnet_kmem_open, -+ release: qsnet_kmem_release, -+ read: qsnet_kmem_read, -+}; -+ -+typedef struct qsnet_private_space -+{ -+ char * space; -+ int size; -+ struct qsnet_private_space *next; -+} QSNET_PRIVATE_SPACE; -+ -+typedef struct qsnet_private -+{ -+ QSNET_PRIVATE_SPACE *space_chain; -+ QSNET_PRIVATE_SPACE *current_space; -+ int current_pos; -+ -+} QSNET_PRIVATE; -+ -+#define QSNET_KMEM_DEBUG_LINE_SIZE ((int)512) -+#define QSNET_PRIVATE_PAGE_SIZE ((int)(4*1024)) -+ -+static int qsnet_kmem_fill(QSNET_PRIVATE *pd); -+ -+void -+destroy_chain(QSNET_PRIVATE * pd) -+{ -+ QSNET_PRIVATE_SPACE *mem, *next; -+ -+ if (pd == NULL) return; -+ -+ for(mem = pd->space_chain ; mem != NULL; ) -+ { -+ next = mem->next; -+ if ( mem->space ) -+ kfree ( mem->space); -+ kfree(mem); -+ mem = next; -+ } -+ kfree (pd); -+} -+ -+QSNET_PRIVATE * -+make_chain(int len) -+{ -+ QSNET_PRIVATE * pd; -+ QSNET_PRIVATE_SPACE * mem; -+ int i; -+ -+ /* make the private data block */ -+ if ((pd = kmalloc (sizeof (QSNET_PRIVATE), GFP_KERNEL)) == NULL) -+ return NULL; -+ pd->space_chain = NULL; -+ -+ /* first make the holders */ -+ for(i=0;inext = pd->space_chain; -+ mem->size = 0; -+ mem->space = 0; -+ pd->space_chain = mem; -+ -+ /* now add the space */ -+ if ((mem->space = kmalloc (QSNET_PRIVATE_PAGE_SIZE, GFP_KERNEL)) == NULL) -+ { -+ destroy_chain(pd); -+ return (NULL); -+ } -+ -+ mem->space[0] = 0; -+ -+ } -+ -+ pd->current_space = pd->space_chain; -+ pd->current_pos = 0; -+ -+ return pd; -+} -+ -+static int -+qsnet_kmem_open (struct inode *inode, struct file *fp) -+{ -+ MOD_INC_USE_COUNT; -+ fp->private_data = NULL; -+ return (0); -+} -+ -+static int -+qsnet_kmem_release (struct inode *inode, struct file *fp) -+{ -+ if ( fp->private_data ) -+ { -+ QSNET_PRIVATE * pd = (QSNET_PRIVATE *) fp->private_data; -+ -+ /* free the space */ -+ if (pd->space_chain) -+ kfree (pd->space_chain); -+ -+ /* free struct */ -+ kfree (pd); -+ } -+ MOD_DEC_USE_COUNT; -+ return (0); -+} -+ -+static ssize_t -+qsnet_kmem_read (struct file *file, char *buf, size_t count, loff_t *ppos) -+{ -+ QSNET_PRIVATE * pd = (QSNET_PRIVATE *) file->private_data; -+ int error; -+ int output_count; -+ int num_of_links=10; -+ -+ /* make a buffer to output count bytes in */ -+ if ((error = verify_area (VERIFY_WRITE, buf, count)) != 0) -+ return (error); -+ -+ if ( pd == NULL) -+ { -+ /* first time */ -+ -+ /* ok we have to guess at how much space we are going to need */ -+ /* if it fails we up the space and carry try again */ -+ /* we have to do it this way as we cant get more memory whilst */ -+ /* holding the lock */ -+ if ((pd = make_chain(num_of_links)) == NULL) -+ return (-ENOMEM); -+ -+ while ( qsnet_kmem_fill(pd) ) -+ { -+ destroy_chain(pd); -+ num_of_links += 10; -+ if ((pd = make_chain(num_of_links)) == NULL) -+ return (-ENOMEM); -+ } -+ -+ /* we have the space and filled it */ -+ file->private_data = (void *)pd; -+ } -+ -+ /* output buffer */ -+ if ( pd->current_pos >= pd->current_space->size ) -+ return (0); /* finished */ -+ -+ output_count = pd->current_space->size - pd->current_pos; -+ if ( output_count > count ) -+ output_count = count; -+ -+ copy_to_user(buf, (pd->current_space->space + pd->current_pos), 
output_count); -+ -+ pd->current_pos += output_count; -+ ppos += output_count; -+ -+ /* just check to see if we have finished the current space */ -+ if ( pd->current_pos >= pd->current_space->size ) -+ { -+ if ( pd->current_space->next ) -+ { -+ pd->current_space = pd->current_space->next; -+ pd->current_pos = 0; -+ } -+ } -+ -+ return (output_count); -+} -+#endif /* KMEM_DEBUG */ -+ -+static int -+proc_write_qsnetdebug(struct file *file, const char *buffer, -+ unsigned long count, void *data) -+{ -+ char tmpbuf[128]; -+ int res; -+ -+ if (count > sizeof (tmpbuf)-1) -+ return (-EINVAL); -+ -+ MOD_INC_USE_COUNT; -+ -+ if (copy_from_user (tmpbuf, buffer, count)) -+ res = -EFAULT; -+ else -+ { -+ tmpbuf[count] = '\0'; -+ -+ if (tmpbuf[count-1] == '\n') -+ tmpbuf[count-1] = '\0'; -+ -+ if (! strcmp (tmpbuf, "on")) -+ qsnet_debug_buffer_on(); -+ -+ if (! strcmp (tmpbuf, "clear")) -+ qsnet_debug_buffer_clear(); -+ -+ if (! strncmp (tmpbuf, "mark",4)) -+ qsnet_debug_buffer_mark( &tmpbuf[4] ); -+ -+ res = count; -+ } -+ -+ MOD_DEC_USE_COUNT; -+ -+ return (res); -+} -+ -+static int -+proc_read_qsnetdebug(char *page, char **start, off_t off, -+ int count, int *eof, void *data) -+{ -+ int len = sprintf (page, "echo command > /proc/qsnet/config/qsnetdebug\ncommand = on | off | clear | mark text\n"); -+ return (qsnet_proc_calc_metrics (page, start, off, count, eof, len)); -+} -+ -+#include "quadrics_version.h" -+extern int kqsnet_debug_running; -+static char quadrics_version[] = QUADRICS_VERSION; -+ -+static int __init qsnet_init(void) -+{ -+ struct proc_dir_entry *p; -+ -+ printk ("qsnet Module (version %s)\n", quadrics_version); -+ if ((qsnet_procfs_root = proc_mkdir ("qsnet", 0)) == NULL) -+ { -+ printk ("qsnet: failed to create /proc/qsnet \n"); -+ return (-ENXIO); -+ } -+ -+ if ((p = create_proc_entry ("ioctl", S_IRUGO|S_IWUSR|S_IWGRP, qsnet_procfs_root)) == NULL) -+ { -+ printk ("qsnet: failed to register /proc/qsnet/ioctl\n"); -+ return (-ENXIO); -+ } -+ p->proc_fops = &qsnet_ioctl_fops; -+ p->owner = THIS_MODULE; -+ p->data = NULL; -+ p->gid = qsnet_procfs_gid; -+ -+ qsnet_proc_register_str (qsnet_procfs_root, "version", quadrics_version, S_IRUGO); -+ -+ if ((qsnet_procfs_config = proc_mkdir ("config", qsnet_procfs_root)) == NULL) -+ { -+ printk ("qsnet: failed to create /proc/qsnet/config \n"); -+ return (-ENXIO); -+ } -+ -+#ifdef KMEM_DEBUG -+ if ((p = create_proc_entry ("kmem_debug", S_IRUGO|S_IWUSR|S_IWGRP, qsnet_procfs_config)) == NULL) -+ { -+ printk ("qsnet: failed to register /proc/qsnet/config/kmem_debug\n"); -+ return (-ENXIO); -+ } -+ p->proc_fops = &qsnet_kmem_fops; -+ p->owner = THIS_MODULE; -+ p->data = NULL; -+ p->gid = qsnet_procfs_gid; -+#endif -+ -+ qsnet_debug_init(); -+ -+ qsnet_proc_register_int (qsnet_procfs_config, "kqsnet_debug_running", &kqsnet_debug_running, 0); -+ -+ if ((p = create_proc_entry ("qsnetdebug", S_IRUGO|S_IWUSR|S_IWGRP, qsnet_procfs_config)) == NULL) -+ { -+ printk ("qsnet: failed to register /proc/qsnet/config/qsnetdebug\n"); -+ return (-ENXIO); -+ } -+ p->read_proc = proc_read_qsnetdebug; -+ p->write_proc = proc_write_qsnetdebug; -+ p->owner = THIS_MODULE; -+ p->data = NULL; -+ p->gid = qsnet_procfs_gid; -+ -+ return (0); -+} -+ -+static void __exit qsnet_exit(void) -+{ -+#ifdef KMEM_DEBUG -+ qsnet_kmem_display(0); -+#endif -+ qsnet_debug_fini(); -+ -+ remove_proc_entry ("qsnetdebug", qsnet_procfs_config); -+ remove_proc_entry ("kqsnet_debug_running", qsnet_procfs_config); -+#ifdef KMEM_DEBUG -+ remove_proc_entry ("kmem_debug", qsnet_procfs_config); 
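The /proc/qsnet/ioctl file registered in qsnet_init() below is the userspace entry point for the QSNETIO commands handled by qsnet_ioctl() earlier in this file. A hedged sketch of draining the debug ring through it (the qsnet/qsnetio.h include path is an assumption; QSNETIO_DEBUG_BUFFER_STRUCT carries the addr/len pair used by the handler, and the call blocks until output is available):

/* Hedged userspace sketch -- drain the kernel debug ring. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <qsnet/qsnetio.h>      /* assumed home of the QSNETIO_* definitions */

int main(void)
{
	static char buf[64 * 1024];
	QSNETIO_DEBUG_BUFFER_STRUCT args;
	int fd = open("/proc/qsnet/ioctl", O_RDWR);

	args.addr = buf;
	args.len  = sizeof(buf);
	if (fd >= 0 && ioctl(fd, QSNETIO_DEBUG_BUFFER, &args) == 0)
		fwrite(buf, 1, args.len, stdout);  /* len is rewritten with the byte count */
	close(fd);
	return 0;
}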
-+#endif -+ remove_proc_entry ("config", qsnet_procfs_root); -+ -+ remove_proc_entry ("version", qsnet_procfs_root); -+ remove_proc_entry ("ioctl", qsnet_procfs_root); -+ -+ remove_proc_entry ("qsnet", 0); -+} -+ -+/* Declare the module init and exit functions */ -+module_init(qsnet_init); -+module_exit(qsnet_exit); -+ -+#ifdef KMEM_DEBUG -+/* -+ * Kernel memory allocation. We maintain our own list of allocated mem -+ * segments so we can free them on module cleanup. -+ * -+ * We use kmalloc for allocations less than one page in size; vmalloc for -+ * larger sizes. -+ */ -+ -+typedef struct { -+ struct list_head list; -+ void *ptr; -+ int len; -+ int used_vmalloc; -+ void *owner; -+ void *caller; -+ unsigned int time; -+ int line; -+ char filename[20]; -+} kmalloc_t; -+ -+static LIST_HEAD(kmalloc_head); -+ -+static spinlock_t kmalloc_lock = SPIN_LOCK_UNLOCKED; -+ -+/* -+ * Kernel memory allocation. We use kmalloc for allocations less -+ * than one page in size; vmalloc for larger sizes. -+ */ -+ -+static int -+qsnet_kmem_fill(QSNET_PRIVATE *pd) -+{ -+ kmalloc_t *kp; -+ struct list_head *lp; -+ unsigned long flags; -+ char str[QSNET_KMEM_DEBUG_LINE_SIZE]; -+ QSNET_PRIVATE_SPACE * current_space; -+ int current_pos; -+ int len; -+ current_space = pd->space_chain; -+ current_pos = 0; -+ -+ -+ current_space->space[0] = 0; -+ spin_lock_irqsave(&kmalloc_lock, flags); -+ for (lp = kmalloc_head.next; lp != &kmalloc_head; lp = lp->next) { -+ kp = list_entry(lp, kmalloc_t, list); -+ -+ /* make the next line */ -+ sprintf(str,"%p %d %d %p %p %u %d %s\n", -+ kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->time, kp->line, kp->filename); -+ len = strlen(str); -+ -+ /* does it fit on the current page */ -+ if ( (current_pos + len + 1) >= QSNET_PRIVATE_PAGE_SIZE) -+ { -+ /* move onto next page */ -+ if ((current_space = current_space->next) == NULL) -+ { -+ /* run out of space !!!! */ -+ spin_unlock_irqrestore(&kmalloc_lock, flags); -+ return (1); -+ } -+ current_space->space[0] = 0; -+ current_pos = 0; -+ } -+ strcat( current_space->space + current_pos, str); -+ current_pos += len; -+ -+ /* remember how much we wrote to this page */ -+ current_space->size = current_pos; -+ -+ } -+ spin_unlock_irqrestore(&kmalloc_lock, flags); -+ -+ return (0); -+} -+ -+void * -+qsnet_kmem_alloc_debug(int len, int cansleep, int zerofill, char *file, int line) -+{ -+ void *new; -+ unsigned long flags; -+ kmalloc_t *kp; -+ -+ if (len < PAGE_SIZE || !cansleep) -+ new = kmalloc(len, cansleep ? GFP_KERNEL : GFP_ATOMIC); -+ else -+ new = vmalloc(len); -+ -+ if (len >= PAGE_SIZE) -+ ASSERT(PAGE_ALIGNED((uintptr_t) new)); -+ -+ if (new && zerofill) -+ memset(new,0,len); -+ -+ /* record allocation */ -+ kp = kmalloc(sizeof(kmalloc_t), cansleep ? 
GFP_KERNEL : GFP_ATOMIC); -+ ASSERT(kp != NULL); -+ kp->len = len; -+ kp->ptr = new; -+ kp->used_vmalloc = (len >= PAGE_SIZE || cansleep); -+ kp->owner = current; -+ kp->caller = __builtin_return_address(0); -+ kp->time = lbolt; -+ kp->line = line; -+ len = strlen(file); -+ -+ if (len > 18) -+ strcpy(kp->filename,&file[len-18]); -+ else -+ strcpy(kp->filename,file); -+ -+ spin_lock_irqsave(&kmalloc_lock, flags); -+ list_add(&kp->list, &kmalloc_head); -+ spin_unlock_irqrestore(&kmalloc_lock, flags); -+ -+ return new; -+} -+ -+void -+qsnet_kmem_free_debug(void *ptr, int len, char *file, int line) -+{ -+ unsigned long flags; -+ kmalloc_t *kp; -+ struct list_head *lp; -+ -+ spin_lock_irqsave(&kmalloc_lock, flags); -+ for (lp = kmalloc_head.next; lp != &kmalloc_head; lp = lp->next) { -+ kp = list_entry(lp, kmalloc_t, list); -+ if (kp->ptr == ptr) { -+ if (kp->len != len) -+ printk("qsnet_kmem_free_debug(%p) ptr %p len %d mismatch: expected %d caller %p owner %p (%s:%d)\n", -+ current, ptr, len, kp->len, __builtin_return_address(0), kp->caller, file, line); -+ list_del(lp); -+ kfree(kp); /* free off descriptor */ -+ break; -+ } -+ } -+ spin_unlock_irqrestore(&kmalloc_lock, flags); -+ -+ if (lp == &kmalloc_head) /* segment must be found */ -+ { -+ printk( "qsnet_kmem_free_debug(%p) ptr %p len %d not found: caller %p (%s:%d)\n", -+ current, ptr, len, __builtin_return_address(0), file, line); -+ } -+ -+ if ((((unsigned long) ptr) >= VMALLOC_START && ((unsigned long) ptr) < VMALLOC_END)) -+ vfree (ptr); -+ else -+ kfree (ptr); -+} -+ -+#else /* !KMEM_DEBUG */ -+ -+void * -+qsnet_kmem_alloc(int len, int cansleep, int zerofill) -+{ -+ void *new; -+ -+ if (len < PAGE_SIZE || !cansleep) -+ new = kmalloc(len, cansleep ? GFP_KERNEL : GFP_ATOMIC); -+ else -+ new = vmalloc(len); -+ -+ if (len >= PAGE_SIZE) -+ ASSERT(PAGE_ALIGNED((unsigned long) new)); -+ -+ if (new && zerofill) -+ memset(new,0,len); -+ -+ return new; -+} -+ -+void -+qsnet_kmem_free(void *ptr, int len) -+{ -+ if ((((unsigned long) ptr) >= VMALLOC_START && ((unsigned long) ptr) < VMALLOC_END)) -+ vfree (ptr); -+ else -+ kfree (ptr); -+} -+#endif /* !KMEM_DEBUG */ -+ -+void -+qsnet_kmem_display(void *handle) -+{ -+#ifdef KMEM_DEBUG -+ kmalloc_t *kp; -+ struct list_head *lp; -+ unsigned long flags; -+ int count = 0, totsize = 0; -+ -+ spin_lock_irqsave(&kmalloc_lock, flags); -+ for (lp = kmalloc_head.next; lp != &kmalloc_head; lp = lp->next) { -+ kp = list_entry(lp, kmalloc_t, list); -+ -+ if (!handle || handle == kp->owner) -+ { -+ printk("qsnet_kmem_display(%p): mem %p len %d unfreed caller %p (%p) \n", -+ handle, kp->ptr, kp->len, kp->caller, kp->owner); -+ -+ count++; -+ totsize += kp->len; -+ } -+ } -+ spin_unlock_irqrestore(&kmalloc_lock, flags); -+ -+ printk("qsnet_kmem_display(%p): %d bytes left in %d objects\n", handle, totsize, count); -+#endif -+} -+ -+physaddr_t -+kmem_to_phys(void *ptr) -+{ -+ virtaddr_t virt = (virtaddr_t) ptr; -+ physaddr_t phys; -+ pte_t *pte; -+ -+ if ((virt >= VMALLOC_START && virt < VMALLOC_END)) -+ { -+ pte = find_pte_kernel(virt); -+ ASSERT(pte && !pte_none(*pte)); -+ phys = pte_phys(*pte) + (virt & (PAGE_SIZE-1)); -+ } -+#if defined(PKMAP_BASE) -+ else if (virt >= PKMAP_BASE && virt < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE)) -+ { -+ pte = find_pte_kernel(virt); -+ ASSERT(pte && !pte_none(*pte)); -+ phys = pte_phys(*pte) + (virt & (PAGE_SIZE-1)); -+ } -+#endif -+#if defined(__ia64) -+ else if (virt >= __IA64_UNCACHED_OFFSET && virt < PAGE_OFFSET) -+ { -+ /* ia64 non-cached KSEG */ -+ phys = ((physaddr_t) ptr 
- __IA64_UNCACHED_OFFSET); -+ } -+#endif -+ else /* otherwise it's KSEG */ -+ { -+ phys = __pa(virt); -+ } -+ -+#if defined(CONFIG_ALPHA_GENERIC) || (defined(CONFIG_ALPHA_EV6) && !defined(USE_48_BIT_KSEG)) -+ /* -+ * with TS_BIAS as bit 40 - the tsunami pci space is mapped into -+ * the kernel at 0xfffff500.00000000 however we need to convert -+ * this to the true physical address 0x00000800.00000000. -+ * -+ * there is no need for PHYS_TWIDDLE since we knew we'd get a kernel -+ * virtual address already and handled this with __pa(). -+ */ -+ if (phys & (1ul << 40)) { -+ phys &= ~(1ul << 40); /* clear bit 40 (kseg I/O select) */ -+ phys |= (1ul << 43); /* set bit 43 (phys I/O select) */ -+ } -+#endif -+ return phys; -+} -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) -+ -+EXPORT_SYMBOL(pci_resource_size); -+EXPORT_SYMBOL(pci_get_base_address); -+EXPORT_SYMBOL(pci_base_to_kseg); -+ -+ -+/* -+ * PCI stuff. -+ * -+ * XXX pci_base_to_kseg() and pci_kseg_to_phys() are problematic -+ * in that they may not work on non-Tsunami (DS20, ES40, etc) -+ * architectures, and may not work in non-zero PCI bus numbers. -+ */ -+ -+unsigned long -+pci_get_base_address(struct pci_dev *pdev, int index) -+{ -+ unsigned long base; -+ -+ ASSERT(index >= 0 && index <= 5); -+ /* borrowed in part from drivers/scsi/sym53c8xx.c */ -+ base = pdev->base_address[index++]; -+ -+#if BITS_PER_LONG > 32 -+ if ((base & 0x7) == 0x4) -+ base |= (((unsigned long)pdev->base_address[index]) << 32); -+#endif -+ return base; -+} -+ -+unsigned long -+pci_resource_size(struct pci_dev *pdev, int index) -+{ -+ u32 addr, mask, size; -+ -+ static u32 bar_addr[] = { -+ PCI_BASE_ADDRESS_0, -+ PCI_BASE_ADDRESS_1, -+ PCI_BASE_ADDRESS_2, -+ PCI_BASE_ADDRESS_3, -+ PCI_BASE_ADDRESS_4, -+ PCI_BASE_ADDRESS_5, -+ }; -+ ASSERT(index >= 0 && index <= 5); -+ -+ /* algorithm from Rubini book */ -+ pci_read_config_dword (pdev, bar_addr[index], &addr); -+ pci_write_config_dword(pdev, bar_addr[index], ~0); -+ pci_read_config_dword (pdev, bar_addr[index], &mask); -+ pci_write_config_dword(pdev, bar_addr[index], addr); -+ -+ mask &= PCI_BASE_ADDRESS_MEM_MASK; -+ size = ~mask + 1; -+ return size; -+} -+ -+/* -+ * Convert BAR register value to KSEG address. -+ */ -+void * -+pci_base_to_kseg(u64 baddr, int bus) -+{ -+ u64 kseg; -+ -+ /* XXX tsunami specific */ -+ baddr &= ~(u64)0x100000000; /* mask out hose bit */ -+ kseg = TSUNAMI_MEM(bus) + baddr; -+ return (void *)kseg; -+} -+ -+#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,0) */ -+ -+/* -+ * Spin the other CPU's in an SMP system. -+ * smp_call_function() needed to be exported to modules. It will be -+ * papered over in if running on a non-SMP box. -+ */ -+static spinlock_t hold_lock = SPIN_LOCK_UNLOCKED; -+ -+#if 0 -+static void cpu_hold(void *unused) -+{ -+ spin_lock(&hold_lock); -+ spin_unlock(&hold_lock); -+} -+#endif -+ -+void cpu_hold_all(void) -+{ -+ spin_lock(&hold_lock); -+ -+#if 0 -+ { -+ int res; -+ int retries = 10; -+ -+ /* XXXXX: cannot call smp_call_function() from interrupt context */ -+ -+ do { -+ /* only request blocking retry if not in interrupt context */ -+ res = smp_call_function(cpu_hold, NULL, !in_interrupt(), 0); -+ if (res) -+ mdelay(5); -+ } while (res && retries--); -+ -+ if (res) -+ printk("cpu_hold_all: IPI timeout\n"); -+ } -+#endif -+} -+ -+void cpu_release_all(void) -+{ -+ spin_unlock(&hold_lock); -+} -+ -+void -+qsnet_proc_character_fill (long mode, char *fmt, ...) 
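qsnet_proc_character_fill(), defined next, appends printf-formatted text to the pr_data buffer of a QSNET_PROC_PRIVATE and pins pr_len at pr_data_len once the buffer overflows. A sketch of the intended fill-and-check pattern (the ring-stats arguments and the helper's name are hypothetical):

/* Hypothetical caller illustrating the overflow convention: */
static int fill_ring_stats(QSNET_PROC_PRIVATE *pr, unsigned long *bytes, int nrings)
{
	int i;

	for (i = 0; i < nrings; i++)
		qsnet_proc_character_fill((long) pr, "ring %d: %lu bytes\n", i, bytes[i]);

	/* pr_len >= pr_data_len means output was truncated: grow pr_data and retry */
	return (pr->pr_len >= pr->pr_data_len) ? -1 : 0;
}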
-+{ -+ int len; -+ va_list ap; -+ QSNET_PROC_PRIVATE *private = (QSNET_PROC_PRIVATE *)mode; -+ -+ /* is the buffer already full */ -+ if (private->pr_len >= private->pr_data_len) -+ return; -+ -+ /* attempt to fill up to the remaining space */ -+ va_start (ap, fmt); -+ len = vsnprintf ( & private->pr_data[private->pr_len], (private->pr_data_len - private->pr_len), fmt, ap); -+ va_end (ap); -+ -+ if (len < 0 ) -+ { -+ /* we have reached the end of buffer and need to fail all future writes -+ * the caller can check (pr_len >= pr_data_len) and recall with more space -+ */ -+ private->pr_len = private->pr_data_len; -+ return; -+ } -+ -+ /* move the length along */ -+ private->pr_len += len; -+} -+ -+int -+qsnet_proc_release (struct inode *inode, struct file *file) -+{ -+ QSNET_PROC_PRIVATE *pr = (QSNET_PROC_PRIVATE *) file->private_data; -+ -+ if (pr->pr_data) -+ KMEM_FREE (pr->pr_data, pr->pr_data_len); -+ kfree (pr); -+ -+ MOD_DEC_USE_COUNT; -+ return (0); -+} -+ -+EXPORT_SYMBOL(qsnet_proc_character_fill); -+EXPORT_SYMBOL(qsnet_proc_release); -+ -+/* -+ * Local variables: -+ * c-file-style: "linux" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/qsnet/Makefile linux-2.6.9/drivers/net/qsnet/qsnet/Makefile ---- clean/drivers/net/qsnet/qsnet/Makefile 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/qsnet/Makefile 2005-10-10 17:47:31.000000000 -0400 -@@ -0,0 +1,15 @@ -+# -+# Makefile for Quadrics QsNet -+# -+# Copyright (c) 2002-2004 Quadrics Ltd -+# -+# File: drivers/net/qsnet/qsnet/Makefile -+# -+ -+ -+# -+ -+obj-$(CONFIG_QSNET) += qsnet.o -+qsnet-objs := debug.o kernel_linux.o i686_mmx.o -+ -+EXTRA_CFLAGS += -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT -diff -urN clean/drivers/net/qsnet/qsnet/Makefile.conf linux-2.6.9/drivers/net/qsnet/qsnet/Makefile.conf ---- clean/drivers/net/qsnet/qsnet/Makefile.conf 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/qsnet/Makefile.conf 2005-09-07 10:39:34.000000000 -0400 -@@ -0,0 +1,10 @@ -+# Flags for generating QsNet Linux Kernel Makefiles -+MODNAME = qsnet.o -+MODULENAME = qsnet -+KOBJFILES = debug.o kernel_linux.o i686_mmx.o -+EXPORT_KOBJS = kernel_linux.o -+CONFIG_NAME = CONFIG_QSNET -+SGALFC = -+# EXTRALINES START -+ -+# EXTRALINES END -diff -urN clean/drivers/net/qsnet/qsnet/qsnetkmem_linux.c linux-2.6.9/drivers/net/qsnet/qsnet/qsnetkmem_linux.c ---- clean/drivers/net/qsnet/qsnet/qsnetkmem_linux.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/qsnet/qsnetkmem_linux.c 2003-08-13 06:03:27.000000000 -0400 -@@ -0,0 +1,325 @@ -+/* -+ * Copyright (c) 2003 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: qsnetkmem_linux.c,v 1.3 2003/08/13 10:03:27 fabien Exp $" -+/* $Source: /cvs/master/quadrics/qsnet/qsnetkmem_linux.c,v $*/ -+ -+/* macro macros */ -+#define MACRO_BEGIN do { -+#define MACRO_END } while (0) -+#define offsetof(T,F) ((int )&(((T *)0)->F)) -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define LIST_HEAD_INIT(name) { &(name), &(name) } -+ -+#define LIST_HEAD(name) \ -+ struct list_head name = LIST_HEAD_INIT(name) -+ -+typedef struct { -+ struct list_head list; -+ void *ptr; -+ int len; -+ int used_vmalloc; -+ void *owner; -+ void *caller; -+ unsigned int time; -+ int mark; -+ int line; -+ char file[256]; -+ -+} kmalloc_t; -+ -+ -+static LIST_HEAD(current_kmem); -+static LIST_HEAD(stored_kmem); -+ -+void -+count_kmem(struct list_head * list, long * count, long * size ) -+{ -+ long c,s; -+ struct list_head *tmp; -+ kmalloc_t *kmem_ptr = NULL; -+ -+ -+ c = s = 0L; -+ -+ list_for_each(tmp, list) { -+ kmem_ptr = list_entry(tmp, kmalloc_t , list); -+ c++; -+ s += kmem_ptr->len; -+ } -+ -+ *count = c; -+ *size = s; -+} -+ -+void -+clear_kmem(struct list_head * list) -+{ -+ struct list_head *tmp,*tmp2; -+ kmalloc_t *kmem_ptr = NULL; -+ -+ list_for_each_safe(tmp, tmp2, list) { -+ kmem_ptr = list_entry(tmp, kmalloc_t , list); -+ list_del_init(&kmem_ptr->list); -+ free( kmem_ptr ); -+ } -+} -+ -+void -+move_kmem(struct list_head * dest, struct list_head *src) -+{ -+ struct list_head *tmp,*tmp2; -+ kmalloc_t *kp= NULL; -+ -+ list_for_each_safe(tmp, tmp2, src) { -+ kp = list_entry(tmp, kmalloc_t , list); -+ list_del_init(&kp->list); -+ -+/* -+ printf("mem %p len %d (vm=%d) caller %p owner %p (%s:%d)\n", -+ kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->file, kp->line); -+*/ -+ -+ list_add_tail(&kp->list, dest); -+ } -+} -+ -+void -+read_kmem(struct list_head * list) -+{ -+ FILE * fd; -+ char line[1024]; -+ int line_size = 100; -+ char * rep; -+ kmalloc_t * kp; -+ -+ clear_kmem(list); -+ -+ fd = fopen(QSNET_PROCFS_KMEM_DEBUG,"r"); -+ if ( fd == NULL) -+ { -+ printf("No Kmem Debug\n"); -+ return; -+ } -+ -+ rep = fgets(line,line_size, fd); -+ -+ while ( rep != NULL ) -+ { -+ kp = malloc(sizeof(kmalloc_t)); -+ -+ sscanf(line,"%p %d %d %p %p %u %d %s\n", -+ &kp->ptr, &kp->len, &kp->used_vmalloc, &kp->caller, &kp->owner, &kp->time, &kp->line, &kp->file[0]); -+ -+/* -+ printf(">>%s<<\n",line); -+ printf("%p %d %d %p %p %u %d %s\n", -+ kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->time, kp->line, kp->file); -+*/ -+ -+ list_add_tail(&kp->list, list); -+ -+ rep = fgets(line,line_size, fd); -+ } -+ fclose(fd); -+} -+ -+void -+mark_kmem(struct list_head * list, int mark) -+{ -+ struct list_head *tmp; -+ kmalloc_t *kp = NULL; -+ -+ list_for_each(tmp, list) { -+ kp = list_entry(tmp, kmalloc_t , list); -+ -+ kp->mark = mark; -+ } -+} -+ -+kmalloc_t * -+find_kmem(kmalloc_t * value, struct list_head * list) -+{ -+ struct list_head *tmp; -+ kmalloc_t *kp = NULL; -+ -+ -+ list_for_each(tmp, list) { -+ kp = list_entry(tmp, kmalloc_t , list); -+ if ( (kp->ptr == value->ptr) -+ && (kp->len == value->len) -+ && (kp->used_vmalloc == value->used_vmalloc ) -+ && (kp->owner == value->owner ) -+ && (kp->caller == value->caller ) -+ && (kp->time == value->time ) -+ && (kp->line == value->line ) -+ && !(strcmp(kp->file,value->file) )) -+ return kp; -+ } -+ return NULL; -+} -+ -+void 
-+diff_kmem(struct list_head *curr, struct list_head *stored) -+{ -+ struct list_head *tmp; -+ kmalloc_t *kp = NULL; -+ long c,s; -+ -+ mark_kmem(stored, 0); -+ mark_kmem(curr, 0); -+ -+ list_for_each(tmp, stored) { -+ kp = list_entry(tmp, kmalloc_t , list); -+ if (find_kmem( kp, curr) != NULL) -+ kp->mark = 1; -+ } -+ -+ list_for_each(tmp, curr) { -+ kp = list_entry(tmp, kmalloc_t , list); -+ if (find_kmem( kp, stored) != NULL) -+ kp->mark = 1; -+ } -+ -+ c=s=0L; -+ list_for_each(tmp, stored) { -+ kp = list_entry(tmp, kmalloc_t , list); -+ if (kp->mark != 1) -+ { -+ printf("-- mem %p len %d (vm=%d) caller %p owner %p (%s:%d)\n", -+ kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->file, kp->line); -+ c++; -+ s+= kp->len; -+ } -+ } -+ printf("-- %4ld %10ld \n",c,s); -+ -+ c=s=0L; -+ list_for_each(tmp, curr) { -+ kp = list_entry(tmp, kmalloc_t , list); -+ if (kp->mark != 1) -+ { -+ printf("++ mem %p len %d (vm=%d) caller %p owner %p (%s:%d)\n", -+ kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->file, kp->line); -+ c++; -+ s+= kp->len; -+ } -+ } -+ printf("++ %4ld %10ld \n",c,s); -+} -+ -+ -+void -+print_kmem(struct list_head * list) -+{ -+ struct list_head *tmp; -+ kmalloc_t *kp = NULL; -+ -+ list_for_each(tmp, list) { -+ kp = list_entry(tmp, kmalloc_t , list); -+ -+ printf("mem %p len %d (vm=%d) caller %p owner %p (%s:%d)\n", -+ kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->file, kp->line); -+ -+ } -+} -+ -+void -+print_cmds() -+{ -+ long c,s; -+ -+ printf("q : quits \n"); -+ printf("r : read\n"); -+ printf("c : print current\n"); -+ printf("o : print stored\n"); -+ printf("s : store\n"); -+ -+ count_kmem(¤t_kmem, &c, &s ); -+ printf("\ncurrent : %4ld %10ld\n", c , s); -+ -+ count_kmem(&stored_kmem, &c, &s ); -+ printf("store : %4ld %10ld\n", c , s); -+ -+} -+ -+int -+main() -+{ -+ char line[128]; -+ int line_size=127; -+ int len; -+ -+ -+ while (1) -+ { -+ -+ printf(">> "); -+ fgets(line,line_size, stdin); -+ -+ -+ len = strlen( line ) -1; -+ if ( len ) -+ { -+ switch ( tolower(line[0]) ) -+ { -+ case 'q': -+ exit(0); -+ -+ case 'r' : -+ read_kmem(¤t_kmem); -+ break; -+ -+ case 'c' : -+ print_kmem(¤t_kmem); -+ break; -+ -+ case 'o' : -+ print_kmem(&stored_kmem); -+ break; -+ -+ case 's' : -+ clear_kmem(&stored_kmem); -+ move_kmem(&stored_kmem, ¤t_kmem); -+ break; -+ -+ case 'd' : -+ diff_kmem(¤t_kmem, &stored_kmem); -+ break; -+ -+ default: -+ print_cmds(); -+ } -+ -+ -+ -+ } -+ else -+ print_cmds(); -+ } -+ -+} -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "linux" -+ * End: -+ */ -diff -urN clean/drivers/net/qsnet/qsnet/quadrics_version.h linux-2.6.9/drivers/net/qsnet/qsnet/quadrics_version.h ---- clean/drivers/net/qsnet/qsnet/quadrics_version.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/qsnet/quadrics_version.h 2005-09-07 10:39:49.000000000 -0400 -@@ -0,0 +1 @@ -+#define QUADRICS_VERSION "5.11.3qsnet" -diff -urN clean/drivers/net/qsnet/rms/Makefile linux-2.6.9/drivers/net/qsnet/rms/Makefile ---- clean/drivers/net/qsnet/rms/Makefile 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/rms/Makefile 2005-10-10 17:47:31.000000000 -0400 -@@ -0,0 +1,15 @@ -+# -+# Makefile for Quadrics QsNet -+# -+# Copyright (c) 2002-2004 Quadrics Ltd -+# -+# File: drivers/net/qsnet/rms/Makefile -+# -+ -+ -+# -+ -+obj-$(CONFIG_RMS) += rms.o -+rms-objs := rms_kern.o rms_kern_Linux.o -+ -+EXTRA_CFLAGS += -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT -diff -urN clean/drivers/net/qsnet/rms/Makefile.conf 
linux-2.6.9/drivers/net/qsnet/rms/Makefile.conf ---- clean/drivers/net/qsnet/rms/Makefile.conf 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/rms/Makefile.conf 2005-09-07 10:39:48.000000000 -0400 -@@ -0,0 +1,10 @@ -+# Flags for generating QsNet Linux Kernel Makefiles -+MODNAME = rms.o -+MODULENAME = rms -+KOBJFILES = rms_kern.o rms_kern_Linux.o -+EXPORT_KOBJS = -+CONFIG_NAME = CONFIG_RMS -+SGALFC = -+# EXTRALINES START -+ -+# EXTRALINES END -diff -urN clean/drivers/net/qsnet/rms/quadrics_version.h linux-2.6.9/drivers/net/qsnet/rms/quadrics_version.h ---- clean/drivers/net/qsnet/rms/quadrics_version.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/rms/quadrics_version.h 2005-09-07 10:39:49.000000000 -0400 -@@ -0,0 +1 @@ -+#define QUADRICS_VERSION "5.11.3qsnet" -diff -urN clean/drivers/net/qsnet/rms/rms_kern.c linux-2.6.9/drivers/net/qsnet/rms/rms_kern.c ---- clean/drivers/net/qsnet/rms/rms_kern.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/rms/rms_kern.c 2005-09-07 10:35:04.000000000 -0400 -@@ -0,0 +1,1484 @@ -+/* -+ * Copyright (c) 1996-2003 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2004-2006 by Quadrics Ltd -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ * rms_kern.c: RMS kernel module -+ * -+ * $Source: /cvs/master/quadrics/rmsmod/rms_kern.c,v $ -+ */ -+ -+#ident "@(#)$Id: rms_kern.c,v 1.77.2.8 2005/09/07 14:35:04 mike Exp $" -+ -+#include -+#include -+#include -+#include -+ -+ -+/* -+ * extended stats added in version 5 -+ * patch free kernel support added in version 6 -+ */ -+#define RMS_MODVERSION 6 -+ -+#ifdef PROCESS_ACCT -+#define TIMEVAL_TO_MSEC(tv) ((tv)->tv_sec * 1000 + (tv)->tv_usec / 1000) -+#define TIMEVAL_TO_CT(tv) ((tv)->tv_sec * HZ + (tv)->tv_usec / (1000000L / HZ)) -+#endif -+ -+#ifdef get_mm_counter -+#define PROC_RSS(proc) ((proc)->mm ? get_mm_counter(proc->mm, rss) : 0) -+#else -+#ifdef RSS_ATOMIC -+#define PROC_RSS(proc) ((proc)->mm ? atomic_read(&(proc)->mm->rss) : 0) -+#else -+#define PROC_RSS(proc) ((proc)->mm ? 
(proc)->mm->rss : 0) -+#endif -+#endif -+ -+/* -+ * 2.6 kernels don't consistently export put_task_struct -+ */ -+ -+#ifdef free_task_struct -+#undef NO_PUTGET_TASK -+#else -+#define NO_PUTGET_TASK -+#endif -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) -+# define RMS_NCPUS() smp_num_cpus -+#else -+# define RMS_NCPUS() num_online_cpus() -+#endif -+ -+#define CURUID() CURPROC()->uid -+#define p_pid pid -+ -+ -+/* care needed with conversion to millisecs on 32-bit Linux */ -+#ifdef LINUX_I386 -+#define CT_TO_MSEC(x) ct_to_msec(x) -+ -+uint64_t ct_to_msec(clock_t t) -+{ -+ uint64_t msecs; -+ if (t < 2000000) -+ { -+ t = (1000 * t)/HZ; -+ msecs = t; -+ } -+ else -+ { -+ t = t / HZ; -+ msecs = t * 1000; -+ } -+ return(msecs); -+} -+#else -+#define CT_TO_MSEC(x) (((x) * 1000)/HZ) -+#endif -+ -+#ifndef FALSE -+#define FALSE (0) -+#define TRUE (!FALSE) -+#endif -+ -+#include -+#include -+#ifndef NO_PTRACK -+#include -+#endif -+#include -+#ifndef NO_SHM_CLEANUP -+extern int shm_cleanup(void); -+#endif -+ -+struct cap_desc { -+ -+ struct cap_desc *next; -+ int index; /* index of capability in program */ -+ ELAN_CAPABILITY cap; /* elan capability */ -+ -+}; -+ -+struct proc_desc { -+ -+ struct proc_desc *next; -+ struct task_struct *task; -+ struct prg_desc *program; /* controlling program */ -+ int mycap; /* index of my capability */ -+ int myctx; /* context number for process */ -+ int flags; -+ int vp; /* elan virtual process number */ -+ int pid; /* process id */ -+ -+ /* last set of stats sampled */ -+ uint64_t utime; -+ uint64_t stime; -+ uint64_t majflt; -+ int maxrss; -+ -+}; -+ -+struct prg_desc { -+ -+ struct prg_desc *next; -+ int id; /* program id */ -+ int flags; /* program status flags */ -+ uid_t uid; /* user id */ -+ int ncpus; /* number of cpus allocated to program */ -+ int nprocs; /* number of processes in program */ -+ struct proc_desc *pdescs; /* processes in this program */ -+ int ncaps; /* number of capabilities */ -+ struct cap_desc *caps; /* elan capabilities */ -+ char *corepath; /* core path for parallel program */ -+ int psid; /* processor set id */ -+ -+ uint64_t cutime; /* user time accumulated by children */ -+ uint64_t cstime; /* system time accumulated by children */ -+ uint64_t start_time; /* time program created */ -+ uint64_t end_time; /* time last process exited */ -+ uint64_t sched_time; /* last time job was scheduled */ -+ uint64_t accum_atime; /* allocated time last deschedule */ -+ uint64_t memint; /* accumulated memory integral */ -+ uint64_t ebytes; /* data transferred by the Elan(s) */ -+ uint64_t exfers; /* number of Elan data transfers */ -+ uint64_t majflt; /* number of major faults */ -+ int maxrss; /* maximum size to date */ -+ -+ struct proc_dir_entry *proc_entry; -+ -+}; -+ -+static int rms_ptrack_callback (void *arg, int phase, struct task_struct *child); -+ -+static void prgsignal(struct prg_desc *program, int signo); -+static uint64_t gettime(void); -+static void freeProgram(struct prg_desc *program); -+ -+static struct prg_desc *programs = 0; -+ -+kmutex_t rms_lock; -+ -+#ifdef NO_PTRACK -+int ptrack_enabled = 0; -+#else -+int ptrack_enabled = 1; -+#endif -+ -+int rms_init(void) -+{ -+ kmutex_init (&rms_lock); -+ -+ DBG(printk("rms: initialising ptrack %d\n", ptrack_enabled)); -+ -+ return(ESUCCESS); -+} -+ -+int rms_reconfigure(void) -+{ -+ return(ESUCCESS); -+} -+ -+int rms_programs_registered(void) -+{ -+ /* -+ ** Called when trying to unload rms.mod will not succeed -+ ** if programs registered -+ */ -+ -+ struct prg_desc *program, 
**pp; -+ -+ kmutex_lock(&rms_lock); -+ -+ for (program = programs; program; program = program->next) -+ { -+ if (program->nprocs != 0) -+ { -+ kmutex_unlock(&rms_lock); -+ return(EBUSY); -+ } -+ } -+ -+ /* -+ ** We have traversed the programs list and no processes registered -+ ** Now free the memory -+ */ -+ -+ pp = &programs; -+ while ((program = *pp) != NULL) -+ { -+ *pp = program->next; -+ freeProgram(program); -+ } -+ kmutex_unlock(&rms_lock); -+ -+ return(ESUCCESS); -+ -+} -+ -+int rms_fini(void) -+{ -+ /* -+ * don't allow an unload if there are programs registered -+ */ -+ if (rms_programs_registered()) -+ return(EBUSY); -+ -+ kmutex_destroy (&rms_lock); -+ -+ DBG(printk("rms: removed\n")); -+ -+ return(ESUCCESS); -+} -+ -+extern struct proc_dir_entry *rms_procfs_programs; -+ -+/* -+ * display one pid per line if there isn't enough space -+ * for another pid then add "...\n" and stop -+ */ -+int pids_callback(char* page, char** start, off_t off, int count, int* eof, void* data) -+{ -+ struct prg_desc *program = (struct prg_desc *)data; -+ struct proc_desc *pdesc; -+ char *ptr = page; -+ int bytes = 0, nb; -+ -+ kmutex_lock(&rms_lock); -+ -+ for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next) -+ { -+ if (bytes > count - 15) -+ { -+ bytes += sprintf(ptr,"...\n"); -+ break; -+ } -+ nb = sprintf(ptr, "%d %d\n", pdesc->pid, pdesc->vp); -+ bytes += nb; -+ ptr += nb; -+ } -+ kmutex_unlock(&rms_lock); -+ -+ return(bytes); -+} -+ -+int status_callback(char* page, char** start, off_t off, int count, int* eof, void* data) -+{ -+ struct prg_desc *program = (struct prg_desc *)data; -+ int bytes; -+ if (program->flags & PRG_KILLED) -+ bytes = sprintf(page, "killed\n"); -+ else if (program->flags & PRG_SUSPEND) -+ bytes = sprintf(page, "suspended\n"); -+ else -+ bytes = sprintf(page, "running\n"); -+ return(bytes); -+} -+ -+void rms_create_proc_entry(struct prg_desc *program) -+{ -+ struct proc_dir_entry *p; -+ char name[32]; -+ -+ if (rms_procfs_programs) -+ { -+ sprintf(name,"%d", program->id); -+ if ((program->proc_entry = proc_mkdir(name, rms_procfs_programs)) != NULL) -+ { -+ if ((p = create_proc_entry ("pids", S_IRUGO, program->proc_entry)) != NULL) -+ { -+ p->owner = THIS_MODULE; -+ p->data = program; -+ p->read_proc = pids_callback; -+ } -+ if ((p = create_proc_entry ("status", S_IRUGO, program->proc_entry)) != NULL) -+ { -+ p->owner = THIS_MODULE; -+ p->data = program; -+ p->read_proc = status_callback; -+ } -+ } -+ } -+} -+ -+void rms_remove_proc_entry(struct prg_desc *program) -+{ -+ char name[32]; -+ if (rms_procfs_programs) -+ { -+ if (program->proc_entry) -+ { -+ remove_proc_entry ("pids", program->proc_entry); -+ remove_proc_entry ("status", program->proc_entry); -+ } -+ sprintf(name,"%d", program->id); -+ remove_proc_entry (name, rms_procfs_programs); -+ } -+} -+ -+/* -+ * find a program from its index/pid -+ */ -+static struct prg_desc *findProgram(const int id) -+{ -+ struct prg_desc *program; -+ for (program = programs; program; program = program->next) -+ if (program->id == id) -+ return(program); -+ return(0); -+} -+ -+static struct proc_desc *findProcess(const int pid) -+{ -+ struct prg_desc *program; -+ struct proc_desc *pdesc; -+ -+ for (program = programs; program; program = program->next) -+ for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next) -+ if (pdesc->pid == pid) -+ return(pdesc); -+ -+ return(0); -+} -+ -+static void freeProgram(struct prg_desc *program) -+{ -+ struct proc_desc *pdesc; -+ struct cap_desc *cdesc; -+ -+ rms_remove_proc_entry(program); -+ 
-+ while ((pdesc = program->pdescs) != NULL) -+ { -+ program->pdescs = pdesc->next; -+ KMEM_FREE(pdesc, sizeof(struct proc_desc)); -+ } -+ -+ while ((cdesc = program->caps) != NULL) -+ { -+ program->caps = cdesc->next; -+ KMEM_FREE(cdesc, sizeof(struct cap_desc)); -+ } -+ -+ if (program->corepath) -+ KMEM_FREE(program->corepath, MAXCOREPATHLEN + 1); -+ -+ KMEM_FREE(program, sizeof(struct prg_desc)); -+ -+ MOD_DEC_USE_COUNT; -+} -+ -+/* -+ * rms_prgcreate -+ * -+ * create a new program description -+ */ -+int rms_prgcreate(int id, uid_t uid, int cpus) -+{ -+ struct prg_desc *program; -+ struct proc_desc *pdesc; -+ -+ DBG(printk("rms_prgcreate :: program %d pid %d uid %d cpus %d\n", id, CURPROC()->p_pid, uid, cpus)); -+ -+ /* -+ * parallel programs are created as root by the rmsd as it forks the loader -+ */ -+ if (CURUID()) -+ return(EACCES); -+ -+ /* -+ * program ids must be unique -+ */ -+ kmutex_lock(&rms_lock); -+ program = findProgram(id); -+ kmutex_unlock(&rms_lock); -+ if (program) -+ return(EINVAL); -+ -+ /* -+ * create a new program description -+ */ -+ KMEM_ALLOC(program, struct prg_desc *, sizeof(struct prg_desc), TRUE); -+ if (!program) -+ return(ENOMEM); -+ -+ program->id = id; -+ program->flags = PRG_RUNNING; -+ program->ncpus = cpus; -+ program->nprocs = 1; -+ program->uid = uid; -+ program->ncaps = 0; -+ program->caps = 0; -+ program->corepath = 0; -+ program->psid = 0; -+ program->start_time = program->sched_time = gettime(); -+ program->end_time = 0; -+ program->accum_atime = 0; -+ program->cutime = 0; -+ program->cstime = 0; -+ program->maxrss = 0; -+ program->memint = 0; -+ program->majflt = 0; -+ program->ebytes = 0; -+ program->exfers = 0; -+ -+ KMEM_ALLOC(pdesc, struct proc_desc *, sizeof(struct proc_desc), TRUE); -+ if (!pdesc) -+ { -+ KMEM_FREE (program,sizeof (struct prg_desc)); -+ return(ENOMEM); -+ } -+ -+ /* bump the reference count on the caller */ -+ pdesc->task = ptrack_enabled ? 
CURPROC() : NULL; -+ -+ pdesc->next = 0; -+ pdesc->mycap = ELAN_CAP_UNINITIALISED; -+ pdesc->myctx = ELAN_CAP_UNINITIALISED; -+ pdesc->vp = -1; /* rmsloader */ -+ pdesc->program = program; -+ pdesc->pid = CURPROC()->p_pid; -+ program->pdescs = pdesc; -+ -+ rms_create_proc_entry(program); -+ -+ kmutex_lock(&rms_lock); -+ -+#ifndef NO_PTRACK -+ -+ if (ptrack_enabled) -+ { -+ DBG(printk("rms_prgcreate :: ptrack enabled - fork callback\n")); -+ if (ptrack_register (rms_ptrack_callback, NULL) != 0) -+ { -+ kmutex_unlock(&rms_lock); -+ KMEM_FREE(pdesc,sizeof(struct proc_desc)); -+ KMEM_FREE(program,sizeof(struct prg_desc)); -+ return(ENOMEM); -+ } -+ } -+ else -+ { -+ DBG(printk("rms_prgcreate :: ptrack disabled - no fork callback\n")); -+ } -+#endif -+ -+ program->next = programs; -+ programs = program; -+ -+ MOD_INC_USE_COUNT; -+ -+ kmutex_unlock(&rms_lock); -+ return(ESUCCESS); -+} -+ -+ -+/* -+ * can't rely on put_task_struct being exported -+ * so we need to make sure that a proc is valid -+ * before extracting the stats -+ */ -+ -+int getProcessStats(struct proc_desc *pdesc) -+{ -+ struct task_struct *task = 0; -+ -+ if (ptrack_enabled) -+ task = pdesc->task; -+ else -+ { -+ read_lock(&tasklist_lock); -+ task = find_task_by_pid(pdesc->pid); -+ } -+ -+ if (task) -+ { -+#ifdef PROCESS_ACCT -+ pdesc->utime = TIMEVAL_TO_MSEC(&task->utime); -+ pdesc->stime = TIMEVAL_TO_MSEC(&task->stime); -+ -+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) -+ pdesc->utime = CT_TO_MSEC(task->times.tms_utime); -+ pdesc->stime = CT_TO_MSEC(task->times.tms_stime); -+ -+#else -+ pdesc->utime = CT_TO_MSEC(task->utime); -+ pdesc->stime = CT_TO_MSEC(task->stime); -+#endif -+ pdesc->majflt = task->maj_flt; -+ -+ /* -+ * the ptrack exit callbacks occur before exit_mm -+ * but if ptrack is not present we can get called -+ * with task->mm = 0 -+ */ -+ pdesc->maxrss = PROC_RSS(task) >> (20 - PAGE_SHIFT); -+ } -+ -+ if (!ptrack_enabled) -+ read_unlock(&tasklist_lock); -+ -+ return(task ? 
0 : -1); -+} -+ -+ -+ -+ -+/* -+ * remove a process descriptor keeping track of the -+ * accumulated resource usage -+ */ -+ -+static void removeProcDesc(struct prg_desc *program, struct proc_desc *pdesc) -+{ -+#ifndef NO_PTRACK -+ struct proc_desc *p; -+#endif -+ int maxrss; -+ -+ /* -+ * keep track of the resources used by processes that have -+ * exited, if ptrack is enabled then we will be called -+ * as the process exists, otherwise we will have the last -+ * sample -+ */ -+ getProcessStats(pdesc); -+ -+ program->cutime += pdesc->utime; -+ program->cstime += pdesc->stime; -+ program->majflt += pdesc->majflt; -+ maxrss = pdesc->maxrss; -+ -+ /* -+ * process specific shared memory cleanup requires the shm_cleanup -+ * patch, otherwise the run time system is left to do the job with -+ * a blunt axe -+ */ -+#ifndef NO_SHM_CLEANUP -+ shm_cleanup(); -+#endif -+ -+ /* -+ * remove process from program -+ */ -+ KMEM_FREE(pdesc, sizeof(struct proc_desc)); -+ program->nprocs--; -+ -+ /* -+ * update the memory high water mark for the program -+ * -+ * safe to access the task structures if we have incremented -+ * their reference counts as they were added to the program -+ * however, the mm can be zero -+ */ -+#ifndef NO_PTRACK -+ for (p = program->pdescs; p; p = p->next) -+ maxrss += PROC_RSS(p->task) >> (20 - PAGE_SHIFT); -+ -+ if (maxrss > program->maxrss) -+ program->maxrss = maxrss; -+#endif -+ -+ DBG(printk("rms_removproc :: program %d procs %d cutime %lld cstime %lld mem %d\n", -+ program->id, program->nprocs, -+ (long long)program->cutime, (long long)program->cstime, -+ program->maxrss)); -+ -+ /* -+ * final update to the program if this is the last process -+ */ -+ if (program->nprocs == 0) -+ { -+ program->end_time = gettime(); -+ program->flags &= ~PRG_RUNNING; -+ program->accum_atime += program->ncpus * (program->end_time - program->sched_time); -+ DBG(printk("rms_removproc :: last process has gone\n")); -+ } -+} -+ -+ -+/* -+ * rms_prgdestroy -+ * -+ * destroy a program description -+ */ -+int rms_prgdestroy(int id) -+{ -+ struct prg_desc *program, **pp; -+ struct proc_desc *pdesc; -+ -+ int status = ESRCH; -+ -+ /* -+ * parallel programs are created and destroyed by the rmsd -+ */ -+ if (CURUID()) -+ return(EACCES); -+ -+ kmutex_lock(&rms_lock); -+ -+ pp = &programs; -+ while ((program = *pp) != NULL) -+ { -+ if (program->id == id) -+ { -+ /* -+ * with ptrack disabled there won't be any exit callbacks -+ */ -+ if (!ptrack_enabled) -+ { -+ while ((pdesc = program->pdescs)) -+ { -+ program->pdescs = pdesc->next; -+ removeProcDesc(program, pdesc); -+ } -+ } -+ -+ if (program->nprocs == 0) -+ { -+ DBG(printk("rms_prgdestro :: removing program %d\n", program->id)); -+ *pp = program->next; -+ freeProgram(program); -+ status = ESUCCESS; -+ } -+ else -+ { -+ DBG(printk("rms_prgdestro :: failed to remove program %d: %d\n", program->id, program->nprocs)); -+ status = ECHILD; -+ pp = &program->next; -+ } -+ } -+ else -+ pp = &program->next; -+ } -+ -+ kmutex_unlock(&rms_lock); -+ return(status); -+} -+ -+/* -+ * rms_prgids -+ */ -+int rms_prgids(int maxids, int *prgids, int *nprgs) -+{ -+ struct prg_desc *program; -+ int count = 0, *buf, *bufp; -+ int status = ESUCCESS; -+ -+ if (maxids < 1) -+ return(EINVAL); -+ -+ kmutex_lock(&rms_lock); -+ -+ for (program = programs; program; program = program->next) -+ count++; -+ count = MIN(count, maxids); -+ -+ if (count > 0) -+ { -+ KMEM_ALLOC(buf, int *, count * sizeof(int), TRUE); -+ if (buf) -+ { -+ for (program = programs, bufp=buf; bufp < buf + 
count; -+ program = program->next) -+ *bufp++ = program->id; -+ -+ if (copyout(buf, prgids, sizeof(int) * count)) -+ status = EFAULT; -+ -+ KMEM_FREE(buf, count * sizeof(int)); -+ } -+ else -+ status = ENOMEM; -+ } -+ -+ if (copyout(&count, nprgs, sizeof(int))) -+ status = EFAULT; -+ -+ kmutex_unlock(&rms_lock); -+ -+ return(status); -+} -+ -+/* -+ * rms_prginfo -+ */ -+int rms_prginfo(int id, int maxpids, pid_t *pids, int *nprocs) -+{ -+ struct prg_desc *program; -+ struct proc_desc *pdesc; -+ pid_t *pidp, *buf; -+ int status = ESUCCESS; -+ -+ kmutex_lock(&rms_lock); -+ -+ if ((program = findProgram(id)) != NULL) -+ { -+ if (program->nprocs > 0) -+ { -+ KMEM_ALLOC(buf, pid_t *, program->nprocs * sizeof(pid_t), TRUE); -+ if (buf) -+ { -+ for (pidp = buf, pdesc = program->pdescs; pdesc; pdesc = pdesc->next) -+ *pidp++ = pdesc->pid; -+ -+ if (copyout(buf, pids, sizeof(pid_t) * MIN(program->nprocs, maxpids))) -+ status = EFAULT; -+ -+ KMEM_FREE(buf, program->nprocs * sizeof(pid_t)); -+ } -+ else -+ status = ENOMEM; -+ } -+ -+ if (copyout(&program->nprocs, nprocs, sizeof(int))) -+ status = EFAULT; -+ } -+ else -+ status = ESRCH; -+ -+ kmutex_unlock(&rms_lock); -+ -+ return(status); -+} -+ -+/* -+ * Deliver a signal to all the processes in a program -+ */ -+static void prgsignal(struct prg_desc *program, int signo) -+{ -+ struct proc_desc *pdesc; -+ DBG(printk("rms_prgsignal :: program %d signal %d\n", program->id, signo)); -+ for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next) -+ kill_proc(pdesc->pid, signo, 1); -+} -+ -+int rms_prgsignal(int id, int signo) -+{ -+ struct prg_desc *program; -+ int status = ESUCCESS; -+ -+ kmutex_lock(&rms_lock); -+ -+ if ((program = findProgram(id)) != NULL) -+ { -+ if (CURUID() == 0 || CURUID() == program->uid) -+ { -+ prgsignal(program, signo); -+ if (signo == SIGKILL) -+ program->flags |= PRG_KILLED; -+ } -+ else -+ status = EACCES; -+ } -+ else -+ status = ESRCH; -+ -+ kmutex_unlock(&rms_lock); -+ -+ return(status); -+} -+ -+int rms_prgaddcap(int id, int index, ELAN_CAPABILITY *cap) -+{ -+ struct prg_desc *program; -+ struct cap_desc *cdesc; -+ int status = ESUCCESS; -+ -+ if (cap == NULL) -+ return(EINVAL); -+ -+ kmutex_lock(&rms_lock); -+ if ((program = findProgram(id)) != NULL) -+ { -+ KMEM_ALLOC(cdesc, struct cap_desc *, sizeof(struct cap_desc), TRUE); -+ if (cdesc) -+ { -+ cdesc->index = index; -+ if (copyin(cap, &cdesc->cap, sizeof(ELAN_CAPABILITY))) -+ { -+ KMEM_FREE(cdesc, sizeof(struct cap_desc)); -+ status = EFAULT; -+ } -+ else -+ { -+ DBG(printk("rms_prgaddcap :: program %d index %d context %d<-->%d\n", -+ program->id, index, cdesc->cap.cap_lowcontext, cdesc->cap.cap_highcontext)); -+ cdesc->next = program->caps; -+ program->caps = cdesc; -+ program->ncaps++; -+ } -+ } -+ else -+ status = ENOMEM; -+ } -+ else -+ status = ESRCH; -+ -+ kmutex_unlock(&rms_lock); -+ return(status); -+} -+ -+static uint64_t gettime(void) -+{ -+ uint64_t now; -+ -+ struct timeval tv; -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) -+ get_fast_time(&tv); -+#else -+ do_gettimeofday(&tv); -+#endif -+ now = tv.tv_sec * 1000 + tv.tv_usec / 1000; -+ return(now); -+} -+ -+ -+/* -+ * new stats collection interface, 64-bit with addition of Elan stats -+ */ -+int rms_prggetstats(int id, prgstats_t *stats) -+{ -+ struct prg_desc *program = 0; -+ struct proc_desc *pdesc; -+ -+ int maxrss, status = ESUCCESS; -+ prgstats_t totals; -+ uint64_t now = gettime(); -+ -+ kmutex_lock(&rms_lock); -+ -+ if (id < 0) -+ { -+ if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL) -+ 
program = pdesc->program; -+ } -+ else -+ program = findProgram(id); -+ -+ if (program) -+ { -+ if (CURUID() == 0 || CURUID() == program->uid) -+ { -+ totals.flags = program->flags; -+ totals.ncpus = program->ncpus; -+ maxrss = 0; -+ -+ if (program->nprocs > 0) -+ totals.etime = now - program->start_time; -+ else -+ totals.etime = program->end_time - program->start_time; -+ -+ totals.atime = program->accum_atime; -+ if (program->flags & PRG_RUNNING) -+ totals.atime += program->ncpus * (now - program->sched_time); -+ -+ totals.utime = program->cutime; -+ totals.stime = program->cstime; -+ totals.pageflts = program->majflt; -+ totals.memint = program->memint; -+ -+ for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next) -+ { -+ getProcessStats(pdesc); -+ totals.utime += pdesc->utime; -+ totals.stime += pdesc->stime; -+ totals.pageflts += pdesc->majflt; -+ maxrss += pdesc->maxrss; -+ } -+ -+ if (maxrss > program->maxrss) -+ program->maxrss = maxrss; -+ -+ totals.mem = program->maxrss; -+ totals.ebytes = program->ebytes; -+ totals.exfers = program->exfers; -+ -+ DBG(printk("rms_prggetsta :: program %d utime %lld stime %lld mem %d flt %lld\n", -+ program->id, (long long)totals.utime, (long long)totals.stime, -+ totals.mem, (long long)totals.pageflts)); -+ -+ if (copyout(&totals, stats, sizeof(prgstats_t))) -+ status = EFAULT; -+ } -+ else -+ status = EACCES; -+ } -+ else -+ status = ESRCH; -+ -+ kmutex_unlock(&rms_lock); -+ return(status); -+} -+ -+int rms_prgsuspend(int id) -+{ -+ struct prg_desc *program; -+ int status = ESUCCESS; -+ -+ kmutex_lock(&rms_lock); -+ -+ if ((program = findProgram(id)) != NULL) -+ { -+ if (CURUID() == 0 || CURUID() == program->uid) -+ { -+ program->flags &= ~PRG_RUNNING; -+ program->flags |= PRG_SUSPEND; -+ program->accum_atime += program->ncpus * (gettime() - program->sched_time); -+ -+ /* suspend/resume just use signals for now */ -+ prgsignal(program, SIGSTOP); -+ } -+ else -+ status = EACCES; -+ } -+ else -+ status = ESRCH; -+ -+ kmutex_unlock(&rms_lock); -+ return(status); -+} -+ -+int rms_prgresume(int id) -+{ -+ struct prg_desc *program; -+ int status = ESUCCESS; -+ -+ kmutex_lock(&rms_lock); -+ -+ if ((program = findProgram(id)) != NULL) -+ { -+ if (CURUID() == 0 || CURUID() == program->uid) -+ { -+ program->flags &= ~PRG_SUSPEND; -+ program->flags |= PRG_RUNNING; -+ program->sched_time = gettime(); -+ prgsignal(program, SIGCONT); -+ } -+ else -+ status = EACCES; -+ } -+ else -+ status = ESRCH; -+ -+ kmutex_unlock(&rms_lock); -+ return(status); -+} -+ -+ -+int rms_ncaps(int *ncaps) -+{ -+ struct proc_desc *pdesc; -+ int status = ESUCCESS; -+ -+ kmutex_lock(&rms_lock); -+ if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL) -+ { -+ if (copyout(&pdesc->program->ncaps, ncaps, sizeof(int))) -+ status = EFAULT; -+ } -+ else -+ status = ESRCH; -+ -+ kmutex_unlock(&rms_lock); -+ return(status); -+} -+ -+int rms_getprgid(pid_t pid, int *id) -+{ -+ struct proc_desc *pdesc; -+ int status = ESUCCESS; -+ -+ if (pid == 0) -+ pid = CURPROC()->p_pid; -+ -+ kmutex_lock(&rms_lock); -+ if ((pdesc = findProcess(pid)) != NULL) -+ { -+ if (copyout(&pdesc->program->id, id, sizeof(int))) -+ status = EFAULT; -+ } -+ else -+ status = ESRCH; -+ -+ kmutex_unlock(&rms_lock); -+ return(status); -+} -+ -+int rms_setcap(int index, int ctx) -+{ -+ struct proc_desc *pdesc; -+ struct cap_desc *cdesc; -+ int status = EINVAL; -+ -+ DBG(printk("rms_setcap :: process %d cap %d ctx %d\n",CURPROC()->p_pid,index,ctx)); -+ -+ kmutex_lock(&rms_lock); -+ if ((pdesc = findProcess(CURPROC()->p_pid)) 
!= NULL) -+ { -+ for (cdesc = pdesc->program->caps; cdesc; cdesc = cdesc->next) -+ if (cdesc->index == index && 0 <= ctx && ctx <= (cdesc->cap.cap_highcontext - cdesc->cap.cap_lowcontext + 1)) -+ { -+ pdesc->mycap = index; -+ pdesc->myctx = cdesc->cap.cap_lowcontext + ctx; -+ status = ESUCCESS; -+ } -+ } -+ else -+ status = ESRCH; -+ -+ kmutex_unlock(&rms_lock); -+ return(status); -+} -+ -+ -+int rms_mycap(int *index) -+{ -+ struct proc_desc *pdesc; -+ int status = ESUCCESS; -+ -+ DBG(printk("rms_mycap :: process %d\n", CURPROC()->p_pid)); -+ -+ kmutex_lock(&rms_lock); -+ if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL) -+ { -+ DBG(printk("rms_mycap :: found process %d mycap = %d\n", CURPROC()->p_pid, pdesc->mycap)); -+ if (copyout(&pdesc->mycap, index, sizeof(int))) -+ status = EFAULT; -+ } -+ else -+ status = ESRCH; -+ -+ kmutex_unlock(&rms_lock); -+ return(status); -+} -+ -+int rms_getcap(int index, ELAN_CAPABILITY *cap) -+{ -+ struct proc_desc *pdesc; -+ struct cap_desc *cdesc; -+ int status = ESUCCESS; -+ -+ kmutex_lock(&rms_lock); -+ if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL) -+ { -+ for (cdesc = pdesc->program->caps; cdesc; cdesc = cdesc->next) -+ if (cdesc->index == index) -+ break; -+ -+ if (cdesc) -+ { -+ /* tell each process about its own context */ -+ cdesc->cap.cap_mycontext = pdesc->myctx; -+ -+ if (copyout(&cdesc->cap, cap, ELAN_CAP_SIZE(&cdesc->cap))) -+ status = EFAULT; -+ -+ DBG(printk("rms_getcap :: program %d index %d context %d<-->%d\n", pdesc->program->id, -+ cdesc->index, cdesc->cap.cap_lowcontext, cdesc->cap.cap_highcontext)); -+ } -+ else -+ status = EINVAL; -+ } -+ else -+ status = ESRCH; -+ -+ kmutex_unlock(&rms_lock); -+ return(status); -+} -+ -+static void -+addProcDesc (struct proc_desc *pdesc, -+ struct prg_desc *program, -+ struct proc_desc *parent, -+ int pid) -+{ -+ pdesc->mycap = (parent ? parent->mycap : ELAN_CAP_UNINITIALISED); -+ pdesc->myctx = (parent ? 
parent->myctx : ELAN_CAP_UNINITIALISED); -+ pdesc->program = program; -+ pdesc->vp = -1; /* assigned by elaninitdone */ -+ pdesc->pid = pid; -+ -+ pdesc->next = program->pdescs; -+ program->pdescs = pdesc; -+ program->nprocs++; -+} -+ -+static int -+rms_fork_callback (struct task_struct *curproc, struct task_struct *child) -+{ -+ struct prg_desc *program; -+ struct proc_desc *parent; -+ struct proc_desc *pdesc = NULL; -+ -+ kmutex_lock(&rms_lock); -+ -+ DBG(printk("rms_fork_func :: phase is fork pid %d child %d\n", curproc->p_pid, child->p_pid)); -+ -+ /* -+ * find the process that forked -+ */ -+ if ((parent = findProcess(curproc->p_pid)) != NULL) -+ { -+ program = parent->program; -+ -+ DBG(printk("rms_fork_func :: program is %d flags %d\n", program->id, program->flags)); -+ -+ /* -+ * processes can be blocked in fork while prgsignal is in progress -+ * so check to see if the PRG_KILLED flag is set -+ */ -+ if (program->flags & PRG_KILLED) -+ DBG(printk("rms_fork_func :: fork handler called after program killed\n")); -+ else -+ { -+ /* -+ * create a new process description and add to program -+ */ -+ KMEM_ALLOC(pdesc, struct proc_desc *, sizeof(struct proc_desc), TRUE); -+ if (pdesc == NULL) -+ printk("rms_creatproc :: memory allocation failed\n"); -+ else -+ { -+ addProcDesc (pdesc, program, parent, child->p_pid); -+ -+ pdesc->task = child; -+ -+ } -+ } -+ } -+ else -+ DBG(printk("rms_fork_func :: no program\n")); -+ -+ kmutex_unlock (&rms_lock); -+ -+ return(pdesc == NULL); -+} -+ -+static void -+rms_exit_callback (struct task_struct *curproc) -+{ -+ struct prg_desc *program; -+ struct proc_desc *pdesc, **pdescp; -+ -+ kmutex_lock(&rms_lock); -+ -+ DBG(printk("rms_exit_func :: process %d exiting\n", curproc->p_pid)); -+ -+ /* -+ * find the process that exited and accumulate -+ * resource usage in its parent program -+ */ -+ for (program = programs, pdesc = 0; program && !pdesc; program = program->next) -+ { -+ pdescp = &program->pdescs; -+ while ((pdesc = *pdescp) != NULL) -+ { -+ if (pdesc->pid == curproc->p_pid) -+ { -+ *pdescp = pdesc->next; -+ removeProcDesc(program, pdesc); -+ break; -+ } -+ else -+ pdescp = &pdesc->next; -+ } -+ } -+ kmutex_unlock (&rms_lock); -+} -+ -+#ifndef NO_PTRACK -+ -+static int -+rms_ptrack_callback (void *arg, int phase, struct task_struct *child) -+{ -+ switch (phase) -+ { -+ case PTRACK_PHASE_CLONE: -+ if (rms_fork_callback (current, child)) -+ return PTRACK_DENIED; -+ else -+ return PTRACK_INNHERIT; -+ -+ case PTRACK_PHASE_CLONE_FAIL: -+ DBG(printk("rms_fork_func :: fork failed pid %d child %d\n", current->p_pid, child->p_pid)); -+ rms_exit_callback(child); -+ break; -+ -+ case PTRACK_PHASE_EXIT: -+ rms_exit_callback(current); -+ break; -+ } -+ return PTRACK_FINISHED; -+} -+ -+#endif -+ -+/* -+ * rms_elaninitdone - mark a process as having successfully completed elan initialisation -+ */ -+int rms_elaninitdone(int vp) -+{ -+ int status = ESUCCESS; -+ struct proc_desc *pdesc; -+ -+ DBG(printk("rms_elaninit :: process %d vp %d\n", CURPROC()->p_pid, vp)); -+ -+ kmutex_lock(&rms_lock); -+ if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL) -+ pdesc->vp = vp; -+ else -+ status = ESRCH; -+ kmutex_unlock(&rms_lock); -+ return(status); -+} -+ -+ -+/* -+ * rms_prgelanpids - return the ids of processes that have completed elan initialisation -+ */ -+int rms_prgelanpids(int id, int maxpids, int *vps, pid_t *pids, int *npids) -+{ -+ struct prg_desc *program; -+ struct proc_desc *pdesc; -+ pid_t *pidbuf; -+ int status = ESUCCESS, count = 0, *vpbuf; -+ -+ 
DBG(printk("rms_elanpids :: process %d id %d\n", CURPROC()->p_pid, id)); -+ -+ kmutex_lock(&rms_lock); -+ -+ if ((program = findProgram(id)) != NULL) -+ { -+ if (program->nprocs > 0) -+ { -+ KMEM_ALLOC(pidbuf, pid_t *, program->nprocs * sizeof(pid_t), TRUE); -+ KMEM_ALLOC(vpbuf, int *, program->nprocs * sizeof(int), TRUE); -+ if (pidbuf && vpbuf) -+ { -+ for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next) -+ if (pdesc->vp >= 0) -+ { -+ pidbuf[count] = pdesc->pid; -+ vpbuf[count] = pdesc->vp; -+ count++; -+ } -+ -+ if (count > 0 && (copyout(pidbuf, pids, sizeof(pid_t) * MIN(count, maxpids)) || -+ copyout(vpbuf, vps, sizeof(int) * MIN(count, maxpids)))) -+ status = EFAULT; -+ -+ KMEM_FREE(pidbuf, program->nprocs * sizeof(pid_t)); -+ KMEM_FREE(vpbuf, program->nprocs * sizeof(int)); -+ } -+ else -+ status = ENOMEM; -+ } -+ -+ if (copyout(&count, npids, sizeof(int))) -+ status = EFAULT; -+ } -+ else -+ status = ESRCH; -+ -+ kmutex_unlock(&rms_lock); -+ -+ return(status); -+ -+} -+ -+int rms_setpset(int psid) -+{ -+ struct prg_desc *program; -+ struct proc_desc *pdesc; -+ int status = ESUCCESS; -+ -+ if (CURUID()) -+ return(EACCES); -+ -+ kmutex_lock(&rms_lock); -+ -+ if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL) -+ { -+ program = pdesc->program; -+ program->psid = psid; -+ } -+ else -+ status = ESRCH; -+ -+ kmutex_unlock(&rms_lock); -+ return(status); -+} -+ -+ -+int rms_getpset(int id, int *psid) -+{ -+ struct prg_desc *program; -+ int status = ESUCCESS; -+ -+ kmutex_lock(&rms_lock); -+ if ((program = findProgram(id)) != NULL) -+ { -+ if (copyout(&program->psid, psid, sizeof(int))) -+ status = EFAULT; -+ } -+ else -+ status = ESRCH; -+ -+ kmutex_unlock(&rms_lock); -+ return(status); -+} -+ -+int -+rms_setelanstats(int id, uint64_t ebytes, uint64_t exfers) -+{ -+ struct prg_desc *program; -+ int status = ESUCCESS; -+ -+ DBG(printk("rms_setelanst :: process %d id %d\n", CURPROC()->p_pid, id)); -+ -+ kmutex_lock(&rms_lock); -+ if ((program = findProgram(id)) != NULL) -+ { -+ if (CURUID() == 0 || CURUID() == program->uid) -+ { -+ program->ebytes = ebytes; -+ program->exfers = exfers; -+ } -+ else -+ status = EACCES; -+ } -+ else -+ status = ESRCH; -+ -+ kmutex_unlock(&rms_lock); -+ return(status); -+} -+ -+int -+rms_modversion(void) -+{ -+ return(RMS_MODVERSION); -+} -+ -+int -+rms_addproc(int id, pid_t pid) -+{ -+ struct prg_desc *program; -+ struct task_struct *task; -+ struct proc_desc *parent; -+ struct proc_desc *pdesc; -+ int status; -+ -+ DBG(printk("rms_addproc :: program %d proc %d pid %d\n", id, CURPROC()->p_pid, pid)); -+ -+ kmutex_lock(&rms_lock); -+ if ((program = findProgram(id)) != NULL) -+ { -+ if (CURUID() == 0 || CURUID() == program->uid) -+ { -+ if (findProcess(pid)) -+ status = ESRCH; -+ else -+ { -+ KMEM_ALLOC(pdesc, struct proc_desc *, sizeof(struct proc_desc), TRUE); -+ -+ if (pdesc == NULL) -+ status = ENOMEM; -+ else -+ { -+ read_lock(&tasklist_lock); -+ -+ if ((task = find_task_by_pid(pid)) == NULL) -+ status = ESRCH; -+ else -+ { -+#ifdef NO_NPTL -+ pid_t ppid = task->p_pptr->pid; -+#else -+ pid_t ppid = task->parent->pid; -+#endif -+ for (parent = program->pdescs; parent; parent = parent->next) -+ if (parent->pid == ppid) -+ break; -+ -+ addProcDesc (pdesc, program, parent, pid); -+ status = ESUCCESS; -+ } -+ -+ read_unlock (&tasklist_lock); -+ -+ if (status != ESUCCESS) -+ KMEM_FREE (pdesc, sizeof (struct proc_desc)); -+ } -+ } -+ } -+ else -+ status = EACCES; -+ } -+ else -+ status = ESRCH; -+ -+ kmutex_unlock(&rms_lock); -+ return(status); -+} -+ -+ 
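-+/* -+ * Illustrative sketch (editor's addition, not part of the original patch): -+ * with the patch-free kernel support, rms_addproc() above is reached from -+ * userspace through the RMSIO_ADDPROC ioctl on the /proc/rms/control entry -+ * created in rms_kern_Linux.c below. Field names follow the -+ * RMSIO_PROC_STRUCT usage in rms_ioctl(); the header providing the RMSIO_* -+ * definitions is elided in this rendering. -+ */ -+#if 0 /* userspace usage sketch */ -+#include <sys/types.h> -+#include <sys/ioctl.h> -+#include <fcntl.h> -+#include <unistd.h> -+ -+static int rms_add_pid_to_program(int prgid, pid_t pid) -+{ -+ RMSIO_PROC_STRUCT args = { .id = prgid, .pid = pid }; -+ int rc = -1; -+ int fd = open("/proc/rms/control", O_RDONLY); -+ -+ if (fd >= 0) -+ { -+ /* 0 on success, -1 with errno set (eg ESRCH, EACCES) on failure */ -+ rc = ioctl(fd, RMSIO_ADDPROC, &args); -+ close(fd); -+ } -+ return rc; -+} -+#endif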
-+int -+rms_removeproc(int id, pid_t pid) -+{ -+ struct prg_desc *program; -+ struct proc_desc *pdesc, **pdescp; -+ int status; -+ -+ DBG(printk("rms_removproc :: program %d proc %d pid %d\n", id, CURPROC()->p_pid, pid)); -+ -+ kmutex_lock(&rms_lock); -+ if ((program = findProgram(id)) != NULL) -+ { -+ if (CURUID() == 0 || CURUID() == program->uid) -+ { -+ status = ESRCH; -+ pdescp = &program->pdescs; -+ while ((pdesc = *pdescp) != NULL) -+ { -+ if (pdesc->pid == pid) -+ { -+ -+ *pdescp = pdesc->next; -+ removeProcDesc(program, pdesc); -+ status = ESUCCESS; -+ break; -+ } -+ else -+ pdescp = &pdesc->next; -+ } -+ } -+ else -+ status = EACCES; -+ } -+ else -+ status = ESRCH; -+ -+ kmutex_unlock(&rms_lock); -+ return(status); -+} -+ -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -+ -+ -+ -+ -+ -+ -+ -diff -urN clean/drivers/net/qsnet/rms/rms_kern_Linux.c linux-2.6.9/drivers/net/qsnet/rms/rms_kern_Linux.c ---- clean/drivers/net/qsnet/rms/rms_kern_Linux.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/drivers/net/qsnet/rms/rms_kern_Linux.c 2005-09-07 10:35:04.000000000 -0400 -@@ -0,0 +1,489 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "$Id: rms_kern_Linux.c,v 1.25.2.3 2005/09/07 14:35:04 mike Exp $" -+/* $Source: /cvs/master/quadrics/rmsmod/rms_kern_Linux.c,v $*/ -+ -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#ifndef NO_PTRACK -+#include -+#endif -+ -+#include -+#include -+ -+extern int ptrack_enabled; -+ -+MODULE_AUTHOR("Quadrics Ltd"); -+MODULE_DESCRIPTION("RMS support module"); -+MODULE_LICENSE("GPL"); -+ -+#ifndef NO_PTRACK -+module_param(ptrack_enabled, uint, 0); -+#endif -+ -+int rms_debug = 0; -+ -+ctl_table rms_table[] = { -+ { -+ .ctl_name = 1, -+ .procname = "rms_debug", -+ .data = &rms_debug, -+ .maxlen = sizeof(int), -+ .mode = 0644, -+ .child = NULL, -+ .proc_handler = &proc_dointvec, -+ }, -+ {0} -+}; -+ -+ctl_table rms_root_table[] = { -+ { -+ .ctl_name = CTL_DEBUG, -+ .procname = "rms", -+ .data = NULL, -+ .maxlen = 0, -+ .mode = 0555, -+ .child = rms_table, -+ }, -+ {0} -+}; -+ -+static struct ctl_table_header *rms_sysctl_header; -+ -+static int rms_open (struct inode *ino, struct file *fp); -+static int rms_release (struct inode *ino, struct file *fp); -+static int rms_ioctl (struct inode *inode, struct file *fp, unsigned int cmd, unsigned long arg); -+ -+#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64) -+static int -+rms_ioctl32_cmds[] = -+{ -+ RMSIO_GETPRGID32, -+ RMSIO_GETCAP32 -+}; -+ -+static int rms_ioctl32 (unsigned int fd, unsigned int cmd, -+ unsigned long arg, struct file *file); -+#endif -+ -+static struct file_operations rms_fops = -+{ -+ .owner = THIS_MODULE, -+ .ioctl = rms_ioctl, -+ .open = rms_open, -+ .release = rms_release, -+}; -+ -+struct proc_dir_entry *rms_procfs_programs; -+static struct proc_dir_entry *rms_procfs_root; -+ -+int version_callback(char* page, char** start, off_t off, int count, int* eof, void* data) -+{ -+ return(sprintf(page, "$Id: rms_kern_Linux.c,v 1.25.2.3 2005/09/07 14:35:04 mike Exp $\n")); -+} -+ -+int ptrack_callback(char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ if (ptrack_enabled) -+ return(sprintf(page, "enabled\n")); -+ else -+ return(sprintf(page, "disabled\n")); -+} -+ -+ -+static int __init rms_start(void) -+{ -+ struct proc_dir_entry *p; -+ int res; -+ -+ if 
((rms_sysctl_header = register_sysctl_table(rms_root_table, 1)) == 0) -+ { -+ printk ("rmsmod: failed to register sysctl table\n"); -+ return (-ENXIO); -+ } -+ -+ if ((rms_procfs_root = proc_mkdir("rms", NULL)) == NULL || -+ (rms_procfs_programs = proc_mkdir("programs", rms_procfs_root)) == NULL || -+ (p = create_proc_entry ("control", S_IRUGO, rms_procfs_root)) == NULL) -+ { -+ unregister_sysctl_table (rms_sysctl_header); -+ printk ("rmsmod: failed to register /proc/rms\n"); -+ return (-ENXIO); -+ } -+ p->proc_fops = &rms_fops; -+ p->owner = THIS_MODULE; -+ p->data = NULL; -+ -+ if ((p = create_proc_entry ("version", S_IRUGO, rms_procfs_root)) != NULL) -+ { -+ p->owner = THIS_MODULE; -+ p->data = NULL; -+ p->read_proc = version_callback; -+ } -+ -+#ifndef NO_PTRACK -+ if ((p = create_proc_entry ("ptrack", S_IRUGO, rms_procfs_root)) != NULL) -+ { -+ p->owner = THIS_MODULE; -+ p->data = NULL; -+ p->read_proc = ptrack_callback; -+ } -+#endif -+ -+ if ((res = rms_init()) != ESUCCESS) -+ { -+#ifndef NO_PTRACK -+ remove_proc_entry ("ptrack", rms_procfs_root); -+#endif -+ remove_proc_entry ("version", rms_procfs_root); -+ remove_proc_entry ("programs", rms_procfs_root); -+ remove_proc_entry ("control", rms_procfs_root); -+ remove_proc_entry ("rms", NULL); -+ unregister_sysctl_table (rms_sysctl_header); -+ return (-res); -+ } -+ -+#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64) -+ lock_kernel(); -+ { -+ extern int register_ioctl32_conversion(unsigned int cmd, int (*handler)(unsigned int, unsigned int, unsigned long, struct file *)); -+ register int i; -+ for (i = 0; i < sizeof (rms_ioctl32_cmds)/sizeof(rms_ioctl32_cmds[0]); i++) -+ register_ioctl32_conversion (rms_ioctl32_cmds[i], rms_ioctl32); -+ } -+ unlock_kernel(); -+#endif -+ return (0); -+} -+ -+static void __exit rms_exit(void) -+{ -+ rms_fini(); -+ -+#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64) -+ lock_kernel(); -+ { -+ extern void unregister_ioctl32_conversion(unsigned int cmd); -+ register int i; -+ -+ for (i = 0; i < sizeof (rms_ioctl32_cmds)/sizeof(rms_ioctl32_cmds[0]); i++) -+ unregister_ioctl32_conversion (rms_ioctl32_cmds[i]); -+ } -+ unlock_kernel(); -+#endif -+ -+#ifndef NO_PTRACK -+ remove_proc_entry ("ptrack", rms_procfs_root); -+#endif -+ remove_proc_entry ("version", rms_procfs_root); -+ remove_proc_entry ("programs", rms_procfs_root); -+ remove_proc_entry ("control", rms_procfs_root); -+ remove_proc_entry ("rms", NULL); -+ unregister_sysctl_table(rms_sysctl_header); -+} -+ -+/* Declare the module init and exit functions */ -+module_init(rms_start); -+module_exit(rms_exit); -+ -+static int -+rms_open (struct inode *inode, struct file *fp) -+{ -+ MOD_INC_USE_COUNT; -+ fp->private_data = NULL; -+ -+ return (0); -+} -+ -+static int -+rms_release (struct inode *inode, struct file *fp) -+{ -+ MOD_DEC_USE_COUNT; -+ return (0); -+} -+ -+static int -+rms_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, unsigned long arg) -+{ -+ int res; -+ -+ /* printk ("rmsmod: ioctl %x\n", cmd); */ -+ -+ switch (cmd) -+ { -+/* no corepath support in Linux yet */ -+#if 0 -+ case RMSIO_SETCOREPATH: -+ res = rms_setcorepath((caddr_t)arg); -+ break; -+ -+ case RMSIO_GETCOREPATH: -+ { -+ RMSIO_GETCOREPATH_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (args))) -+ return (-EFAULT); -+ -+ res = rms_getcorepath(args.pid, args.corepath, args.maxlen); -+ break; -+ } -+#endif -+ -+ case RMSIO_PRGCREATE: -+ { -+ RMSIO_PRGCREATE_STRUCT args; -+ -+ if 
(copy_from_user (&args, (void *) arg, sizeof (args))) -+ return (-EFAULT); -+ -+ res = rms_prgcreate(args.id, args.uid, args.cpus); -+ break; -+ } -+ -+ case RMSIO_PRGDESTROY: -+ res = rms_prgdestroy(arg); -+ break; -+ -+ case RMSIO_PRGIDS: -+ { -+ RMSIO_PRGIDS_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (args))) -+ return (-EFAULT); -+ -+ res = rms_prgids(args.maxids, args.prgids, args.nprgs); -+ break; -+ } -+ -+ case RMSIO_PRGINFO: -+ { -+ RMSIO_PRGINFO_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (args))) -+ return (-EFAULT); -+ -+ res = rms_prginfo(args.id, args.maxpids, args.pids, args.nprocs); -+ break; -+ } -+ -+ case RMSIO_PRGSIGNAL: -+ { -+ RMSIO_PRGSIGNAL_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (args))) -+ return (-EFAULT); -+ -+ res = rms_prgsignal(args.id, args.signo); -+ break; -+ } -+ -+ case RMSIO_PRGADDCAP: -+ { -+ RMSIO_PRGADDCAP_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (args))) -+ return (-EFAULT); -+ -+ res = rms_prgaddcap(args.id, args.index, args.cap); -+ break; -+ } -+ -+ case RMSIO_SETCAP: -+ { -+ RMSIO_SETCAP_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (args))) -+ return (-EFAULT); -+ -+ res = rms_setcap(args.index, args.ctx); -+ break; -+ } -+ -+ case RMSIO_NCAPS: -+ res = rms_ncaps((int *)arg); -+ break; -+ -+ case RMSIO_GETPRGID: -+ { -+ RMSIO_GETPRGID_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (args))) -+ return (-EFAULT); -+ -+ res = rms_getprgid(args.pid, args.id); -+ break; -+ } -+ -+ case RMSIO_GETMYCAP: -+ res = rms_mycap((int *)arg); -+ break; -+ -+ case RMSIO_GETCAP: -+ { -+ RMSIO_GETCAP_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (args))) -+ return (-EFAULT); -+ -+ res = rms_getcap(args.index, args.cap); -+ break; -+ } -+ -+ case RMSIO_PRGGETSTATS: -+ { -+ /* no longer supported */ -+ res = EINVAL; -+ break; -+ } -+ -+ case RMSIO_PRGGETSTATS2: -+ { -+ RMSIO_PRGGETSTATS2_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (args))) -+ return (-EFAULT); -+ -+ res = rms_prggetstats(args.id, args.stats); -+ break; -+ } -+ -+ case RMSIO_PRGSUSPEND: -+ res = rms_prgsuspend(arg); -+ break; -+ -+ case RMSIO_PRGRESUME: -+ res = rms_prgresume(arg); -+ break; -+ -+ case RMSIO_ELANINITDONE: -+ res = rms_elaninitdone(arg); -+ break; -+ -+ case RMSIO_PRGELANPIDS: -+ { -+ RMSIO_PRGELANPIDS_STRUCT args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (args))) -+ return (-EFAULT); -+ -+ res = rms_prgelanpids(args.id, args.maxpids, args.vps, args.pids, args.npids); -+ break; -+ } -+ -+ case RMSIO_SETELANSTATS: -+ { -+ RMSIO_SETELANSTATS_STRUCT args; -+ elanstats_t estats; -+ -+ if (copy_from_user(&args, (void *)arg, sizeof(args)) || -+ copy_from_user(&estats, (void *)args.estats, sizeof(estats))) -+ return(-EFAULT); -+ -+ res = rms_setelanstats(args.id, estats.ebytes, estats.exfers); -+ break; -+ } -+ -+ case RMSIO_MODVERSION: -+ { -+ RMSIO_MODVERSION_STRUCT args; -+ int version = rms_modversion(); -+ -+ if (copy_from_user (&args, (void *)arg, sizeof (args))) -+ return (-EFAULT); -+ -+ if (copyout(&version, args.version, sizeof(int))) -+ res = EFAULT; -+ else -+ res = ESUCCESS; -+ -+ break; -+ } -+ -+ /* -+ * Patch free kernel support, proc entries added manually -+ */ -+ case RMSIO_ADDPROC: -+ { -+ RMSIO_PROC_STRUCT args; -+ -+ if (copy_from_user (&args, (void *)arg, sizeof (args))) -+ return (-EFAULT); -+ -+ res = rms_addproc(args.id, args.pid); -+ break; -+ } -+ case 
RMSIO_REMOVEPROC: -+ { -+ RMSIO_PROC_STRUCT args; -+ -+ if (copy_from_user (&args, (void *)arg, sizeof (args))) -+ return (-EFAULT); -+ -+ res = rms_removeproc(args.id, args.pid); -+ break; -+ } -+ -+ default: -+ res = EINVAL; -+ break; -+ } -+ -+ /* printk ("rmsmod: ioctl %x res %d\n", cmd, res); */ -+ -+ return ((res == 0) ? 0 : -res); -+} -+ -+#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64) -+static int -+rms_ioctl32 (unsigned int fd, unsigned int cmd, unsigned long arg, struct file *file) -+{ -+ int res; -+ -+ switch (cmd) -+ { -+ case RMSIO_GETPRGID32: -+ { -+ RMSIO_GETPRGID_STRUCT32 args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (args))) -+ return (-EFAULT); -+ -+ res = rms_getprgid(args.pid, (int *)(unsigned long) args.idptr); -+ break; -+ } -+ -+ case RMSIO_GETCAP32: -+ { -+ RMSIO_GETCAP_STRUCT32 args; -+ -+ if (copy_from_user (&args, (void *) arg, sizeof (args))) -+ return (-EFAULT); -+ -+ res = rms_getcap(args.index, (ELAN_CAPABILITY *)(unsigned long) args.capptr); -+ break; -+ } -+ -+ default: -+ return (sys_ioctl (fd, cmd, arg)); -+ } -+ -+ return ((res == 0) ? 0 : -res); -+} -+#endif -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/drivers/net/Kconfig linux-2.6.9/drivers/net/Kconfig ---- clean/drivers/net/Kconfig 2005-10-10 18:50:31.000000000 -0400 -+++ linux-2.6.9/drivers/net/Kconfig 2005-10-10 18:50:34.000000000 -0400 -@@ -2271,6 +2271,8 @@ - - source "drivers/net/tokenring/Kconfig" - -+source "drivers/net/qsnet/Kconfig" -+ - source "drivers/net/wireless/Kconfig" - - source "drivers/net/pcmcia/Kconfig" ---- clean/drivers/net/Makefile 2005-10-10 18:59:11.000000000 -0400 -+++ linux-2.6.9/drivers/net/Makefile 2005-10-10 18:59:28.000000000 -0400 -@@ -197,3 +197,4 @@ - - obj-$(CONFIG_NETCONSOLE) += netconsole.o - obj-$(CONFIG_NETDUMP) += netdump.o -+obj-$(CONFIG_QSNET) += qsnet/ -diff -urN clean/fs/exec.c linux-2.6.9/fs/exec.c ---- clean/fs/exec.c 2005-10-10 17:43:57.000000000 -0400 -+++ linux-2.6.9/fs/exec.c 2005-10-10 17:47:17.000000000 -0400 -@@ -54,6 +54,8 @@ - #include - #endif - -+#include -+ - int core_uses_pid; - char core_pattern[65] = "core"; - int suid_dumpable = 0; -@@ -1175,6 +1177,9 @@ - if (retval < 0) - goto out; - -+ /* notify any ptrack callbacks of the process exec */ -+ ptrack_call_callbacks(PTRACK_PHASE_EXEC, NULL); -+ - retval = search_binary_handler(bprm,regs); - if (retval >= 0) { - free_arg_pages(bprm); -diff -urN clean/fs/open.c linux-2.6.9/fs/open.c ---- clean/fs/open.c 2005-10-10 17:43:57.000000000 -0400 -+++ linux-2.6.9/fs/open.c 2005-10-10 17:47:17.000000000 -0400 -@@ -1029,6 +1029,8 @@ - goto out; - } - -+EXPORT_SYMBOL(sys_open); -+ - #ifndef __alpha__ - - /* -diff -urN clean/fs/read_write.c linux-2.6.9/fs/read_write.c ---- clean/fs/read_write.c 2005-05-13 13:39:11.000000000 -0400 -+++ linux-2.6.9/fs/read_write.c 2005-10-10 17:47:17.000000000 -0400 -@@ -145,6 +145,7 @@ - bad: - return retval; - } -+EXPORT_SYMBOL(sys_lseek); - - #ifdef __ARCH_WANT_SYS_LLSEEK - asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high, -diff -urN clean/fs/select.c linux-2.6.9/fs/select.c ---- clean/fs/select.c 2005-05-13 13:39:11.000000000 -0400 -+++ linux-2.6.9/fs/select.c 2005-10-10 17:47:17.000000000 -0400 -@@ -529,3 +529,4 @@ - poll_freewait(&table); - return err; - } -+EXPORT_SYMBOL_GPL(sys_poll); -diff -urN clean/include/elan/bitmap.h linux-2.6.9/include/elan/bitmap.h ---- clean/include/elan/bitmap.h 1969-12-31 19:00:00.000000000 -0500 -+++ 
linux-2.6.9/include/elan/bitmap.h 2004-01-20 12:32:15.000000000 -0500 -@@ -0,0 +1,74 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __QSNET_BITMAP_H -+#define __QSNET_BITMAP_H -+ -+#ident "$Id: bitmap.h,v 1.5 2004/01/20 17:32:15 david Exp $" -+/* $Source: /cvs/master/quadrics/elanmod/modsrc/bitmap.h,v $ */ -+ -+typedef unsigned int bitmap_t; -+ -+#define BT_NBIPUL 32 /* n bits per bitmap_t */ -+#define BT_ULSHIFT 5 /* log 2 BT_NBIPUL to extract word index */ -+#define BT_ULMASK 0x1f /* to extract bit index */ -+ -+#define BT_WIM(bitmap,bitindex) ((bitmap)[(bitindex) >> BT_ULSHIFT]) /* word in map */ -+#define BT_BIW(bitindex) (1 << ((bitindex) & BT_ULMASK)) /* bit in word */ -+ -+/* BT_BITOUL -- n bits to n words */ -+#define BT_BITOUL(nbits) (((nbits) + BT_NBIPUL -1) / BT_NBIPUL) -+ -+#define BT_TEST(bitmap,bitindex) ((BT_WIM((bitmap), (bitindex)) & BT_BIW(bitindex)) ? 1 : 0) -+#define BT_SET(bitmap,bitindex) do { BT_WIM((bitmap), (bitindex)) |= BT_BIW(bitindex); } while (0) -+#define BT_CLEAR(bitmap,bitindex) do { BT_WIM((bitmap), (bitindex)) &= ~BT_BIW(bitindex); } while (0) -+ -+/* return first free bit in the bitmap, or -1 for failure */ -+extern int bt_freebit (bitmap_t *bitmap, int nbits); -+ -+/* return the index of the lowest set bit in the bitmap or -1 for failure */ -+extern int bt_lowbit (bitmap_t *bitmap, int nbits); -+ -+/* return the index of the next set/clear bit in the bitmap or -1 for failure */ -+extern int bt_nextbit (bitmap_t *bitmap, int nbits, int last, int isset); -+ -+/* copy/zero/fill/compare a bit map */ -+extern void bt_copy (bitmap_t *a, bitmap_t *b, int nbits); -+extern void bt_zero (bitmap_t *a, int nbits); -+extern void bt_fill (bitmap_t *a, int nbits); -+extern int bt_cmp (bitmap_t *a, bitmap_t *b, int nbits); -+ -+/* intersect bitmap 'a' with bitmap 'b' and return in 'a' */ -+extern void bt_intersect (bitmap_t *a, bitmap_t *b, int nbits); -+ -+/* remove/add bitmap 'b' from bitmap 'a' */ -+extern void bt_remove (bitmap_t *a, bitmap_t *b, int nbits); -+extern void bt_add (bitmap_t *a, bitmap_t *b, int nbits); -+ -+/* check whether bitmap 'a' spans bitmap 'b' */ -+extern int bt_spans (bitmap_t *a, bitmap_t *b, int nbits); -+ -+/* copy [base,base+nbits-1] from 'a' to 'b' */ -+extern void bt_subset (bitmap_t *a, bitmap_t *b, int base, int nbits); -+ -+/* find bits clear in 'a' and set in 'b', put result in 'c' */ -+extern void bt_up (bitmap_t *a, bitmap_t *b, bitmap_t *c, int nbits); -+ -+/* find bits set in 'a' and clear in 'b', put result in 'c' */ -+extern void bt_down (bitmap_t *a, bitmap_t *b, bitmap_t *c, int nbits); -+ -+/* return number of bits set in bitmap */ -+extern int bt_nbits (bitmap_t *a, int nbits); -+ -+ -+#endif /* __QSNET_BITMAP_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "linux" -+ * End: -+ */ -diff -urN clean/include/elan/capability.h linux-2.6.9/include/elan/capability.h ---- clean/include/elan/capability.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan/capability.h 2005-05-17 05:52:53.000000000 -0400 -@@ -0,0 +1,198 @@ -+/* -+ * Copyright (c) 2003 by Quadrics Limited. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: capability.h,v 1.18 2005/05/17 09:52:53 addy Exp $" -+/* $Source: /cvs/master/quadrics/elanmod/modsrc/capability.h,v $*/ -+ -+#ifndef __ELAN_CAPABILITY_H -+#define __ELAN_CAPABILITY_H -+ -+#include -+ -+/* Maximum number of rails */ -+#define ELAN_MAX_RAILS (31) -+/* Maximum number of virtual processes we support */ -+#define ELAN_MAX_VPS (16384) -+ -+/* Number of words in a bitmap capability */ -+#define ELAN_BITMAPSIZE BT_BITOUL(ELAN_MAX_VPS) -+ -+/* Guaranteed invalid values */ -+#define ELAN_INVALID_PROCESS (0x7fffffff) /* A GUARANTEED invalid process # */ -+#define ELAN_INVALID_NODE (0xFFFF) -+#define ELAN_INVALID_CONTEXT (0xFFFF) -+ -+/* Number of values in a user key */ -+#define ELAN_USERKEY_ENTRIES 4 -+ -+typedef void * ELAN_CAP_OWNER; -+ -+/* -+ * When used in userspace this is relative to the base of -+ * the capabality but is an absolute location for kernel space. -+ */ -+typedef struct elan_location -+{ -+ unsigned short loc_node; -+ unsigned short loc_context; -+} ELAN_LOCATION; -+ -+typedef struct elan_userkey -+{ -+ unsigned key_values[ELAN_USERKEY_ENTRIES]; -+} ELAN_USERKEY; -+ -+typedef struct elan_capability -+{ -+ ELAN_USERKEY cap_userkey; /* User defined protection */ -+ -+ int cap_version; /* Version number */ -+ unsigned short cap_type; /* Capability Type */ -+ unsigned short cap_spare; /* spare was cap_elan_type */ -+ -+ int cap_lowcontext; /* low context number in block */ -+ int cap_highcontext; /* high context number in block */ -+ int cap_mycontext; /* my context number */ -+ -+ int cap_lownode; /* low elan id of group */ -+ int cap_highnode; /* high elan id of group */ -+ -+ unsigned int cap_railmask; /* which rails this capability is valid for */ -+ -+ bitmap_t cap_bitmap[ELAN_BITMAPSIZE]; /* Bitmap of process to processor translation */ -+} ELAN_CAPABILITY; -+ -+#define ELAN_CAP_UNINITIALISED (-1) -+ -+#define ELAN_CAP_VERSION_NUMBER (0x00010002) -+ -+#define ELAN_CAP_NUM_NODES(cap) ((cap)->cap_highnode - (cap)->cap_lownode + 1) -+#define ELAN_CAP_NUM_CONTEXTS(cap) ((cap)->cap_highcontext - (cap)->cap_lowcontext + 1) -+ -+/* using or defining our own MIN/MAX had confilicts with dunix so we define ELAN_ ones */ -+#define ELAN_MIN(a,b) ((a) > (b) ? (b) : (a)) -+#define ELAN_MAX(a,b) ((a) > (b) ? (a) : (b)) -+#define ELAN_CAP_BITMAPSIZE(cap) (ELAN_MAX (ELAN_MIN (ELAN_CAP_NUM_NODES(cap) * ELAN_CAP_NUM_CONTEXTS(cap), ELAN_MAX_VPS), 0)) -+ -+#define ELAN_CAP_SIZE(cap) (offsetof (ELAN_CAPABILITY, cap_bitmap[BT_BITOUL(ELAN_CAP_BITMAPSIZE(cap))])) -+#define ELAN_CAP_ENTRIES(cap) (((cap)->cap_type & ELAN_CAP_TYPE_NO_BITMAP) ? 
ELAN_CAP_BITMAPSIZE((cap)) : bt_nbits((cap)->cap_bitmap, ELAN_CAP_BITMAPSIZE((cap)))) -+ -+#define ELAN_CAP_IS_RAIL_SET(cap,rail) ((cap)->cap_railmask & (1<<rail)) -+ -+#define ELAN_CAP_KEY_MATCH(cap1,cap2) ((cap1)->cap_userkey.key_values[0] == (cap2)->cap_userkey.key_values[0] && \ -+ (cap1)->cap_userkey.key_values[1] == (cap2)->cap_userkey.key_values[1] && \ -+ (cap1)->cap_userkey.key_values[2] == (cap2)->cap_userkey.key_values[2] && \ -+ (cap1)->cap_userkey.key_values[3] == (cap2)->cap_userkey.key_values[3]) -+ -+#define ELAN_CAP_TYPE_MATCH(cap1,cap2) ((cap1)->cap_version == (cap2)->cap_version && \ -+ (cap1)->cap_type == (cap2)->cap_type) -+ -+#define ELAN_CAP_GEOM_MATCH(cap1,cap2) ((cap1)->cap_lowcontext == (cap2)->cap_lowcontext && \ -+ (cap1)->cap_highcontext == (cap2)->cap_highcontext && \ -+ (cap1)->cap_lownode == (cap2)->cap_lownode && \ -+ (cap1)->cap_highnode == (cap2)->cap_highnode && \ -+ (cap1)->cap_railmask == (cap2)->cap_railmask && \ -+ !bcmp (&(cap1)->cap_bitmap[0], &(cap2)->cap_bitmap[0], \ -+ BT_BITOUL(ELAN_CAP_BITMAPSIZE(cap1)*sizeof(bitmap_t)))) -+ -+#define ELAN_CAP_MATCH(cap1,cap2) (ELAN_CAP_KEY_MATCH (cap1, cap2) && \ -+ ELAN_CAP_TYPE_MATCH (cap1, cap2) && \ -+ ELAN_CAP_GEOM_MATCH (cap1, cap2)) -+ -+#define ELAN_CAP_VALID_MYCONTEXT(cap) ( ((cap)->cap_lowcontext != ELAN_CAP_UNINITIALISED) \ -+ && ((cap)->cap_mycontext != ELAN_CAP_UNINITIALISED) \ -+ && ((cap)->cap_highcontext != ELAN_CAP_UNINITIALISED) \ -+ && ((cap)->cap_lowcontext <= (cap)->cap_mycontext) \ -+ && ((cap)->cap_mycontext <= (cap)->cap_highcontext)) -+ -+/* -+ * Definitions for type -+ */ -+#define ELAN_CAP_TYPE_BLOCK 1 /* Block distribution */ -+#define ELAN_CAP_TYPE_CYCLIC 2 /* Cyclic distribution */ -+#define ELAN_CAP_TYPE_KERNEL 3 /* Kernel capability */ -+ -+#define ELAN_CAP_TYPE_MASK (0xFFF) /* Mask for type */ -+ -+/* OR these bits in for extra features */ -+#define ELAN_CAP_TYPE_HWTEST (1 << 12) /* Hardware test capability type */ -+#define ELAN_CAP_TYPE_MULTI_RAIL (1 << 13) /* "new" multi rail capability */ -+#define ELAN_CAP_TYPE_NO_BITMAP (1 << 14) /* don't use bit map */ -+#define ELAN_CAP_TYPE_BROADCASTABLE (1 << 15) /* broadcastable */ -+ -+ -+extern void elan_nullcap (ELAN_CAPABILITY *cap); -+extern char *elan_capability_string (ELAN_CAPABILITY *cap, char *str); -+extern ELAN_LOCATION elan_vp2location (unsigned process, ELAN_CAPABILITY *cap); -+extern int elan_location2vp (ELAN_LOCATION location, ELAN_CAPABILITY *cap); -+extern int elan_nvps (ELAN_CAPABILITY *cap); -+extern int elan_nlocal (int node, ELAN_CAPABILITY *cap); -+extern int elan_maxlocal (ELAN_CAPABILITY *cap); -+extern int elan_localvps (int node, ELAN_CAPABILITY *cap, int *vps, int size); -+extern int elan_nrails (ELAN_CAPABILITY *cap); -+extern int elan_rails (ELAN_CAPABILITY *cap, int *rails); -+extern int elan_cap_overlap (ELAN_CAPABILITY *cap1, ELAN_CAPABILITY *cap2); -+ -+/* -+ * capability creation/access fns provide for running -+ * new libelan code on old OS releases -+ */ -+extern int elan_lowcontext(ELAN_CAPABILITY *cap); -+extern int elan_mycontext(ELAN_CAPABILITY *cap); -+extern int elan_highcontext(ELAN_CAPABILITY *cap); -+extern int elan_lownode(ELAN_CAPABILITY *cap); -+extern int elan_highnode(ELAN_CAPABILITY *cap); -+extern int elan_captype(ELAN_CAPABILITY *cap); -+extern int elan_railmask(ELAN_CAPABILITY *cap); -+ -+extern int elan_getenvCap (ELAN_CAPABILITY *cap, int index); -+extern ELAN_CAPABILITY *elan_createCapability(void); -+extern ELAN_CAPABILITY *elan_copyCapability(ELAN_CAPABILITY *from, int ctxShift); -+extern int elan_generateCapability(char *string); 
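-+/* -+ * Worked example (editor's addition, not part of the original header), -+ * assuming a hypothetical capability spanning nodes 0..3 and contexts -+ * 64..67: -+ * -+ * ELAN_CAP_NUM_NODES(cap) = 3 - 0 + 1 = 4 -+ * ELAN_CAP_NUM_CONTEXTS(cap) = 67 - 64 + 1 = 4 -+ * ELAN_CAP_BITMAPSIZE(cap) = ELAN_MAX(ELAN_MIN(4 * 4, 16384), 0) = 16 -+ * BT_BITOUL(16) = (16 + 32 - 1) / 32 = 1 -+ * -+ * so ELAN_CAP_SIZE(cap) is the size of an ELAN_CAPABILITY truncated to a -+ * cap_bitmap[] of one bitmap_t word covering the 16 virtual processes. -+ */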
-+extern int elan_getMachinesCap (char *filename, ELAN_CAPABILITY *cap);
-+
-+typedef struct elan_cap_struct
-+{
-+ ELAN_CAP_OWNER owner;
-+ ELAN_CAPABILITY cap;
-+
-+ int attached; /* count of people attached */
-+ unsigned int active; /* ie not being destroyed */
-+} ELAN_CAP_STRUCT;
-+
-+#if ! defined(__KERNEL__)
-+extern void elan_get_random_key(ELAN_USERKEY *key);
-+extern int elan_prefrails(ELAN_CAPABILITY *cap, int *pref, int nvp);
-+#endif
-+
-+#if defined(__KERNEL__)
-+/* capability.c */
-+extern int elan_validate_cap (ELAN_CAPABILITY *cap);
-+extern int elan_validate_map (ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
-+
-+extern int elan_create_cap (ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap);
-+extern int elan_destroy_cap (ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap);
-+extern int elan_create_vp (ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
-+extern int elan_destroy_vp (ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
-+
-+typedef void (*ELAN_DESTROY_CB)(void *args, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
-+
-+extern int elan_attach_cap (ELAN_CAPABILITY *cap, unsigned int rail, void *args, ELAN_DESTROY_CB callback);
-+extern int elan_detach_cap (ELAN_CAPABILITY *cap, unsigned int rail);
-+
-+extern int elan_get_caps (uint *number_of_results, uint array_size, ELAN_CAP_STRUCT *caps);
-+extern int elan_cap_dump (void);
-+#endif /* __KERNEL__ */
-+
-+
-+#endif /* __ELAN_CAPABILITY_H */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * End:
-+ */
-diff -urN clean/include/elan/cm.h linux-2.6.9/include/elan/cm.h
---- clean/include/elan/cm.h 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/elan/cm.h 2005-03-30 09:06:34.000000000 -0500
-@@ -0,0 +1,396 @@
-+/*
-+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ * Copyright (c) 2002-2003 by Quadrics Ltd.
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ifndef __ELAN_CM_H
-+#define __ELAN_CM_H
-+
-+#ident "@(#)$Id: cm.h,v 1.16 2005/03/30 14:06:34 mike Exp $"
-+/* $Source: /cvs/master/quadrics/epmod/cm.h,v $*/
-+
-+#include
-+
-+#if defined(DIGITAL_UNIX)
-+/*
-+ * On Tru64 - SMP doesn't mean Symmetric - cpu 0 is a master cpu and is responsible
-+ * for handling all PCI interrupts and "funneled" operations.  When a kernel thread
-+ * is made runnable, the scheduler will choose which cpu it will run on at that time,
-+ * and will only execute a higher priority thread from another cpu's run queue when
-+ * it becomes totally idle (apparently also including user processes). Also the
-+ * assert_wait_mesg_timo function uses a per-cpu timeout - these can only get executed
-+ * at "preemptable" places - so again have no guarantee on when they will execute if
-+ * they happen to be queued on a "hogged" cpu. The combination of these means that
-+ * Tru64 is incapable of scheduling a high priority kernel thread within a
-+ * deterministic time of when it should have become runnable - wonderful.
-+ *
-+ * Hence the solution Compaq have proposed is to schedule a timeout onto all of the
-+ * cpu's timeouts lists at the maximum frequency that we could want to execute code,
-+ * then to handle the scheduling of work between these ourselves. With a bit of luck
-+ * ..... at least one cpu will be sufficiently unloaded to allow us to get a chance
-+ * to do our important work.
-+ *
-+ * However ..... this still is not reliable, since timeouts under Tru64 are still
-+ * only run when the currently running kernel thread "co-operates" by calling one
-+ * of a number of functions which are permitted to run the "lwc"s AND is not holding
-+ * any spinlocks AND is running at IPL 0. However Compaq are unable to provide
-+ * any upper limit on the time between the "lwc"'s being run and so it is possible
-+ * for all 4 cpus to not run them for an unbounded time.
-+ *
-+ * The solution proposed is to use the RM_TEMP_BACKDOOR hook which was added to
-+ * hardclock() to "solve" this problem for Memory Channel. However, since it
-+ * is called within the clock interrupt it is not permissible to acquire any
-+ * spinlocks, nor to run for "too long". This means that it is not possible to
-+ * call the heartbeat algorithm from this hook.
-+ *
-+ * Our solution to these limitations is to use the hook to cause an elan interrupt
-+ * to be delivered, by issuing a mis-aligned SetEvent command - this causes the device
-+ * to trap and ep_cprocTrap() can then run the heartbeat code. However there is a lock
-+ * order violation between the elan_dev::IntrLock and ep_dev::Lock, so we have to
-+ * use a trylock and if we fail, then hope that when the interrupt is delivered again
-+ * some time later we will succeed.
-+ *
-+ * However this only works if the kernel is able to respond to the Elan interrupt,
-+ * so we panic inside the RM_TEMP_BACKDOOR hook if the SetEvent's interrupt has
-+ * not been taken for more than a CM_TIMER_SCHEDULE_TIMEOUT interval.
-+ *
-+ * In fact this is exactly the mechanism that other operating systems use to
-+ * execute timeouts, since the hardclock interrupt posts a low priority
-+ * "soft interrupt" which "pre-empts" the currently running thread and then
-+ * executes the timeouts. To block timeouts you use splsoftclock() the same as
-+ * in Tru64.
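-+ *
-+ * As a sketch of the trylock fallback described above (illustrative only,
-+ * not code from this driver; "rail" stands for the CM_RAIL below and
-+ * DoHeartbeatWork for the heartbeat routine), the interrupt-time path
-+ * amounts to:
-+ *
-+ *     if (spin_trylock (&rail->Lock)) {
-+ *         DoHeartbeatWork (rail);
-+ *         spin_unlock (&rail->Lock);
-+ *     }
-+ *
-+ * and on failure the work is simply left pending until the interrupt is
-+ * delivered again.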
-+ */
-+#define PER_CPU_TIMEOUT TRUE
-+#endif
-+
-+
-+#define CM_SGMTS_PER_LEVEL 8 /* maximum nodes in each segment */
-+#define CM_MAX_LEVELS 6 /* maximum depth of tree */
-+
-+/* message buffers/dmas/events etc */
-+#define CM_NUM_NODE_MSG_BUFFERS (CM_MAX_LEVELS * CM_SGMTS_PER_LEVEL) /* subordinates and leader */
-+#define CM_NUM_SPARE_MSG_BUFFERS 8 /* spare msg buffers for non-connected nodes */
-+#define CM_NUM_MSG_BUFFERS (CM_NUM_NODE_MSG_BUFFERS + CM_NUM_SPARE_MSG_BUFFERS)
-+
-+#define CM_INPUTQ_ENTRIES 128 /* # entries in input queue */
-+
-+#define CM_PERIODIC_DISCOVER_INTERVAL (5000) /* 5s (infrequent resolution of established leader conflicts) */
-+#define CM_URGENT_DISCOVER_INTERVAL (50) /* 0.05s (more frequently than heartbeats 'cause they don't retry) */
-+#define CM_HEARTBEAT_INTERVAL (125) /* 0.125s */
-+#define CM_TIMER_SCHEDULE_TIMEOUT (4000) /* 4s Maximum time before a timer that's scheduled to run gets to run (eg blocked in interrupt handlers etc) */
-+#define CM_THREAD_SCHEDULE_TIMEOUT (30000) /* 30s Maximum time before a thread that's scheduled to run gets to run */
-+#define CM_THREAD_RUNNING_TIMEOUT (30000) /* 30s Don't expect the manager thread to be running longer than this */
-+
-+#ifdef PER_CPU_TIMEOUT
-+#define CM_PERCPU_TIMEOUT_INTERVAL (50) /* 0.05s (must be less than all above intervals) */
-+#define CM_PACEMAKER_INTERVAL (500) /* 0.5s */
-+
-+#define CM_HEARTBEAT_OVERDUE (250) /* 0.25s Maximum time a timeout can be overdue before taking extreme action */
-+#endif
-+
-+#define CM_P2P_DMA_RETRIES 31
-+
-+/* We expect at least 1 point-to-point message in CM_P2P_MSG_RETRIES
-+ * attempts to send one to be successfully received */
-+#define CM_P2P_MSG_RETRIES 8
-+
-+/* We expect at least 1 broadcast message in CM_BCAST_MSG_RETRIES attempts
-+ * to send one to be successfully received. */
-+#define CM_BCAST_MSG_RETRIES 40
-+
-+/* Heartbeat timeout allows for a node stalling and still getting its
-+ * heartbeat. The 2 is to allow for unsynchronised polling times. */
-+#define CM_HEARTBEAT_TIMEOUT (CM_TIMER_SCHEDULE_TIMEOUT + (2 + CM_P2P_MSG_RETRIES) * CM_HEARTBEAT_INTERVAL)
-+
-+/* Discover timeout must be > CM_HEARTBEAT_TIMEOUT to guarantee that people
-+ * who don't see discovery are considered dead by their leader. This
-+ * ensures that by the time a node "discovers" it is a leader of a segment,
-+ * the previous leader of that segment will have been deemed to be dead by
-+ * its parent segment's leader */
-+#define CM_DISCOVER_TIMEOUT (CM_TIMER_SCHEDULE_TIMEOUT + (2 + CM_BCAST_MSG_RETRIES) * CM_URGENT_DISCOVER_INTERVAL)
-+
-+#define CM_WAITING_TIMEOUT (CM_DISCOVER_TIMEOUT * 100)
-+
-+/*
-+ * Convert all timeouts specified in mS into "ticks"
-+ */
-+#define MSEC2TICKS(MSEC) (((MSEC)*HZ)/1000)
-+
-+
-+/* statemap entry */
-+typedef struct cm_state_entry
-+{
-+ int16_t level; /* cluster level to apply to */
-+ int16_t offset; /* from statemap_findchange() */
-+ uint16_t seg[BT_NBIPUL/16]; /* ditto */
-+} CM_STATEMAP_ENTRY;
-+
-+/* offset is >= 0 for a change to apply and */
-+#define STATEMAP_NOMORECHANGES (-1) /* end of a set of updates */
-+#define STATEMAP_RESET (-2) /* reset the target map */
-+#define STATEMAP_NOOP (-3) /* null token */
-+
-+/* CM message format */
-+typedef int8_t CM_SEQ; /* heartbeat sequence numbers; at least 2 bits, signed */
-+
-+/*
-+ * The message header is received into the last 64 byte block of
-+ * the input queue and the Version *MUST* be the last word of the
-+ * block to ensure that we can see that the whole of the message
-+ * has reached main memory after we've seen that the input queue
-+ * pointer has been updated.
-+ */
-+typedef struct ep_cm_hdr
-+{
-+ uint32_t Pad0;
-+ uint32_t Pad1;
-+
-+ uint8_t Type;
-+ uint8_t Level;
-+ CM_SEQ Seq; /* precision at least 2 bits each */
-+ CM_SEQ AckSeq;
-+
-+ uint16_t NumMaps;
-+ uint16_t MachineId;
-+
-+ uint16_t NodeId;
-+ uint16_t Checksum;
-+
-+ uint32_t Timestamp;
-+ uint32_t ParamHash;
-+ uint32_t Version;
-+} CM_HDR;
-+
-+#define CM_HDR_SIZE sizeof (CM_HDR)
-+
-+typedef struct cm_msg
-+{
-+ union {
-+ CM_STATEMAP_ENTRY Statemaps[1]; /* piggy-backed statemap updates start here */
-+ uint8_t Space[EP_SYSTEMQ_MSG_MAX - CM_HDR_SIZE];
-+ } Payload;
-+
-+ CM_HDR Hdr;
-+} CM_MSG;
-+
-+/* The maximum number of statemap entries that can fit within an EP_CM_MSG_BUFFER */
-+#define CM_MSG_MAXMAPS (offsetof (CM_MSG, Hdr) / sizeof (CM_STATEMAP_ENTRY))
-+#define CM_MSG_MAP(mapno) (CM_MSG_MAXMAPS - (mapno) - 1)
-+
-+/* The actual special message base & size, including 'nmaps' piggy-backed statemap entries */
-+#define CM_MSG_BASE(nmaps) (nmaps == 0 ? offsetof (CM_MSG, Hdr) : offsetof (CM_MSG, Payload.Statemaps[CM_MSG_MAXMAPS - nmaps]))
-+#define CM_MSG_SIZE(nmaps) (sizeof (CM_MSG) - CM_MSG_BASE(nmaps))
-+
-+#define CM_MSG_VERSION 0xcad00005
-+#define CM_MSG_TYPE_RESOLVE_LEADER 0
-+#define CM_MSG_TYPE_DISCOVER_LEADER 1
-+#define CM_MSG_TYPE_NOTIFY 2
-+#define CM_MSG_TYPE_DISCOVER_SUBORDINATE 3
-+#define CM_MSG_TYPE_IMCOMING 4
-+#define CM_MSG_TYPE_HEARTBEAT 5
-+#define CM_MSG_TYPE_REJOIN 6
-+
-+/* CM machine segment */
-+typedef struct cm_sgmtMaps
-+{
-+ u_char InputMapValid; /* Input map has been set */
-+ u_char OutputMapValid; /* Output map has been set */
-+ u_char SentChanges; /* got an outstanding STATEMAP_NOMORECHANGES to send */
-+ statemap_t *OutputMap; /* state to send */
-+ statemap_t *InputMap; /* state received */
-+ statemap_t *CurrentInputMap; /* state being received */
-+} CM_SGMTMAPS;
-+
-+typedef struct cm_sgmt
-+{
-+ u_char State;
-+ u_char SendMaps;
-+ u_char MsgAcked;
-+ CM_SEQ MsgSeq;
-+ CM_SEQ AckSeq;
-+ u_int NodeId;
-+ long UpdateTick;
-+ long WaitingTick;
-+ uint32_t Timestamp;
-+ CM_SGMTMAPS Maps[CM_MAX_LEVELS]; /* Maps[i] == state for cluster level i */
-+ u_short MsgNumber; /* msg buffer to use */
-+ u_short NumMaps; /* # maps in message buffer */
-+ u_short Level;
-+ u_short Sgmt;
-+} CM_SGMT;
-+
-+#define CM_SGMT_ABSENT 0 /* no one there at all */
-+#define CM_SGMT_WAITING 1 /* waiting for subtree to connect */
-+#define CM_SGMT_COMING 2 /* expecting a subtree to reconnect */
-+#define CM_SGMT_PRESENT 3 /* connected */
-+
-+typedef struct cm_level
-+{
-+ int SwitchLevel;
-+ u_int MinNodeId;
-+ u_int NumNodes;
-+ u_int NumSegs;
-+ u_int MySgmt;
-+
-+ /* SubordinateMap[i] == OR of all subordinate maps on this level and down for cluster level i */
-+ u_char SubordinateMapValid[CM_MAX_LEVELS];
-+ statemap_t *SubordinateMap[CM_MAX_LEVELS];
-+
-+ /* maps/flags for this cluster level */
-+ u_int Online:1; /* I've gone online (seen myself running) */
-+ u_int Restarting:1; /* driving my own restart bit */
-+ u_char OfflineReasons; /* forced offline by broadcast */
-+
-+ u_char GlobalMapValid;
-+ u_char SubTreeMapValid;
-+ u_long Connected;
-+
-+ statemap_t *LocalMap; /* state bits I drive */
-+ statemap_t *SubTreeMap; /* OR of my and my subtree states */
-+ statemap_t *GlobalMap; /* OR of all node states */
-+ statemap_t *LastGlobalMap; /* last map I saw */
-+ statemap_t *TmpMap; /* scratchpad */
-+
-+ CM_SGMT Sgmts[CM_SGMTS_PER_LEVEL];
-+} CM_LEVEL;
-+
-+#define CM_ROLE_LEADER_CANDIDATE 0
-+#define CM_ROLE_LEADER 1
-+#define CM_ROLE_SUBORDINATE 2
-+
-+/* global status bits */
-+#define CM_GSTATUS_STATUS_MASK 0x03 /* bits nodes drive to broadcast their status */
-+#define CM_GSTATUS_ABSENT 0x00 /* Off the network */
-+#define CM_GSTATUS_STARTING 0x01 /* I'm waiting for everyone to see me online */
-+#define CM_GSTATUS_RUNNING 0x03 /* up and running */
-+#define CM_GSTATUS_CLOSING 0x02 /* I'm waiting for everyone to see me offline */
-+
-+#define CM_GSTATUS_ACK_MASK 0x0c /* bits nodes drive to ack other status */
-+#define CM_GSTATUS_MAY_START 0x04 /* Everyone thinks I may not start */
-+#define CM_GSTATUS_MAY_RUN 0x08 /* Everyone thinks I may not run */
-+
-+#define CM_GSTATUS_RESTART 0x10 /* Someone thinks I should restart */
-+#define CM_GSTATUS_BITS 5
-+
-+#define CM_GSTATUS_BASE(node) ((node) * CM_GSTATUS_BITS)
-+
-+#if defined(PER_CPU_TIMEOUT)
-+typedef struct cm_timeout_data
-+{
-+ long ScheduledAt; /* lbolt timeout was scheduled to run at */
-+
-+ unsigned long EarlyCount; /* # times run earlier than NextRun */
-+ unsigned long MissedCount; /* # times run on time - but someone else was running it */
-+ unsigned long WastedCount; /* # times we failed to get the spinlock */
-+ unsigned long WorkCount; /* # times we're the one running */
-+
-+ unsigned long WorstDelay; /* worst scheduling delay */
-+ unsigned long BestDelay; /* best scheduling delay */
-+
-+ unsigned long WorstLockDelay; /* worst delay before getting rail->Lock */
-+
-+ unsigned long WorstHearbeatDelay; /* worst delay before calling DoHeartbeatWork */
-+} CM_TIMEOUT_DATA;
-+#endif
-+
-+typedef struct cm_rail
-+{
-+ EP_RAIL *Rail; /* rail we're associated with */
-+ struct list_head Link; /* and linked on the CM_SUBSYS */
-+
-+ uint32_t ParamHash; /* hash of critical parameters */
-+ uint32_t Timestamp;
-+ long DiscoverStartTick; /* when discovery started */
-+
-+ unsigned int NodeId; /* my node id */
-+ unsigned int NumNodes; /* and number of nodes */
-+ unsigned int NumLevels; /* number of levels computed from machine size */
-+ int BroadcastLevel;
-+ long BroadcastLevelTick;
-+ unsigned int TopLevel; /* level at which I'm not a leader */
-+ unsigned char Role; /* state at TopLevel */
-+
-+ EP_INPUTQ *PolledQueue; /* polled input queue */
-+ EP_INPUTQ *IntrQueue; /* intr input queue */
-+ EP_OUTPUTQ *MsgQueue; /* message */
-+ unsigned int NextSpareMsg; /* next "spare" message buffer to use */
-+
-+ EP_CM_RAIL_STATS Stats; /* statistics */
-+
-+ kmutex_t Mutex;
-+ spinlock_t Lock;
-+
-+ long NextHeartbeatTime; /* next time to check/send heartbeats */
-+ long NextDiscoverTime; /* next time to progress discovery */
-+ long NextRunTime; /* the earlier of the above two or intr requires inputq poll */
-+
-+ unsigned int OfflineReasons; /* forced offline by procfs/manager thread stuck */
-+
-+#if defined(PER_CPU_TIMEOUT)
-+ spinlock_t HeartbeatTimeoutsLock; /* spinlock to sequentialise per-cpu timeouts */
-+ long HeartbeatTimeoutsStarted; /* bitmap of which timeouts have started */
-+ long HeartbeatTimeoutsStopped; /* bitmap of which timeouts have stopped */
-+ long HeartbeatTimeoutsShouldStop; /* flag to indicate timeouts should stop */
-+ kcondvar_t HeartbeatTimeoutsWait; /* place to sleep waiting for timeouts to stop */
-+ long HeartbeatTimeoutRunning; /* someone is running the timeout - don't try for the lock */
-+
-+ long HeartbeatTimeoutOverdue; /* heartbeat seen as overdue - interrupt requested */
-+
-+ CM_TIMEOUT_DATA *HeartbeatTimeoutsData; /* per timeout data */
-+#else
-+ struct timer_list HeartbeatTimer; /* timer for heartbeat/discovery */
-+#endif
-+
-+ CM_LEVEL Levels[CM_MAX_LEVELS];
-+} CM_RAIL;
-+
-+/* OfflineReasons (both per-rail and */
-+#define CM_OFFLINE_BROADCAST (1 << 0)
-+#define CM_OFFLINE_PROCFS (1 << 1)
-+#define CM_OFFLINE_MANAGER (1 << 2)
-+
-+typedef struct cm_subsys
-+{
-+ EP_SUBSYS Subsys;
-+ CM_RAIL *Rails[EP_MAX_RAILS];
-+} CM_SUBSYS;
-+
-+extern int MachineId;
-+
-+extern void cm_node_disconnected (EP_RAIL *rail, unsigned nodeId);
-+extern void cm_restart_node (EP_RAIL *rail, unsigned nodeId);
-+extern void cm_restart_comms (CM_RAIL *cmRail);
-+extern int cm_init (EP_SYS *sys);
-+
-+extern void DisplayRail(EP_RAIL *rail);
-+extern void DisplaySegs (EP_RAIL *rail);
-+extern void DisplayStatus (EP_RAIL *rail);
-+
-+extern void DisplayNodeMaps (DisplayInfo *di, CM_RAIL *cmRail);
-+extern void DisplayNodeSgmts (DisplayInfo *di, CM_RAIL *cmRail);
-+extern void DisplayRailDo (DisplayInfo *di, EP_RAIL *rail);
-+
-+extern int cm_read_cluster(EP_RAIL *rail,char *page);
-+extern void cm_force_offline (EP_RAIL
*rail, int offline, unsigned int reason); -+ -+extern int cm_svc_indicator_set (EP_RAIL *rail, int svc_indicator); -+extern int cm_svc_indicator_clear (EP_RAIL *rail, int svc_indicator); -+extern int cm_svc_indicator_is_set (EP_RAIL *rail, int svc_indicator, int nodeId); -+extern int cm_svc_indicator_bitmap (EP_RAIL *rail, int svc_indicator, bitmap_t * bitmap, int low, int nnodes); -+ -+/* cm_procfs.c */ -+extern void cm_procfs_init (CM_SUBSYS *subsys); -+extern void cm_procfs_fini (CM_SUBSYS *subsys); -+extern void cm_procfs_rail_init (CM_RAIL *rail); -+extern void cm_procfs_rail_fini (CM_RAIL *rail); -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -+#endif /* __ELAN_CM_H */ -+ -diff -urN clean/include/elan/compat.h linux-2.6.9/include/elan/compat.h ---- clean/include/elan/compat.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan/compat.h 2003-12-03 08:18:48.000000000 -0500 -@@ -0,0 +1,23 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: compat.h,v 1.1 2003/12/03 13:18:48 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $" -+/* $Source: /cvs/master/quadrics/elanmod/modsrc/compat.h,v $*/ -+ -+#ifndef __ELAN_COMPAT_H -+#define __ELAN_COMPAT_H -+ -+#define ELANMOD_STATS_MAP ELAN_STATS_MAP -+ -+#endif /* __ELAN_COMPAT_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan/device.h linux-2.6.9/include/elan/device.h ---- clean/include/elan/device.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan/device.h 2003-09-24 09:55:37.000000000 -0400 -@@ -0,0 +1,62 @@ -+/* -+ * Copyright (c) 2003 by Quadrics Limited. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: device.h,v 1.5 2003/09/24 13:55:37 david Exp $" -+/* $Source: /cvs/master/quadrics/elanmod/modsrc/device.h,v $*/ -+ -+#ifndef __ELAN_DEVICE_H -+#define __ELAN_DEVICE_H -+ -+/* non-kernel headings */ -+typedef unsigned int ELAN_DEV_IDX; -+ -+#if defined(__KERNEL__) -+ -+/* device callbacks */ -+#define ELAN_DEV_OPS_VERSION ((u_int)1) -+ -+typedef struct elan_dev_ops -+{ -+ /* dev info */ -+ int (*get_position) (void *user_data, ELAN_POSITION *position); -+ int (*set_position) (void *user_data, unsigned short nodeId, unsigned short numNodes); -+ -+ /* cap */ -+ -+ u_int ops_version; -+} ELAN_DEV_OPS; -+ -+typedef struct elan_dev_struct -+{ -+ struct list_head node; -+ -+ ELAN_DEV_IDX devidx; -+ ELAN_DEVINFO *devinfo; -+ void *user_data; -+ ELAN_DEV_OPS *ops; -+} ELAN_DEV_STRUCT; -+ -+/* device.c */ -+extern ELAN_DEV_IDX elan_dev_register (ELAN_DEVINFO *devinfo, -+ ELAN_DEV_OPS *ops, -+ void *userdata); -+extern int elan_dev_deregister (ELAN_DEVINFO *devinfo); -+ -+extern ELAN_DEV_STRUCT * elan_dev_find (ELAN_DEV_IDX devidx); -+ -+extern ELAN_DEV_STRUCT * elan_dev_find_byrail(unsigned short deviceid, unsigned rail); -+extern int elan_dev_dump (void); -+ -+#endif /* __KERNEL__ */ -+ -+#endif /* __ELAN_DEVICE_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "linux" -+ * End: -+ */ -diff -urN clean/include/elan/devinfo.h linux-2.6.9/include/elan/devinfo.h ---- clean/include/elan/devinfo.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan/devinfo.h 2005-02-01 07:35:53.000000000 -0500 -@@ -0,0 +1,92 @@ -+/* -+ * Copyright (c) 2003 by Quadrics Limited. 
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ident "@(#)$Id: devinfo.h,v 1.16 2005/02/01 12:35:53 david Exp $"
-+/* $Source: /cvs/master/quadrics/elanmod/modsrc/devinfo.h,v $*/
-+
-+#ifndef __ELAN_DEVINFO_H
-+#define __ELAN_DEVINFO_H
-+
-+#define ELAN_MAX_LEVELS 8 /* maximum number of levels in switch network */
-+
-+typedef struct elan_position
-+{
-+ unsigned pos_mode; /* mode we're operating in */
-+ unsigned pos_nodeid; /* port this device connected to */
-+ unsigned pos_levels; /* number of levels to top switch */
-+ unsigned pos_nodes; /* number of nodes in the machine */
-+ unsigned pos_random_disabled; /* levels at which "random" routing is not possible */
-+ unsigned char pos_arity[ELAN_MAX_LEVELS]; /* number of downlinks per switch level */
-+} ELAN_POSITION;
-+
-+#define ELAN4_PARAM_PCI_PADDING_FLAGS 0 /* A bit field, representing good places to burst across the pci */
-+#define ELAN4_PARAM_EVENT_COPY_WIN 1 /* The num of cmds when it becomes quicker to send via event copy than write directly */
-+#define ELAN4_PARAM_WRITE_COMBINING 2 /* If set the device supports burst accesses across the pci bus */
-+#define ELAN4_PARAM_DRIVER_FEATURES 11 /* device driver features */
-+#define ELAN4_PARAM_COUNT 12
-+
-+/* values for ELAN4_PARAM_DRIVER_FEATURES, dev_features */
-+#define ELAN4_FEATURE_PCI_MAP (1 << 0) /* must use pci mapping functions */
-+#define ELAN4_FEATURE_64BIT_READ (1 << 1) /* must perform 64 bit PIO reads */
-+#define ELAN4_FEATURE_PIN_DOWN (1 << 2) /* must pin down pages */
-+#define ELAN4_FEATURE_NO_WRITE_COMBINE (1 << 3) /* don't allow write combining at all */
-+#define ELAN4_FEATURE_NO_IOPROC (1 << 4) /* unpatched kernel or disabled by procfs */
-+#define ELAN4_FEATURE_NO_IOPROC_UPDATE (1 << 5) /* don't do coproc update xlation loading */
-+#define ELAN4_FEATURE_NO_PAGEFAULT (1 << 6) /* don't do pagefaulting */
-+#define ELAN4_FEATURE_NO_PREFETCH (1 << 7) /* don't allow prefetching of elan sdram/cports */
-+
-+typedef struct elan_params
-+{
-+ unsigned values[ELAN4_PARAM_COUNT];
-+} ELAN_PARAMS;
-+
-+/* values for pos_mode */
-+#define ELAN_POS_UNKNOWN 0 /* network position unknown */
-+#define ELAN_POS_MODE_SWITCHED 1 /* connected to a switch */
-+#define ELAN_POS_MODE_LOOPBACK 2 /* loopback connector */
-+#define ELAN_POS_MODE_BACKTOBACK 3 /* cabled back-to-back to another node */
-+
-+typedef struct elan_devinfo
-+{
-+ unsigned short dev_vendor_id; /* pci vendor id */
-+ unsigned short dev_device_id; /* pci device id */
-+ unsigned char dev_revision_id; /* pci revision id */
-+ unsigned char dev_instance; /* device instance number */
-+ unsigned char dev_rail; /* device rail number */
-+
-+ unsigned short dev_driver_version; /* device driver version */
-+ unsigned short dev_params_mask; /* mask for valid entries in dev_params array */
-+ ELAN_PARAMS dev_params; /* device parameterisation */
-+
-+ unsigned dev_num_down_links_value; /* hint to machine size */
-+} ELAN_DEVINFO;
-+
-+#define PCI_VENDOR_ID_QUADRICS 0x14fc
-+#define PCI_DEVICE_ID_ELAN3 0x0000
-+#define PCI_REVISION_ID_ELAN3_REVA 0x0000
-+#define PCI_REVISION_ID_ELAN3_REVB 0x0001
-+#define PCI_DEVICE_ID_ELAN4 0x0001
-+#define PCI_REVISION_ID_ELAN4_REVA 0x0000
-+#define PCI_REVISION_ID_ELAN4_REVB 0x0001
-+
-+#if defined(__KERNEL__)
-+/* devinfo.c */
-+#include
-+#include
-+extern int elan_get_devinfo (ELAN_DEV_IDX devidx, ELAN_DEVINFO *devinfo);
-+extern int elan_get_position (ELAN_DEV_IDX devidx, ELAN_POSITION *position);
-+extern int elan_set_position
(ELAN_DEV_IDX devidx, unsigned short nodeId, unsigned short numNodes); -+#endif /* __KERNEL__ */ -+ -+ -+#endif /* __ELAN_DEVINFO_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "linux" -+ * End: -+ */ -diff -urN clean/include/elan/elanmoddebug.h linux-2.6.9/include/elan/elanmoddebug.h ---- clean/include/elan/elanmoddebug.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan/elanmoddebug.h 2005-05-24 13:07:44.000000000 -0400 -@@ -0,0 +1,64 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef _ELAN_DEBUG_H -+#define _ELAN_DEBUG_H -+ -+ -+#ident "$Id: elanmoddebug.h,v 1.6 2005/05/24 17:07:44 addy Exp $" -+/* $Source: /cvs/master/quadrics/elanmod/modsrc/elanmoddebug.h,v $ */ -+ -+#if defined(__KERNEL__) -+ -+/* 0 | QSNET_DEBUG_BUFFER | QSNET_DEBUG_CONSOLE */ -+extern int elan_debug_mode; -+extern int elan_debug_mask; -+ -+#define ELAN_DBG_VP 0x00000001 -+#define ELAN_DBG_CAP 0x00000002 -+#define ELAN_DBG_CTRL 0x00000004 -+#define ELAN_DBG_SYS_FN 0x00000008 -+#define ELAN_DBG_USERCOPY 0x00000010 -+#define ELAN_DBG_ALL 0xffffffff -+ -+ -+#if defined(DEBUG_PRINTF) -+# define ELAN_DEBUG0(m,fmt) ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt) : (void)0) -+# define ELAN_DEBUG1(m,fmt,a) ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a) : (void)0) -+# define ELAN_DEBUG2(m,fmt,a,b) ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a,b) : (void)0) -+# define ELAN_DEBUG3(m,fmt,a,b,c) ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a,b,c) : (void)0) -+# define ELAN_DEBUG4(m,fmt,a,b,c,d) ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a,b,c,d) : (void)0) -+# define ELAN_DEBUG5(m,fmt,a,b,c,d,e) ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a,b,c,d,e) : (void)0) -+# define ELAN_DEBUG6(m,fmt,a,b,c,d,e,f) ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a,b,c,d,e,f) : (void)0) -+#ifdef __GNUC__ -+# define ELAN_DEBUG(m,args...) ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode, ##args) : (void)0) -+#endif -+ -+#else -+ -+# define ELAN_DEBUG0(m,fmt) (0) -+# define ELAN_DEBUG1(m,fmt,a) (0) -+# define ELAN_DEBUG2(m,fmt,a,b) (0) -+# define ELAN_DEBUG3(m,fmt,a,b,c) (0) -+# define ELAN_DEBUG4(m,fmt,a,b,c,d) (0) -+# define ELAN_DEBUG5(m,fmt,a,b,c,d,e) (0) -+# define ELAN_DEBUG6(m,fmt,a,b,c,d,e,f) (0) -+#ifdef __GNUC__ -+# define ELAN_DEBUG(m,args...) -+#endif -+ -+#endif /* DEBUG_PRINTF */ -+ -+ -+#endif /* __KERNEL__ */ -+#endif /* _ELAN_DEBUG_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "linux" -+ * End: -+ */ -diff -urN clean/include/elan/elanmod.h linux-2.6.9/include/elan/elanmod.h ---- clean/include/elan/elanmod.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan/elanmod.h 2005-05-26 12:14:21.000000000 -0400 -@@ -0,0 +1,83 @@ -+/* -+ * Copyright (c) 2003 by Quadrics Limited. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: elanmod.h,v 1.13 2005/05/26 16:14:21 addy Exp $" -+/* $Source: /cvs/master/quadrics/elanmod/modsrc/elanmod.h,v $*/ -+ -+#ifndef __ELAN_MOD_H -+#define __ELAN_MOD_H -+ -+#include -+#include -+#include -+#include -+ -+#if defined(__KERNEL__) -+ -+#include -+ -+/* Linux RW semaphores */ -+#include -+#include -+ -+#define ELANMOD_RWLOCK struct rw_semaphore -+#define ELANMOD_RWLOCK_INIT(l) init_rwsem(l) -+#define ELANMOD_RWLOCK_DESTROY(l) -+#define ELANMOD_RWLOCK_READ(l) down_read(l) -+#define ELANMOD_RWLOCK_WRITE(l) down_write(l) -+#define ELANMOD_RWLOCK_READ_UNLOCK(l) up_read(l) -+#define ELANMOD_RWLOCK_WRITE_UNLOCK(l) up_write(l) -+ -+extern ELANMOD_RWLOCK elan_rwlock; -+ -+/* elan_general.c */ -+extern int elan_init(void); -+extern int elan_fini(void); -+ -+/* return codes, -ve => errno, +ve => success */ -+#define ELAN_CAP_OK (0) -+#define ELAN_CAP_RMS (1) -+ -+#define ELAN_USER_ATTACH (1) -+#define ELAN_USER_DETACH (2) -+#define ELAN_USER_P2P (3) -+#define ELAN_USER_BROADCAST (4) -+ -+extern int elanmod_classify_cap (ELAN_POSITION *position, ELAN_CAPABILITY *cap, unsigned use); -+ -+#define ELAN_USER_BASE_CONTEXT_NUM 0x000 /* first user allowable context */ -+#define ELAN_USER_TOP_CONTEXT_NUM 0x7FF /* last user allowable context */ -+ -+#define ELAN_RMS_BASE_CONTEXT_NUM 0x400 /* reserved for RMS allocation */ -+#define ELAN_RMS_TOP_CONTEXT_NUM 0x7FF -+ -+#define ELAN_USER_CONTEXT(ctx) ((ctx) >= ELAN_USER_BASE_CONTEXT_NUM && \ -+ (ctx) <= ELAN_USER_TOP_CONTEXT_NUM) -+ -+#define ELAN_RMS_CONTEXT(ctx) ((ctx) >= ELAN_RMS_BASE_CONTEXT_NUM && \ -+ (ctx) <= ELAN_RMS_TOP_CONTEXT_NUM) -+ -+ -+/* capability.c */ -+struct elan_cap_node_struct; -+extern int elan_usercopy_attach (ELAN_CAPABILITY *cap, struct elan_cap_node_struct **node_ptr, void *handle, void *owner); -+extern int elan_usercopy_detach (struct elan_cap_node_struct *cap_ptr, void *owner); -+extern int elan_usercopy_handle (struct elan_cap_node_struct *cap_ptr, int ctxId, void **handlep); -+ -+/* usercopy.c */ -+extern int elan_usercopy (void *remote, void *local, size_t len, int write, -+ int ctxId, struct elan_cap_node_struct *cap_ptr); -+ -+#endif /* __KERNEL__ */ -+ -+#endif /* __ELAN_MOD_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "linux" -+ * End: -+ */ -diff -urN clean/include/elan/elanmod_linux.h linux-2.6.9/include/elan/elanmod_linux.h ---- clean/include/elan/elanmod_linux.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan/elanmod_linux.h 2005-02-22 07:29:22.000000000 -0500 -@@ -0,0 +1,164 @@ -+/* -+ * Copyright (c) 2003 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: elanmod_linux.h,v 1.7 2005/02/22 12:29:22 addy Exp $" -+/* $Source: /cvs/master/quadrics/elanmod/modsrc/elanmod_linux.h,v $*/ -+ -+#ifndef __ELAN_MOD_LINUX_H -+#define __ELAN_MOD_LINUX_H -+ -+#define ELANCRTL_USER_BASE 0x40 -+ -+/* stats */ -+typedef struct elanctrl_stats_get_next_struct -+{ -+ ELAN_STATS_IDX statidx; -+ ELAN_STATS_IDX *next_statidx; /* return value */ -+} ELANCTRL_STATS_GET_NEXT_STRUCT; -+#define ELANCTRL_STATS_GET_NEXT _IOR ('e', ELANCRTL_USER_BASE + 0, ELANCTRL_STATS_GET_NEXT_STRUCT) -+ -+typedef struct elanctrl_stats_find_index_struct -+{ -+ caddr_t block_name; -+ ELAN_STATS_IDX *statidx; /* return value */ -+ uint *num_entries; /* return value */ -+} ELANCTRL_STATS_FIND_INDEX_STRUCT; -+#define ELANCTRL_STATS_FIND_INDEX _IOR ('e', ELANCRTL_USER_BASE + 1, ELANCTRL_STATS_FIND_INDEX_STRUCT) -+ -+typedef struct elanctrl_stats_get_block_info_struct -+{ -+ ELAN_STATS_IDX statidx; -+ caddr_t block_name; /* return value */ -+ uint *num_entries; /* return value */ -+} ELANCTRL_STATS_GET_BLOCK_INFO_STRUCT; -+#define ELANCTRL_STATS_GET_BLOCK_INFO _IOR ('e', ELANCRTL_USER_BASE + 2, ELANCTRL_STATS_GET_BLOCK_INFO_STRUCT) -+ -+typedef struct elanctrl_stats_get_index_name_struct -+{ -+ ELAN_STATS_IDX statidx; -+ uint index; -+ caddr_t name; /* return value */ -+} ELANCTRL_STATS_GET_INDEX_NAME_STRUCT; -+#define ELANCTRL_STATS_GET_INDEX_NAME _IOR ('e', ELANCRTL_USER_BASE + 3, ELANCTRL_STATS_GET_INDEX_NAME_STRUCT) -+ -+typedef struct elanctrl_stats_clear_block_struct -+{ -+ ELAN_STATS_IDX statidx; -+} ELANCTRL_STATS_CLEAR_BLOCK_STRUCT; -+#define ELANCTRL_STATS_CLEAR_BLOCK _IOR ('e', ELANCRTL_USER_BASE + 4, ELANCTRL_STATS_CLEAR_BLOCK_STRUCT) -+ -+typedef struct elanctrl_stats_get_block_struct -+{ -+ ELAN_STATS_IDX statidx; -+ uint entries; -+ ulong *values; /* return values */ -+} ELANCTRL_STATS_GET_BLOCK_STRUCT; -+#define ELANCTRL_STATS_GET_BLOCK _IOR ('e', ELANCRTL_USER_BASE + 5, ELANCTRL_STATS_GET_BLOCK_STRUCT) -+ -+ -+typedef struct elanctrl_get_devinfo_struct -+{ -+ ELAN_DEV_IDX devidx; -+ ELAN_DEVINFO *devinfo; /* return values */ -+} ELANCTRL_GET_DEVINFO_STRUCT; -+#define ELANCTRL_GET_DEVINFO _IOR ('e', ELANCRTL_USER_BASE + 6, ELANCTRL_GET_DEVINFO_STRUCT) -+ -+typedef struct elanctrl_get_position_struct -+{ -+ ELAN_DEV_IDX devidx; -+ ELAN_POSITION *position; /* return values */ -+} ELANCTRL_GET_POSITION_STRUCT; -+#define ELANCTRL_GET_POSITION _IOR ('e', ELANCRTL_USER_BASE + 7, ELANCTRL_GET_POSITION_STRUCT) -+ -+typedef struct elanctrl_set_position_struct -+{ -+ ELAN_DEV_IDX devidx; -+ unsigned short nodeId; -+ unsigned short numNodes; -+} ELANCTRL_SET_POSITION_STRUCT; -+#define ELANCTRL_SET_POSITION _IOR ('e', ELANCRTL_USER_BASE + 8, ELANCTRL_SET_POSITION_STRUCT) -+ -+typedef struct elanctrl_create_cap_struct -+{ -+ ELAN_CAPABILITY cap; -+} ELANCTRL_CREATE_CAP_STRUCT; -+#define ELANCTRL_CREATE_CAP _IOW ('e', ELANCRTL_USER_BASE + 9, ELANCTRL_CREATE_CAP_STRUCT) -+ -+typedef struct elanctrl_destroy_cap_struct -+{ -+ ELAN_CAPABILITY cap; -+} ELANCTRL_DESTROY_CAP_STRUCT; -+#define ELANCTRL_DESTROY_CAP _IOW ('e', ELANCRTL_USER_BASE + 10, ELANCTRL_DESTROY_CAP_STRUCT) -+ -+typedef struct elanctrl_create_vp_struct -+{ -+ ELAN_CAPABILITY cap; -+ ELAN_CAPABILITY map; -+} ELANCTRL_CREATE_VP_STRUCT; -+#define ELANCTRL_CREATE_VP _IOW ('e', ELANCRTL_USER_BASE + 11, ELANCTRL_CREATE_VP_STRUCT) -+ -+typedef struct elanctrl_destroy_vp_struct -+{ -+ ELAN_CAPABILITY cap; -+ ELAN_CAPABILITY map; -+} 
ELANCTRL_DESTROY_VP_STRUCT; -+#define ELANCTRL_DESTROY_VP _IOW ('e', ELANCRTL_USER_BASE + 12, ELANCTRL_DESTROY_VP_STRUCT) -+ -+#define ELANCTRL_DEBUG_DUMP _IO ('e', ELANCRTL_USER_BASE + 13) -+ -+typedef struct elanctrl_get_caps_struct -+{ -+ uint *number_of_results; -+ uint array_size; -+ ELAN_CAP_STRUCT *caps; -+} ELANCTRL_GET_CAPS_STRUCT; -+#define ELANCTRL_GET_CAPS _IOW ('e', ELANCRTL_USER_BASE + 14, ELANCTRL_GET_CAPS_STRUCT) -+ -+ -+typedef struct elanctrl_debug_buffer_struct -+{ -+ caddr_t buffer; -+ int size; -+} ELANCTRL_DEBUG_BUFFER_STRUCT; -+#define ELANCTRL_DEBUG_BUFFER _IOW ('e', ELANCRTL_USER_BASE + 15, ELANCTRL_DEBUG_BUFFER_STRUCT) -+ -+ -+/* -+ * Usercopy ioctl definitions -+ */ -+typedef struct elanctrl_usercopy_attach_struct -+{ -+ ELAN_CAPABILITY cap; /* process capability (for security checks) */ -+} ELANCTRL_USERCOPY_ATTACH_STRUCT; -+#define ELANCTRL_USERCOPY_ATTACH _IOR ('u', ELANCRTL_USER_BASE + 0, ELANCTRL_USERCOPY_ATTACH_STRUCT) -+#define ELANCTRL_USERCOPY_DETACH _IO ('u', ELANCRTL_USER_BASE + 1) -+ -+typedef struct elanctrl_usercopy_struct -+{ -+ void *remote; /* remote process buffer */ -+ void *local; /* local process buffer */ -+ size_t len; -+ int write; /* Direction */ -+ -+ int ctxId; /* remote process context id (0 .. nlocal-1) */ -+ -+} ELANCTRL_USERCOPY_STRUCT; -+#define ELANCTRL_USERCOPY _IOR ('u', ELANCRTL_USER_BASE + 2, ELANCTRL_USERCOPY_STRUCT) -+ -+#define ELANMOD_PROCFS_IOCTL "/proc/qsnet/elan/ioctl" -+#define ELANMOD_PROCFS_USER_IOCTL "/proc/qsnet/elan/user" -+#define ELANMOD_PROCFS_VERSION "/proc/qsnet/elan/version" -+#define ELANMOD_PROCFS_DEBUG_MASK "/proc/qsnet/elan/debug_mask" -+#define ELANMOD_PROCFS_DEBUG_MODE "/proc/qsnet/elan/debug_mode" -+ -+#endif /* __ELAN_MOD_LINUX_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "linux" -+ * End: -+ */ -diff -urN clean/include/elan/elanmod_subsystem.h linux-2.6.9/include/elan/elanmod_subsystem.h ---- clean/include/elan/elanmod_subsystem.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan/elanmod_subsystem.h 2003-09-29 11:35:13.000000000 -0400 -@@ -0,0 +1,138 @@ -+/* -+ * Copyright (c) 2003 by Quadrics Limited. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN_SUBSYSTEM_H -+#define __ELAN_SUBSYSTEM_H -+ -+#include -+#include -+ -+#if defined( __KERNEL__) -+int elan_configure( -+ cfg_op_t op, -+ caddr_t indata, -+ ulong indata_size, -+ caddr_t outdata, -+ ulong outdata_size); -+#endif -+ -+#define ELAN_KMOD_CODE(x) ((x)+CFG_OP_SUBSYS_MIN) -+#define ELAN_MAX_KMOD_CODES 100 -+ -+#define ELAN_SUBSYS "elan" -+ -+#define ELAN_STATS_GET_NEXT 0x01 -+typedef struct { -+ ELAN_STATS_IDX statidx; -+ ELAN_STATS_IDX *next_statidx; -+} elan_stats_get_next_struct; -+ -+ -+#define ELAN_STATS_FIND_INDEX 0x02 -+typedef struct { -+ caddr_t block_name; -+ ELAN_STATS_IDX *statidx; /* return value */ -+ uint *num_entries; /* return value */ -+} elan_stats_find_index_struct; -+ -+#define ELAN_STATS_GET_BLOCK_INFO 0x03 -+typedef struct { -+ ELAN_STATS_IDX statidx; -+ caddr_t block_name; /* return value */ -+ uint *num_entries; /* return value */ -+} elan_stats_get_block_info_struct; -+ -+#define ELAN_STATS_GET_INDEX_NAME 0x04 -+typedef struct { -+ ELAN_STATS_IDX statidx; -+ uint index; -+ caddr_t name; /* return value */ -+} elan_stats_get_index_name_struct; -+ -+#define ELAN_STATS_CLEAR_BLOCK 0x05 -+typedef struct { -+ ELAN_STATS_IDX statidx; -+} elan_stats_clear_block_struct; -+ -+#define ELAN_STATS_GET_BLOCK 0x06 -+typedef struct -+{ -+ ELAN_STATS_IDX statidx; -+ uint entries; -+ ulong *values; /* return values */ -+} elan_stats_get_block_struct; -+ -+#define ELAN_GET_DEVINFO 0x07 -+typedef struct -+{ -+ ELAN_DEV_IDX devidx; -+ ELAN_DEVINFO *devinfo; /* return values */ -+} elan_get_devinfo_struct; -+ -+#define ELAN_GET_POSITION 0x08 -+typedef struct { -+ ELAN_DEV_IDX devidx; -+ ELAN_POSITION *position; /* return values */ -+} elan_get_position_struct; -+ -+#define ELAN_SET_POSITION 0x09 -+typedef struct { -+ ELAN_DEV_IDX devidx; -+ unsigned short nodeId; -+ unsigned short numNodes; -+} elan_set_position_struct; -+ -+#define ELAN_CREATE_CAP 0x0a -+typedef struct { -+ ELAN_CAPABILITY cap; -+} elan_create_cap_struct; -+ -+#define ELAN_DESTROY_CAP 0x0b -+typedef struct { -+ ELAN_CAPABILITY cap; -+} elan_destroy_cap_struct; -+ -+#define ELAN_CREATE_VP 0x0c -+typedef struct { -+ ELAN_CAPABILITY cap; -+ ELAN_CAPABILITY map; -+} elan_create_vp_struct; -+ -+#define ELAN_DESTROY_VP 0x0d -+typedef struct { -+ ELAN_CAPABILITY cap; -+ ELAN_CAPABILITY map; -+} elan_destroy_vp_struct; -+ -+ -+#define ELAN_DEBUG_DUMP 0x0e -+ -+#define ELAN_GET_CAPS 0x0f -+typedef struct { -+ uint *number_of_results; -+ uint array_size; -+ ELAN_CAP_STRUCT *caps; -+} elan_get_caps_struct; -+ -+#define ELAN_DEBUG_BUFFER 0x10 -+typedef struct { -+ caddr_t addr; -+ int len; -+} elan_debug_buffer_struct; -+ -+#define ELANMOD_PROCFS_IOCTL "/proc/qsnet/elan/ioctl" -+#define ELANMOD_PROCFS_VERSION "/proc/qsnet/elan/version" -+#define ELANMOD_PROCFS_DEBUG_MASK "/proc/qsnet/elan/debug_mask" -+#define ELANMOD_PROCFS_DEBUG_MODE "/proc/qsnet/elan/debug_mode" -+ -+#endif /* __ELAN_SUBSYSTEM_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "linux" -+ * End: -+ */ -diff -urN clean/include/elan/epcomms.h linux-2.6.9/include/elan/epcomms.h ---- clean/include/elan/epcomms.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan/epcomms.h 2004-11-12 05:55:03.000000000 -0500 -@@ -0,0 +1,635 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN_EPCOMMS_H -+#define __ELAN_EPCOMMS_H -+ -+#ident "$Id: epcomms.h,v 1.46 2004/11/12 10:55:03 mike Exp $" -+/* $Source: /cvs/master/quadrics/epmod/epcomms.h,v $ */ -+ -+#include -+#include -+ -+#define EPCOMMS_SUBSYS_NAME "epcomms" -+ -+/* message service numbers */ -+#define EP_MSG_SVC_EIP512 0x00 /* Quadrics EIP services */ -+#define EP_MSG_SVC_EIP1K 0x01 -+#define EP_MSG_SVC_EIP2K 0x02 -+#define EP_MSG_SVC_EIP4K 0x03 -+#define EP_MSG_SVC_EIP8K 0x04 -+#define EP_MSG_SVC_EIP16K 0x05 -+#define EP_MSG_SVC_EIP32K 0x06 -+#define EP_MSG_SVC_EIP64K 0x07 -+#define EP_MSG_SVC_EIP128K 0x08 -+ -+#define EP_MSG_SVC_PFS 0x09 /* Quadrics PFS rpc service */ -+ -+#define EP_MSG_SVC_PORTALS_SMALL 0x10 /* Lustre Portals */ -+#define EP_MSG_SVC_PORTALS_LARGE 0x11 -+ -+#define EP_MSG_NSVC 0x40 /* Max number of services */ -+ -+#define EP_MSGQ_ADDR(qnum) (EP_EPCOMMS_QUEUE_BASE + (qnum) * EP_QUEUE_DESC_SIZE) -+ -+/* -+ * EP_ENVELOPE -+ * Messages are sent by sending an envelope to the destination -+ * describing the source buffers to transfer. The receiving thread -+ * then allocates a receive buffer and fetches the data by issuing -+ * "get" dmas. -+ * -+ * NOTE: envelopes are not explicitly converted to network byte order -+ * since they are always transferred little endian as they are -+ * copied to/from elan memory using word operations. -+ */ -+typedef struct ep_envelope -+{ -+ uint32_t Version; /* Protocol version field */ -+ -+ EP_ATTRIBUTE Attr; /* Attributes */ -+ -+ EP_XID Xid; /* transaction id */ -+ -+ uint32_t NodeId; /* Source processor */ -+ uint32_t Range; /* range we're sending to (high << 16 | low) */ -+ -+ EP_ADDR TxdRail; /* address of per-rail txd */ -+ EP_NMD TxdMain; /* address of main memory portion of txd */ -+ -+ uint32_t nFrags; /* # fragments */ -+ EP_NMD Frags[EP_MAXFRAG]; /* network mapping handles of source data */ -+ -+ uint32_t CheckSum; /* holds the check sum value when active -+ * must be after all members to be checksum'd -+ */ -+ -+ uint32_t Pad[6]; /* Pad to 128 bytes */ -+} EP_ENVELOPE; -+ -+#define EP_ENVELOPE_VERSION 0xdac10001 -+#define EP_ENVELOPE_SIZE roundup (sizeof (EP_ENVELOPE), EP_BLK_SIZE) -+ -+/* -+ * RPC payload - this small amount of data is transfered in -+ * the envelope for RPCs -+ */ -+typedef struct ep_payload -+{ -+ uint32_t Data[128/sizeof(uint32_t)]; -+} EP_PAYLOAD; -+ -+#define EP_PAYLOAD_SIZE roundup (sizeof (EP_PAYLOAD), EP_BLK_SIZE) -+ -+#define EP_INPUTQ_SIZE (EP_ENVELOPE_SIZE + EP_PAYLOAD_SIZE) -+ -+/* -+ * EP_STATUSBLK -+ * RPC completion transfers a status block to the client. 
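-+ *
-+ * Illustrative sequence using the functions declared later in this
-+ * header (a sketch of the flow only, not code from the original
-+ * driver):
-+ *
-+ *     client:  ep_transmit_rpc (xmtr, dest, svc, attr, handler, arg,
-+ *                               payload, nmd, nFrags);
-+ *     server:  receive handler runs, optionally moves data with
-+ *              ep_rpc_get (rxd, handler, arg, from, to, nFrags);
-+ *              then replies with
-+ *              ep_complete_rpc (rxd, handler, arg, &blk, from, to, nFrags);
-+ *     client:  the transmit handler is called once the returned
-+ *              EP_STATUSBLK is available.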
-+ */
-+typedef struct ep_statusblk
-+{
-+ uint32_t Data[128/sizeof(uint32_t)];
-+} EP_STATUSBLK;
-+
-+#define EP_STATUSBLK_SIZE roundup (sizeof(EP_STATUSBLK), EP_BLK_SIZE)
-+
-+#define EP_RANGE(low,high) ((high) << 16 | (low))
-+#define EP_RANGE_LOW(range) ((range) & 0xFFFF)
-+#define EP_RANGE_HIGH(range) (((range) >> 16) & 0xFFFF)
-+
-+/* return codes from functions, + 'res' parameter to txd callback, ep_rxd_status() */
-+typedef enum
-+{
-+ EP_SUCCESS = 0, /* message sent/received successfully */
-+ EP_RXD_PENDING = -1, /* rxd not completed by thread */
-+ EP_CONN_RESET = -2, /* virtual circuit reset */
-+ EP_NODE_DOWN = -3, /* node down - transmit not attempted */
-+ EP_MSG_TOO_BIG = -4, /* received message larger than buffer */
-+ EP_ENOMEM = -5, /* memory alloc failed */
-+ EP_EINVAL = -6, /* invalid parameters */
-+ EP_SHUTDOWN = -7, /* receiver is being shut down */
-+} EP_STATUS;
-+
-+/* forward declarations */
-+typedef struct ep_rxd EP_RXD;
-+typedef struct ep_txd EP_TXD;
-+typedef struct ep_rcvr_rail EP_RCVR_RAIL;
-+typedef struct ep_rcvr EP_RCVR;
-+typedef struct ep_xmtr_rail EP_XMTR_RAIL;
-+typedef struct ep_xmtr EP_XMTR;
-+typedef struct ep_comms_rail EP_COMMS_RAIL;
-+typedef struct ep_comms_subsys EP_COMMS_SUBSYS;
-+
-+typedef struct ep_rcvr_stats EP_RCVR_STATS;
-+typedef struct ep_xmtr_stats EP_XMTR_STATS;
-+typedef struct ep_rcvr_rail_stats EP_RCVR_RAIL_STATS;
-+typedef struct ep_xmtr_rail_stats EP_XMTR_RAIL_STATS;
-+
-+typedef void (EP_RXH)(EP_RXD *rxd); /* callback function from receive completion */
-+typedef void (EP_TXH)(EP_TXD *txd, void *arg, EP_STATUS res); /* callback function from transmit completion */
-+
-+/* Main memory portion shared descriptor */
-+typedef struct ep_rxd_main
-+{
-+ EP_ENVELOPE Envelope; /* 128 byte aligned envelope */
-+ EP_PAYLOAD Payload; /* 128 byte aligned payload */
-+ bitmap_t Bitmap[BT_BITOUL(EP_MAX_NODES)]; /* broadcast bitmap */
-+ EP_STATUSBLK StatusBlk; /* RPC status block to return */
-+ uint64_t Next; /* linked list when on active list (main address) */
-+ int32_t Len; /* Length of message received */
-+} EP_RXD_MAIN;
-+
-+#define EP_RXD_MAIN_SIZE roundup (sizeof (EP_RXD_MAIN), EP_BLK_SIZE)
-+
-+/* Phases for message/rpc */
-+#ifndef __ELAN__
-+
-+/* Kernel memory portion of per-rail receive descriptor */
-+typedef struct ep_rxd_rail
-+{
-+ struct list_head Link; /* linked on freelist */
-+ EP_RCVR_RAIL *RcvrRail; /* rcvr we're associated with */
-+
-+ EP_RXD *Rxd; /* receive descriptor we're bound to */
-+} EP_RXD_RAIL;
-+
-+#define RXD_BOUND2RAIL(rxdRail,rcvrRail) ((rxdRail) != NULL && ((EP_RXD_RAIL *) (rxdRail))->RcvrRail == (EP_RCVR_RAIL *) rcvrRail)
-+
-+struct ep_rxd
-+{
-+ struct list_head Link; /* linked on free/active list */
-+ EP_RCVR *Rcvr; /* owning receiver */
-+
-+ EP_RXD_MAIN *RxdMain; /* shared main memory portion. */
-+ EP_NMD NmdMain; /* and network mapping descriptor */
-+
-+ EP_RXD_RAIL *RxdRail; /* per-rail rxd we're bound to */
-+
-+ EP_RXH *Handler; /* completion function */
-+ void *Arg; /* and argument */
-+
-+ unsigned int State; /* RXD status (active,stalled,failed) */
-+
-+ EP_NMD Data; /* network mapping descriptor for user buffer */
-+
-+ int nFrags; /* network mapping descriptor for put/get/complete */
-+ EP_NMD Local[EP_MAXFRAG];
-+ EP_NMD Remote[EP_MAXFRAG];
-+
-+ long NextRunTime; /* time to resend failover/map requests */
-+ EP_XID MsgXid; /* and transaction id */
-+
-+#if !
defined(CONFIG_EP_NO_CHECK_SUM) -+ struct list_head CheckSumLink; /* linked on check sum list */ -+#endif -+}; -+ -+#define EP_NUM_RXD_PER_BLOCK 16 -+ -+/* rxd->State */ -+#define EP_RXD_FREE 0 -+ -+#define EP_RXD_RECEIVE_UNBOUND 1 -+#define EP_RXD_RECEIVE_ACTIVE 2 -+ -+#define EP_RXD_PUT_ACTIVE 3 -+#define EP_RXD_PUT_STALLED 4 -+#define EP_RXD_GET_ACTIVE 5 -+#define EP_RXD_GET_STALLED 6 -+ -+#define EP_RXD_COMPLETE_ACTIVE 7 -+#define EP_RXD_COMPLETE_STALLED 8 -+ -+#define EP_RXD_RPC_IN_PROGRESS 9 -+#define EP_RXD_COMPLETED 10 -+ -+#define EP_RXD_BEEN_ABORTED 11 /* rxd was aborted while in a private state */ -+ -+typedef struct ep_rxd_block -+{ -+ struct list_head Link; -+ -+ EP_NMD NmdMain; -+ -+ EP_RXD Rxd[EP_NUM_RXD_PER_BLOCK]; -+} EP_RXD_BLOCK; -+ -+struct ep_rcvr_rail_stats -+{ -+ EP_STATS_COUNT rx; -+ EP_STATS_COUNT rx_len; -+}; -+ -+struct ep_rcvr_rail -+{ -+ EP_RCVR *Rcvr; /* associated receiver */ -+ EP_COMMS_RAIL *CommsRail; /* comms rail */ -+ -+ struct proc_dir_entry *procfs_root; /* root of this rcvr_rail's procfs entry */ -+ EP_RCVR_RAIL_STATS stats; /* generic rcvr_rail stats */ -+}; -+ -+struct ep_rcvr_stats -+{ -+ EP_STATS_COUNT rx; -+ EP_STATS_COUNT rx_len; -+}; -+ -+struct ep_rcvr -+{ -+ struct list_head Link; /* queued on subsystem */ -+ EP_COMMS_SUBSYS *Subsys; /* kernel comms subsystem */ -+ EP_SERVICE Service; /* service number */ -+ -+ unsigned int InputQueueEntries; /* # entries on receive queue */ -+ -+ EP_RAILMASK RailMask; /* bitmap of which rails are available */ -+ EP_RCVR_RAIL *Rails[EP_MAX_RAILS]; -+ -+ spinlock_t Lock; /* spinlock for rails/receive lists */ -+ -+ struct list_head ActiveDescList; /* List of pending/active receive descriptors */ -+ -+ EP_XID_CACHE XidCache; /* XID cache (protected by Lock) */ -+ -+ struct list_head FreeDescList; /* List of free receive descriptors */ -+ unsigned int FreeDescCount; /* and number on free list */ -+ unsigned int TotalDescCount; /* total number created */ -+ spinlock_t FreeDescLock; /* and lock for free list */ -+ kcondvar_t FreeDescSleep; /* with place to sleep for rx desc */ -+ int FreeDescWanted; /* and flag */ -+ struct list_head DescBlockList; -+ -+ unsigned int ForwardRxdCount; /* count of rxd's being forwarded */ -+ unsigned int CleanupWaiting; /* waiting for cleanup */ -+ kcondvar_t CleanupSleep; /* and place to sleep */ -+ -+ struct proc_dir_entry *procfs_root; /* place where this rcvr's proc entry is */ -+ EP_RCVR_STATS stats; -+}; -+ -+#if ! defined(CONFIG_EP_NO_CHECK_SUM) -+#define EP_ENVELOPE_CHECK_SUM (1<<31) -+extern uint32_t ep_calc_check_sum (EP_SYS *sys, EP_ENVELOPE *env, EP_NMD *nmd, int nFrags); -+#endif -+ -+#endif /* ! 
__ELAN__ */
-+
-+typedef struct ep_txd_main
-+{
-+ EP_STATUSBLK StatusBlk; /* RPC status block */
-+ bitmap_t Bitmap[BT_BITOUL(EP_MAX_NODES)]; /* broadcast bitmap */
-+} EP_TXD_MAIN;
-+
-+#define EP_TXD_MAIN_SIZE roundup (sizeof (EP_TXD_MAIN), EP_BLK_SIZE)
-+
-+#ifndef __ELAN__
-+typedef struct ep_txd_rail
-+{
-+ struct list_head Link; /* linked on freelist */
-+ EP_XMTR_RAIL *XmtrRail; /* xmtr we're associated with */
-+
-+ EP_TXD *Txd; /* txd we're bound to */
-+} EP_TXD_RAIL;
-+
-+#define TXD_BOUND2RAIL(txdRail,xmtrRail) ((txdRail) != NULL && ((EP_TXD_RAIL *) (txdRail))->XmtrRail == (EP_XMTR_RAIL *) xmtrRail)
-+
-+struct ep_txd
-+{
-+ struct list_head Link; /* linked on free/active list */
-+ EP_XMTR *Xmtr; /* xmtr we're associated with */
-+
-+ EP_TXD_MAIN *TxdMain; /* shared main memory portion */
-+ EP_NMD NmdMain; /* and network mapping descriptor */
-+
-+ EP_TXD_RAIL *TxdRail; /* per-rail txd for this phase */
-+
-+ EP_TXH *Handler; /* completion function */
-+ void *Arg; /* and argument */
-+
-+ unsigned short NodeId; /* node transmit is to. */
-+ EP_SERVICE Service; /* and service */
-+
-+ long TimeStamp; /* time we were created at, to find sends taking too long */
-+ long RetryTime;
-+ EP_BACKOFF Backoff;
-+
-+ EP_ENVELOPE Envelope; /* envelope for transmit */
-+ EP_PAYLOAD Payload; /* payload for transmit */
-+};
-+
-+#define EP_NUM_TXD_PER_BLOCK 16
-+
-+/* "phase" parameter to BindTxd */
-+#define EP_TXD_PHASE_ACTIVE 1
-+#define EP_TXD_PHASE_PASSIVE 2
-+
-+typedef struct ep_txd_block
-+{
-+ struct list_head Link;
-+ EP_NMD NmdMain;
-+ EP_TXD Txd[EP_NUM_TXD_PER_BLOCK]; /* transmit descriptors */
-+} EP_TXD_BLOCK;
-+
-+struct ep_xmtr_rail_stats
-+{
-+ EP_STATS_COUNT tx;
-+ EP_STATS_COUNT tx_len;
-+};
-+
-+struct ep_xmtr_rail
-+{
-+ EP_COMMS_RAIL *CommsRail; /* associated comms rail */
-+ EP_XMTR *Xmtr; /* associated transmitter */
-+
-+ struct proc_dir_entry *procfs_root; /* place where this xmtr's proc entry is */
-+
-+ EP_XMTR_RAIL_STATS stats;
-+};
-+
-+struct ep_xmtr_stats
-+{
-+ EP_STATS_COUNT tx;
-+ EP_STATS_COUNT tx_len;
-+};
-+
-+struct ep_xmtr
-+{
-+ struct list_head Link; /* Linked on subsys */
-+ EP_COMMS_SUBSYS *Subsys; /* kernel comms subsystem */
-+
-+ EP_RAILMASK RailMask; /* bitmap of which rails are available */
-+ EP_XMTR_RAIL *Rails[EP_MAX_RAILS]; /* per-rail state */
-+
-+ spinlock_t Lock; /* lock for active descriptor list */
-+
-+ struct list_head ActiveDescList; /* list of active transmit descriptors */
-+
-+ EP_XID_CACHE XidCache; /* XID cache (protected by Lock) */
-+
-+ struct list_head FreeDescList; /* List of free transmit descriptors */
-+ unsigned int FreeDescCount; /* and number on free list */
-+ unsigned int TotalDescCount;
-+ spinlock_t FreeDescLock; /* and lock for free list */
-+ kcondvar_t FreeDescSleep; /* with place to sleep for tx desc */
-+ int FreeDescWanted; /* and flag */
-+ struct list_head DescBlockList;
-+
-+ struct proc_dir_entry *procfs_root; /* place where this xmtr's proc entry is */
-+ EP_XMTR_STATS stats;
-+};
-+
-+/* forward descriptor */
-+#define EP_TREE_ARITY 3
-+
-+typedef struct ep_fwd_desc
-+{
-+ struct list_head Link; /* linked on forward/free lists */
-+ EP_RXD *Rxd; /* rxd to forward */
-+ EP_NMD Data; /* nmd of subset of receive buffer */
-+ unsigned NumChildren; /* number of places we're forwarding */
-+ unsigned Children[EP_TREE_ARITY];
-+} EP_FWD_DESC;
-+
-+typedef struct ep_comms_ops
-+{
-+ void (*DelRail) (EP_COMMS_RAIL *rail);
-+ void (*DisplayRail) (EP_COMMS_RAIL *rail);
-+
-+ struct {
-+ void (*AddRail)
(EP_RCVR *rcvr, EP_COMMS_RAIL *rail); -+ void (*DelRail) (EP_RCVR *rcvr, EP_COMMS_RAIL *rail); -+ -+ long (*Check) (EP_RCVR_RAIL *rcvrRail, long nextRunTime); -+ -+ int (*QueueRxd) (EP_RXD *rxd, EP_RCVR_RAIL *rcvrRail); -+ void (*RpcPut)(EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags); -+ void (*RpcGet)(EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags); -+ void (*RpcComplete)(EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags); -+ -+ EP_RXD *(*StealRxd)(EP_RCVR_RAIL *rcvrRail); -+ -+ void (*DisplayRcvr) (DisplayInfo *di, EP_RCVR_RAIL *rcvrRail); -+ void (*DisplayRxd) (DisplayInfo *di, EP_RXD_RAIL *rxdRail); -+ -+ void (*FillOutRailStats) (EP_RCVR_RAIL *rcvr_rail, char *str); -+ -+ } Rcvr; -+ -+ struct { -+ void (*AddRail) (EP_XMTR *xmtr, EP_COMMS_RAIL *rail); -+ void (*DelRail) (EP_XMTR *xmtr, EP_COMMS_RAIL *rail); -+ -+ long (*Check) (EP_XMTR_RAIL *xmtrRail, long nextRunTime); -+ -+ int (*BindTxd) (EP_TXD *txd, EP_XMTR_RAIL *xmtrRail, unsigned int phase); -+ void (*UnbindTxd) (EP_TXD *txd, unsigned int phase); -+ int (*PollTxd) (EP_XMTR_RAIL *xmtrRail, EP_TXD_RAIL *txdRail, int how); -+ -+ void (*DisplayXmtr) (DisplayInfo *di, EP_XMTR_RAIL *xmtrRail); -+ void (*DisplayTxd) (DisplayInfo *di, EP_TXD_RAIL *txdRail); -+ -+ int (*CheckTxdState) (EP_TXD *txd); -+ -+ void (*FillOutRailStats) (EP_XMTR_RAIL *xmtr_rail, char *str); -+ -+ } Xmtr; -+} EP_COMMS_OPS; -+ -+#define EP_RAIL_OP(commsRail, Which) (commsRail)->Ops.Which -+#define EP_RCVR_OP(rcvrRail, Which) (rcvrRail)->CommsRail->Ops.Rcvr.Which -+#define EP_XMTR_OP(xmtrRail, Which) (xmtrRail)->CommsRail->Ops.Xmtr.Which -+ -+/* "how" parameter to PollTxd */ -+#define POLL_TX_LIST 0 -+#define ENABLE_TX_CALLBACK 1 -+#define DISABLE_TX_CALLBACK 2 -+ -+struct ep_comms_rail -+{ -+ struct list_head Link; /* Linked on subsys */ -+ EP_RAIL *Rail; /* kernel comms rail */ -+ EP_COMMS_SUBSYS *Subsys; -+ EP_COMMS_OPS Ops; -+ -+ EP_COMMS_RAIL_STATS Stats; /* statistics */ -+}; -+ -+struct ep_comms_subsys -+{ -+ EP_SUBSYS Subsys; /* is a kernel comms subsystem */ -+ -+ kmutex_t Lock; /* global lock */ -+ -+ EP_COMMS_STATS Stats; /* statistics */ -+ -+ struct list_head Rails; /* list of all rails */ -+ -+ struct list_head Receivers; /* list of receivers */ -+ struct list_head Transmitters; /* and transmitters */ -+ -+ /* forward/allocator thread */ -+ EP_KTHREAD Thread; /* place thread sleeps */ -+ -+ /* message passing "broadcast" forward lists */ -+ spinlock_t ForwardDescLock; /* Lock for broadcast forwarding */ -+ struct list_head ForwardDescList; /* List of rxd's to forward */ -+ -+#if ! 
defined(CONFIG_EP_NO_CHECK_SUM) -+ spinlock_t CheckSumDescLock; /* Lock for CheckSums */ -+ struct list_head CheckSumDescList; /* List of rxd's to be CheckSumed */ -+#endif -+ -+ EP_XMTR *ForwardXmtr; /* and transmitter to forward with */ -+}; -+ -+/* epcomms.c subsystem initialisation */ -+extern unsigned int epcomms_forward_limit; -+ -+extern int ep_comms_init (EP_SYS *sys); -+extern void ep_comms_display (EP_SYS *sys, char *how); -+extern EP_RAILMASK ep_rcvr_railmask (EP_SYS *epsys, EP_SERVICE service); -+ -+/* epcomms_elan3.c */ -+extern EP_COMMS_RAIL *ep3comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *rail); -+ -+/* epcomms_elan4.c */ -+extern EP_COMMS_RAIL *ep4comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *rail); -+ -+/* epcommsTx.c */ -+extern int TxdShouldStabalise (EP_TXD_RAIL *txdRail, EP_RAIL *rail); -+extern void FreeTxd (EP_XMTR *xmtr, EP_TXD *txd); -+ -+extern unsigned int ep_txd_lowat; -+extern long ep_check_xmtr (EP_XMTR *xmtr, long nextRunTime); -+extern void ep_display_xmtr (DisplayInfo *di, EP_XMTR *xmtr); -+extern void ep_xmtr_flush_callback (EP_XMTR *xmtr, EP_XMTR_RAIL *xmtrRail); -+extern void ep_xmtr_reloc_callback (EP_XMTR *xmtr, EP_XMTR_RAIL *xmtrRail); -+ -+extern void ep_xmtr_fillout_stats (EP_XMTR *xmtr, char *str); -+extern void ep_xmtr_rail_fillout_stats (EP_XMTR_RAIL *xmtr_rail, char *str); -+ -+extern void ep_xmtr_txd_stat (EP_XMTR *xmtr, EP_TXD *txd); -+ -+/* epcommsRx.c */ -+extern EP_RXD *StealRxdFromOtherRail (EP_RCVR *rcvr); -+ -+extern unsigned int ep_rxd_lowat; -+extern long ep_check_rcvr (EP_RCVR *rcvr, long nextRunTime); -+extern void ep_rcvr_flush_callback (EP_RCVR *rcvr, EP_RCVR_RAIL *rcvrRail); -+extern void ep_rcvr_reloc_callback (EP_RCVR *rcvr, EP_RCVR_RAIL *rcvrRail); -+extern void ep_display_rcvr (DisplayInfo *di, EP_RCVR *rcvr, int full); -+ -+extern long ep_forward_rxds (EP_COMMS_SUBSYS *subsys, long nextRunTime); -+ -+extern void ep_rcvr_fillout_stats (EP_RCVR *rcvr, char *str); -+extern void ep_rcvr_rail_fillout_stats (EP_RCVR_RAIL *rcvr_rail, char *str); -+ -+#if ! 
defined(CONFIG_EP_NO_CHECK_SUM) -+extern void ep_csum_rxds (EP_COMMS_SUBSYS *subsys); -+extern void ep_rxd_queue_csum (EP_RXD *rxd); -+#endif -+ -+extern void ep_rxd_received (EP_RXD *rxd); -+extern void ep_rxd_received_now (EP_RXD *rxd); -+ -+/* ep_procfs.c */ -+extern struct proc_dir_entry *ep_procfs_root; -+ -+extern void ep_procfs_rcvr_xmtr_init(void); -+extern void ep_procfs_rcvr_xmtr_fini(void); -+ -+extern void ep_procfs_rcvr_add(EP_RCVR *rcvr); -+extern void ep_procfs_rcvr_del(EP_RCVR *rcvr); -+ -+extern void ep_procfs_rcvr_add_rail(EP_RCVR_RAIL *rcvrRail); -+extern void ep_procfs_rcvr_del_rail(EP_RCVR_RAIL *rcvrRail); -+ -+extern void ep_procfs_xmtr_add(EP_XMTR *xmtr); -+extern void ep_procfs_xmtr_del(EP_XMTR *xmtr); -+ -+extern void ep_procfs_xmtr_add_rail(EP_XMTR_RAIL *xmtrRail); -+extern void ep_procfs_xmtr_del_rail(EP_XMTR_RAIL *xmtrRail); -+ -+ -+/* Public Interface */ -+ -+ -+/* epcomms.c message xmtr functions */ -+extern EP_XMTR *ep_alloc_xmtr (EP_SYS *sys); -+extern void ep_free_xmtr (EP_XMTR *xmtr); -+ -+extern EP_STATUS ep_transmit_message (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr, -+ EP_TXH *handler, void *arg, EP_PAYLOAD *payload, -+ EP_NMD *nmd, int nFrag); -+extern EP_STATUS ep_multicast_message (EP_XMTR *xmtr, unsigned int destLo, unsigned int destHi, bitmap_t *bitmap, -+ EP_SERVICE service, EP_ATTRIBUTE attr, EP_TXH *handler, void *arg, -+ EP_PAYLOAD *payload, EP_NMD *nmd, int nFrag); -+extern EP_STATUS ep_transmit_rpc (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr, -+ EP_TXH *handler, void *arg, EP_PAYLOAD *payload, -+ EP_NMD *nmd, int nFrag); -+extern EP_STATUS ep_multicast_forward (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr, -+ EP_TXH *handler, void *arg, EP_ENVELOPE *env, EP_PAYLOAD *payload, -+ bitmap_t *bitmap, EP_NMD *nmd, int nFrags); -+ -+/* epcomms.c functions for use with polled transmits */ -+extern int ep_poll_transmits (EP_XMTR *xmtr); -+extern int ep_enable_txcallbacks (EP_XMTR *xmtr); -+extern int ep_disable_txcallbacks (EP_XMTR *xmtr); -+ -+/* epcomms.c message rcvr functions */ -+extern EP_RCVR *ep_alloc_rcvr (EP_SYS *sys, EP_SERVICE svc, unsigned int nenvelopes); -+extern void ep_free_rcvr (EP_RCVR *rcvr); -+ -+extern EP_STATUS ep_queue_receive (EP_RCVR *rcvr, EP_RXH *handler, void *arg, EP_NMD *nmd, EP_ATTRIBUTE attr); -+extern void ep_requeue_receive (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *nmd, EP_ATTRIBUTE attr); -+extern EP_STATUS ep_rpc_put (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *from, EP_NMD *to, int nFrags); -+extern EP_STATUS ep_rpc_get (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *from, EP_NMD *to, int nFrags); -+extern EP_STATUS ep_complete_rpc (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_STATUSBLK *blk, -+ EP_NMD *from, EP_NMD *to, int nFrags); -+extern void ep_complete_receive (EP_RXD *rxd); -+ -+/* railhints.c */ -+extern int ep_xmtr_bcastrail (EP_XMTR *xmtr, EP_RAILMASK allowedRails); -+extern int ep_xmtr_prefrail (EP_XMTR *xmtr, EP_RAILMASK allowedRails, unsigned nodeId); -+extern EP_RAILMASK ep_xmtr_availrails (EP_XMTR *xmtr); -+extern EP_RAILMASK ep_xmtr_noderails (EP_XMTR *xmtr, unsigned nodeId); -+extern int ep_rcvr_prefrail (EP_RCVR *rcvr, EP_RAILMASK allowedRails); -+extern EP_RAILMASK ep_rcvr_availrails (EP_RCVR *rcvr); -+extern EP_RAILMASK ep_rxd_railmask (EP_RXD *rxd); -+ -+/* epcomms.c functions for accessing fields of rxds */ -+extern void *ep_rxd_arg(EP_RXD *rxd); -+extern int ep_rxd_len(EP_RXD *rxd); -+extern 
EP_STATUS ep_rxd_status(EP_RXD *rxd); -+extern int ep_rxd_isrpc(EP_RXD *rxd); -+extern EP_ENVELOPE *ep_rxd_envelope(EP_RXD *rxd); -+extern EP_PAYLOAD *ep_rxd_payload(EP_RXD *rxd); -+extern int ep_rxd_node(EP_RXD *rxd); -+extern EP_STATUSBLK *ep_rxd_statusblk(EP_RXD *rxd); -+ -+/* functions for accessing fields of txds */ -+extern int ep_txd_node(EP_TXD *txd); -+extern EP_STATUSBLK *ep_txd_statusblk(EP_TXD *txd); -+ -+/* functions for controlling how many processes are using module */ -+extern void ep_mod_dec_usecount (void); -+extern void ep_mod_inc_usecount (void); -+ -+extern EP_RAILMASK ep_xmtr_svc_indicator_railmask (EP_XMTR *xmtr, int svc_indicator, int nodeId); -+extern int ep_xmtr_svc_indicator_bitmap (EP_XMTR *xmtr, int svc_indicator, bitmap_t * bitmap, int low, int nnodes); -+ -+#endif /* ! __ELAN__ */ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -+#endif /* __ELAN_EPCOMMS_H */ -+ -diff -urN clean/include/elan/epsvc.h linux-2.6.9/include/elan/epsvc.h ---- clean/include/elan/epsvc.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan/epsvc.h 2004-02-13 05:03:27.000000000 -0500 -@@ -0,0 +1,36 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN_EPSVC_H -+#define __ELAN_EPSVC_H -+ -+#ident "@(#)$Id: epsvc.h,v 1.9 2004/02/13 10:03:27 david Exp $" -+/* $Source: /cvs/master/quadrics/epmod/epsvc.h,v $ */ -+ -+ -+#define EP_SVC_NUM_INDICATORS 8 -+#define EP_SVC_INDICATOR_MAX_NAME 32 -+ -+#define EP_SVC_EIP 0 -+#define EP_SVC_NAMES {"eip", "1", "2", "3", "4", "5", "6", "7"}; -+ -+#if defined(__KERNEL__) -+extern int ep_svc_indicator_set (EP_SYS *epsys, int svc_indicator); -+extern int ep_svc_indicator_clear (EP_SYS *epsys, int svc_indicator); -+extern int ep_svc_indicator_is_set (EP_SYS *epsys, int svc_indicator, int nodeId); -+extern int ep_svc_indicator_bitmap (EP_SYS *epsys, int svc_indicator, bitmap_t * bitmap, int low, int nnodes); -+extern EP_RAILMASK ep_svc_indicator_railmask (EP_SYS *epsys, int svc_indicator, int nodeId); -+#endif -+ -+#endif /* __ELAN_EPSVC_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan/kalloc.h linux-2.6.9/include/elan/kalloc.h ---- clean/include/elan/kalloc.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan/kalloc.h 2004-05-19 06:23:59.000000000 -0400 -@@ -0,0 +1,108 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN3_KALLOC_H -+#define __ELAN3_KALLOC_H -+ -+#ident "$Id: kalloc.h,v 1.11 2004/05/19 10:23:59 david Exp $" -+/* $Source: /cvs/master/quadrics/epmod/kalloc.h,v $ */ -+ -+#include -+ -+/* -+ * Memory allocator -+ */ -+#define LN2_MIN_SIZE 6 /* 64 bytes */ -+#define LN2_MAX_SIZE 16 /* 64k bytes */ -+#define NUM_FREELISTS (LN2_MAX_SIZE-LN2_MIN_SIZE + 1) -+#define MIN_SIZE (1 << LN2_MIN_SIZE) -+#define MAX_SIZE (1 << LN2_MAX_SIZE) -+ -+#define HASHSHIFT LN2_MAX_SIZE -+#define NHASH 32 -+#define HASH(addr) (((addr) >> HASHSHIFT) & (NHASH-1)) -+ -+typedef enum -+{ -+ EP_ALLOC_TYPE_PRIVATE_SDRAM, -+ EP_ALLOC_TYPE_PRIVATE_MAIN, -+ EP_ALLOC_TYPE_SHARED_MAIN, -+} EP_ALLOC_TYPE; -+ -+typedef struct ep_pool -+{ -+ EP_NMH Handle; /* network mapping handle */ -+ -+ struct list_head HashBase; /* linked on hash lists */ -+ struct list_head HashTop; /* linked on hash lists */ -+ -+ struct list_head Link[NUM_FREELISTS]; /* linked on free lists */ -+ bitmap_t *Bitmaps[NUM_FREELISTS]; /* bitmaps for each size */ -+ -+ union { -+ sdramaddr_t Sdram; -+ unsigned long Ptr; -+ } Buffer; -+} EP_POOL; -+ -+typedef struct ep_alloc -+{ -+ spinlock_t Lock; -+ -+ EP_ALLOC_TYPE Type; -+ unsigned int Perm; -+ -+ EP_RMAP *ResourceMap; -+ -+ struct list_head HashBase[NHASH]; -+ struct list_head HashTop[NHASH]; -+ struct list_head Freelists[NUM_FREELISTS]; -+ -+ union { -+ struct { -+ EP_SYS *System; -+ struct list_head Rails; -+ } Shared; -+ -+ struct { -+ EP_RAIL *Rail; -+ } Private; -+ } Data; -+} EP_ALLOC; -+ -+extern void ep_display_alloc (EP_ALLOC *alloc); -+ -+extern void ep_alloc_init (EP_RAIL *rail); -+extern void ep_alloc_fini (EP_RAIL *rail); -+ -+extern sdramaddr_t ep_alloc_memory_elan (EP_RAIL *rail, EP_ADDR addr, unsigned size, unsigned int perm, EP_ATTRIBUTE attr); -+extern void ep_free_memory_elan (EP_RAIL *rail, EP_ADDR addr); -+ -+extern sdramaddr_t ep_alloc_elan (EP_RAIL *rail, unsigned size, EP_ATTRIBUTE attr, EP_ADDR *addrp); -+extern void ep_free_elan (EP_RAIL *rail, EP_ADDR addr, unsigned size); -+extern void *ep_alloc_main (EP_RAIL *rail, unsigned size, EP_ATTRIBUTE attr, EP_ADDR *addr); -+extern void ep_free_main (EP_RAIL *rail, EP_ADDR addr, unsigned size); -+ -+extern sdramaddr_t ep_elan2sdram (EP_RAIL *rail, EP_ADDR addr); -+extern void *ep_elan2main (EP_RAIL *rail, EP_ADDR addr); -+ -+extern void ep_shared_alloc_init (EP_SYS *sys); -+extern void ep_shared_alloc_fini (EP_SYS *sys); -+extern int ep_shared_alloc_add_rail (EP_SYS *sys, EP_RAIL *rail); -+extern void ep_shared_alloc_remove_rail (EP_SYS *sys, EP_RAIL *rail); -+ -+extern void *ep_shared_alloc_main (EP_SYS *sys, unsigned size, EP_ATTRIBUTE attr, EP_NMD *nmd); -+extern void ep_shared_free_main (EP_SYS *sys, EP_NMD *nmd); -+ -+#endif /* __ELAN_KALLOC_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan/kcomm.h linux-2.6.9/include/elan/kcomm.h ---- clean/include/elan/kcomm.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan/kcomm.h 2005-04-05 12:36:28.000000000 -0400 -@@ -0,0 +1,831 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ifndef __ELAN_KCOMM_H
-+#define __ELAN_KCOMM_H
-+
-+#ident "$Id: kcomm.h,v 1.82 2005/04/05 16:36:28 mike Exp $"
-+/* $Source: /cvs/master/quadrics/epmod/kcomm.h,v $*/
-+#define EP_KCOMM_MAJOR_VERSION		3
-+#define EP_KCOMM_MINOR_VERSION		1
-+
-+#define EP_PROTOCOL_VERSION		1	/* CM/KCOMM protocol revision */
-+
-+#define EP_MAX_NODES			2048	/* Max nodes we support */
-+#define EP_MAX_RAILS			16	/* max number of rails (we use an unsigned short for bitmaps !) */
-+#define EP_MAXFRAG			4	/* max number of fragments */
-+
-+#define EP_BLK_SIZE			64	/* align objects for elan access */
-+
-+/* Elan virtual address space */
-+#define EP_SYSTEM_QUEUE_BASE		0x00010000	/* Base address for system queues */
-+#define EP_MSGSYS_QUEUE_BASE		0x00020000	/* Base address for msgsys queues */
-+#define EP_EPCOMMS_QUEUE_BASE		0x00030000	/* Base address for message queues */
-+#define EP_DVMA_BASE			0x10000000	/* elan address range for dvma mapping. */
-+#define EP_DVMA_TOP			0xE0000000
-+
-+#define EP_SHARED_BASE			0xE0000000	/* shared main/elan allocators */
-+#define EP_SHARED_TOP			0xF0000000
-+
-+#define EP_PRIVATE_BASE			0xF0000000	/* private main/elan allocators */
-+#define EP_PRIVATE_TOP			0xF8000000
-+
-+#define EP_DVMA_RMAP_SIZE		1024		/* size of resource map for dvma address space */
-+#define EP_SHARED_RMAP_SIZE		1024		/* size of resource map for shared address space */
-+#define EP_PRIVATE_RMAP_SIZE		1024		/* size of resource map for private address space */
-+
-+/* Input queue descriptors fit into 64 bytes */
-+#define EP_QUEUE_DESC_SIZE		64
-+
-+/* Timeouts for checking network position */
-+#define EP_POSITION_TIMEOUT		(4*HZ)	/* 4s time to notice CheckNetworkPosition changes */
-+#define EP_WITHDRAW_TIMEOUT		(2*HZ)	/* 2s time before withdrawing from unreachable nodes */
-+
-+/* Time to try again due to resource failure (e.g. malloc etc) */
-+#define RESOURCE_RETRY_TIME		(HZ/20)
-+
-+/* Time to retransmit message when send failed */
-+#define MSGBUSY_RETRY_TIME		(HZ/20)
-+
-+/* Time between retransmits of messages / network flush requests */
-+#define MESSAGE_RETRY_TIME		(HZ/5)
-+
-+/* time to hold the context filter up to ensure that the
-+ * next packet of a dma is guaranteed to get nacked (8ms) */
-+#define NETWORK_ERROR_TIMEOUT		(1 + roundup (HZ * 8 / 1000, 1))
-+
-+/* Time between retransmits of message failover requests */
-+#define FAILOVER_RETRY_TIME		(HZ/5)
-+
-+/* compute earliest time */
-+#define SET_NEXT_RUN_TIME(nextRunTime, time) \
-+do { \
-+    if ((nextRunTime) == 0 || AFTER(nextRunTime, (time)))\
-+	(nextRunTime) = (time);\
-+} while (0)
-+
-+/* DMA retry backoff/priorities/issue rings */
-+#define EP_NUM_BACKOFF			8
-+#define EP_RETRY_STABALISING		0
-+#define EP_RETRY_BASE			1
-+
-+#define EP_RETRY_CRITICAL		EP_RETRY_BASE
-+#define EP_RETRY_HIGH_PRI		(EP_RETRY_CRITICAL + 1)
-+#define EP_RETRY_HIGH_PRI_TIME		(1)
-+#define EP_RETRY_HIGH_PRI_RETRY		(EP_RETRY_HIGH_PRI + 1)
-+#define EP_RETRY_HIGH_PRI_RETRY_TIME	(2)
-+#define EP_RETRY_LOW_PRI		(EP_RETRY_HIGH_PRI_RETRY + EP_NUM_BACKOFF)
-+#define EP_RETRY_LOW_PRI_TIME		(2)
-+#define EP_RETRY_LOW_PRI_RETRY		(EP_RETRY_LOW_PRI + 1)
-+#define EP_RETRY_LOW_PRI_RETRY_TIME	(4)
-+#define EP_RETRY_ANONYMOUS		(EP_RETRY_LOW_PRI_RETRY + EP_NUM_BACKOFF)
-+#define EP_RETRY_ANONYMOUS_TIME		(10)
-+#define EP_RETRY_NETERR			(EP_RETRY_ANONYMOUS + EP_NUM_BACKOFF)
-+#define EP_RETRY_NETERR_TIME		(10)
-+#define EP_NUM_RETRIES			(EP_RETRY_NETERR + 1)
-+
-+typedef unsigned short EP_SERVICE;
-+
-+/* EP_ATTRIBUTE 32 bits
-+ *
-+ * 0-2
-+ *   for initial call :-
-+ *     0 (0x1)	EP_NO_ALLOC		used once
-+ *     1 (0x2)	EP_NO_SLEEP		used once
-+ *     2 (0x4)	EP_NOT_MYSELF		used once
-+ *
-+ *   when stored and transmitted :-
-+ *     0 (0x1)	EP_MULTICAST		envelope
-+ *     1 (0x2)	EP_RPC			envelope
-+ *     2 (0x4)	EP_HAS_PAYLOAD		envelope
-+ *
-+ * 3-11
-+ *     3 (0x08)	EP_PREFRAIL_SET		preserved
-+ *   4-7 (0xf0)	Pref Rail
-+ *     8 (0x100)	EP_NO_INTERRUPT
-+ *     9 (0x200)	EP_NO_FAILOVER
-+ *
-+ *    10 (0x400)	EP_INTERRUPT_ENABLED	internal
-+ *    11 (0x800)	EP_TXD_STABALISING	internal
-+ *
-+ * 12-13	Not Used.
-+ *
-+ * 14-15 (0xC000)	Data Type.	passed in
-+ *    00 none.
-+ *    01 Service Indicator.
-+ *    10 TimeOut.
-+ *    11 RailMask
-+ *
-+ * 16-31 (0xffff0000)	Data.	Service Indicator, TimeOut, RailMask, Pref Rail.
-+ *
-+*/
-+
-+typedef uint32_t EP_ATTRIBUTE;
-+
-+#define EP_LOCAL_ATTR_MASK	0x07
-+#define EP_CLEAR_LOCAL_ATTR(ATTR)	( (ATTR) & ~EP_LOCAL_ATTR_MASK )
-+
-+#define EP_NO_ALLOC		0x01	/* Don't call allocators if no free descriptors */
-+#define EP_NO_SLEEP		0x02	/* Don't sleep if no free descriptors */
-+#define EP_NOT_MYSELF		0x04	/* Don't send multicast to me */
-+
-+#define EP_MULTICAST		0x01	/* Message is a multicast */
-+#define EP_RPC			0x02	/* Wait for RPC reply */
-+#define EP_HAS_PAYLOAD_BIT	0x04	/* transfer payload */
-+
-+
-+#define EP_PREFRAIL_SET		0x08	/* preferred rail is set (otherwise pick one from the NMDs) */
-+
-+#define EP_PREFRAIL_SHIFT	(4)
-+#define EP_PREFRAIL_MASK	0xf0
-+#define EP_IS_PREFRAIL_SET(ATTR)	(((ATTR) & EP_PREFRAIL_SET) != 0)
-+#define EP_CLEAR_PREFRAIL(ATTR)		(((ATTR) & ~EP_PREFRAIL_SET) & ~EP_PREFRAIL_MASK)
-+#define EP_SET_PREFRAIL(ATTR,RAIL)	(EP_CLEAR_PREFRAIL(ATTR) | (((RAIL) << EP_PREFRAIL_SHIFT ) & EP_PREFRAIL_MASK ) | EP_PREFRAIL_SET)
-+
-+
-+#define EP_ATTR2PREFRAIL(ATTR)	(((ATTR) & EP_PREFRAIL_MASK) >> EP_PREFRAIL_SHIFT)
-+
-+
-+#define EP_INTERRUPT_ENABLED	0x400	/* event interrupt enabled on EP_NO_INTERRUPT */
-+#define EP_TXD_STABALISING	0x800	/* flag to indicate this is attempting to stabilise */
-+
-+#define EP_IS_MULTICAST(ATTR)		(((ATTR) & EP_MULTICAST) != 0)
-+#define EP_SET_MULTICAST(ATTR)		( (ATTR) | EP_MULTICAST)
-+#define EP_CLEAR_MULTICAST(ATTR)	( (ATTR) & ~EP_MULTICAST)
-+
-+#define EP_IS_RPC(ATTR)			(((ATTR) & EP_RPC) != 0)
-+#define EP_SET_RPC(ATTR)		( (ATTR) | EP_RPC)
-+#define EP_CLEAR_RPC(ATTR)		( (ATTR) & ~EP_RPC)
-+
-+#define EP_HAS_PAYLOAD(ATTR)		(((ATTR) & EP_HAS_PAYLOAD_BIT) != 0)
-+#define EP_SET_HAS_PAYLOAD(ATTR)	( (ATTR) | EP_HAS_PAYLOAD_BIT)
-+#define EP_CLEAR_HAS_PAYLOAD(ATTR)	( (ATTR) & ~EP_HAS_PAYLOAD_BIT)
-+
-+#define EP_IS_INTERRUPT_ENABLED(ATTR)	(((ATTR) & EP_INTERRUPT_ENABLED) != 0)
-+#define EP_SET_INTERRUPT_ENABLED(ATTR)	( (ATTR) | EP_INTERRUPT_ENABLED)
-+#define EP_CLEAR_INTERRUPT_ENABLED(ATTR) ( (ATTR) & ~EP_INTERRUPT_ENABLED)
-+
-+#define EP_IS_TXD_STABALISING(ATTR)	(((ATTR) & EP_TXD_STABALISING) != 0)
-+#define EP_SET_TXD_STABALISING(ATTR)	( (ATTR) | EP_TXD_STABALISING)
-+#define EP_CLEAR_TXD_STABALISING(ATTR)	( (ATTR) & ~EP_TXD_STABALISING)
-+
-+#define EP_NO_INTERRUPT		0x100	/* Don't generate completion interrupt (tx) */
-+#define EP_NO_FAILOVER		0x200	/* don't attempt rail failover, just abort */
-+
-+#define EP_IS_NO_INTERRUPT(ATTR)	(((ATTR) & EP_NO_INTERRUPT) != 0)
-+#define EP_SET_NO_INTERRUPT(ATTR)	( (ATTR) | EP_NO_INTERRUPT)
-+#define EP_CLEAR_NO_INTERRUPT(ATTR)	( (ATTR) & ~EP_NO_INTERRUPT)
-+
-+#define EP_IS_NO_FAILOVER(ATTR)		(((ATTR) & EP_NO_FAILOVER) != 0)
-+#define EP_SET_NO_FAILOVER(ATTR)	( (ATTR) | EP_NO_FAILOVER)
-+#define EP_CLEAR_NO_FAILOVER(ATTR)	( (ATTR) & ~EP_NO_FAILOVER)
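-+
-+/* Example (illustrative only - "attr" and "rail" are hypothetical
-+ * names, not part of this interface): because each accessor above
-+ * masks only its own bits, the macros compose, so a transmit path
-+ * might build up an attribute word like this:
-+ *
-+ *	EP_ATTRIBUTE attr = 0;
-+ *
-+ *	attr = EP_SET_PREFRAIL (attr, 2);	    prefer rail 2
-+ *	attr = EP_SET_RPC (attr);		    wait for an RPC reply
-+ *	attr = EP_SET_NO_FAILOVER (attr);	    abort rather than failover
-+ *
-+ *	if (EP_IS_PREFRAIL_SET (attr))
-+ *	    rail = EP_ATTR2PREFRAIL (attr);	    recovers 2
-+ */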
-+
-+#define EP_TYPE_MASK		0xC000
-+#define EP_TYPE_SVC_INDICATOR	0x4000
-+#define EP_TYPE_TIMEOUT		0x8000
-+#define EP_TYPE_RAILMASK	0xC000
-+
-+#define EP_ATTR2TYPE(ATTR)	( (ATTR) & EP_TYPE_MASK )
-+
-+#define EP_IS_SVC_INDICATOR(ATTR)	(EP_ATTR2TYPE(ATTR) == EP_TYPE_SVC_INDICATOR)
-+#define EP_IS_TIMEOUT(ATTR)		(EP_ATTR2TYPE(ATTR) == EP_TYPE_TIMEOUT)
-+#define EP_IS_RAILMASK(ATTR)		(EP_ATTR2TYPE(ATTR) == EP_TYPE_RAILMASK)
-+#define EP_IS_NO_TYPE(ATTR)		(EP_ATTR2TYPE(ATTR) == 0)
-+
-+#define EP_DATA_SHIFT	(16)
-+#define EP_DATA_MASK	0xffff0000
-+
-+#define EP_ATTR2DATA(ATTR)	(((ATTR) & EP_DATA_MASK) >> EP_DATA_SHIFT)
-+#define EP_DATA2ATTR(DATA)	(((DATA) << EP_DATA_SHIFT) & EP_DATA_MASK)
-+
-+#define EP_CLEAR_DATA(ATTR)		(((ATTR) & ~EP_TYPE_MASK) & ~EP_DATA_MASK)
-+#define EP_SET_DATA(ATTR,TYPE,DATA)	(EP_CLEAR_DATA(ATTR) | ((TYPE) & EP_TYPE_MASK) | (((DATA) << EP_DATA_SHIFT) & EP_DATA_MASK))
-+
-+#define EP_DEFAULT_TIMEOUT	(HZ*30)
-+
-+#if !defined(offsetof)
-+#define offsetof(s, m)		(unsigned long)(&(((s *)0)->m))
-+#endif
-+#if !defined(roundup)
-+#define roundup(x, y)		((((x)+((y)-1))/(y))*(y))
-+#endif
-+
-+/*
-+ * Message transaction IDs - these are unique 64 bit
-+ * numbers which include the initial rail number.
-+ */
-+typedef struct ep_xid
-+{
-+    uint32_t	Generation;
-+    uint32_t	Handle;
-+    uint64_t	Unique;
-+} EP_XID;
-+
-+#define EP_INVALIDATE_XID(xid)	((xid).Generation = (xid).Handle = (xid).Unique = 0)
-+
-+#define EP_XID_INVALID(xid)	((xid).Generation == 0 && (xid).Handle == 0 && (xid).Unique == 0)
-+#define EP_XIDS_MATCH(a,b)	((a).Generation == (b).Generation && (a).Handle == (b).Handle && (a).Unique == (b).Unique)
-+
-+typedef struct ep_backoff
-+{
-+    unsigned char	type;
-+    unsigned char	indx;
-+    unsigned short	count;
-+} EP_BACKOFF;
-+
-+/* values for "type" */
-+#define EP_BACKOFF_FREE		0
-+#define EP_BACKOFF_ENVELOPE	1
-+#define EP_BACKOFF_FETCH	2
-+#define EP_BACKOFF_DATA		3
-+#define EP_BACKOFF_DONE		4
-+#define EP_BACKOFF_STABILISE	5
-+
-+#ifndef __ELAN__
-+
-+/* forward declaration of types */
-+typedef struct ep_rail	EP_RAIL;
-+typedef struct ep_sys	EP_SYS;
-+
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+
-+typedef struct ep_callback
-+{
-+    struct ep_callback	*Next;
-+    void	       (*Routine)(void *, statemap_t *);
-+    void		*Arg;
-+} EP_CALLBACK;
-+
-+#define EP_CB_FLUSH_FILTERING	0
-+#define EP_CB_FLUSH_FLUSHING	1
-+#define EP_CB_PASSIVATED	2
-+#define EP_CB_FAILOVER		3
-+#define EP_CB_DISCONNECTING	4
-+#define EP_CB_DISCONNECTED	5
-+#define EP_CB_NODESET		6
-+#define EP_CB_COUNT		7
-+
-+#endif /* !defined(__ELAN__) */
-+
-+/* Small unreliable system message queues */
-+#define EP_SYSTEMQ_INTR		0	/* input queue for cluster membership generating an interrupt */
-+#define EP_SYSTEMQ_POLLED	1	/* input queue for cluster membership polled on clock tick */
-+#define EP_SYSTEMQ_MANAGER	2	/* input queue for manager messages */
-+#define EP_NUM_SYSTEMQ		64
-+
-+#define EP_SYSTEMQ_ADDR(qnum)		(EP_SYSTEM_QUEUE_BASE + (qnum) * EP_QUEUE_DESC_SIZE)
-+#define EP_SYSTEMQ_DESC(base,qnum)	((base) + (qnum) * EP_QUEUE_DESC_SIZE)
-+
-+#define EP_SYSTEMQ_MSG_ALIGN	64	/* message sizes aligned to 64 byte boundaries */
-+#define EP_SYSTEMQ_MSG_MAX	(4*64)	/* max message size */
-+
-+/* Special flag for Version field to indicate message not
-+ * seen in main memory yet and time limit to poll for it */
-+#define EP_SYSTEMQ_UNRECEIVED		0xdeadbabe
-+#define EP_SYSTEMQ_UNRECEIVED_TLIMIT	16384		/* 1023 uS */
-+
-+#ifndef __ELAN__
-+
-+typedef void (EP_INPUTQ_HANDLER) (EP_RAIL *rail, void *arg, void *msg);
-+typedef void (EP_INPUTQ_CALLBACK) (EP_RAIL *rail, void *arg);
-+
-+typedef struct ep_inputq
-+{
-+    unsigned long	q_hidden;	/* implementation hidden as ep3 or ep4 */
-+} EP_INPUTQ;
-+
-+typedef struct ep_outputq
-+{
-+    unsigned long	q_hidden;	/* implementation hidden as ep3 or ep4 */
-+} EP_OUTPUTQ;
-+
-+/* returned values for ep_outputq_state */
-+#define EP_OUTPUTQ_BUSY		0
-+#define EP_OUTPUTQ_FAILED	1
-+#define EP_OUTPUTQ_FINISHED	2
-+
-+typedef struct ep_switch
-+{
-+    unsigned	present:1;
-+    unsigned	invalid:1;
-+    unsigned	link:3;
-+    unsigned	bcast:3;
-+    unsigned	lnr;
-+} EP_SWITCH;
-+
-+/*
-+ * Network error fixup, flush, relocation messages
-+ */
-+typedef struct ep_map_nmd_body
-+{
-+    uint32_t		nFrags;
-+    EP_RAILMASK		Railmask;
-+    EP_NMD		Nmd[EP_MAXFRAG];
-+} EP_MAP_NMD_BODY;
-+
-+typedef struct ep_failover_body
-+{
-+    EP_XID		Xid;
-+    EP_RAILMASK		Railmask;
-+} EP_FAILOVER_BODY;
-+
-+typedef struct ep_failover_txd
-+{
-+    EP_XID		Xid;
-+    uint32_t		Rail;
-+    EP_ADDR		TxdRail;
-+} EP_FAILOVER_TXD;
-+
-+typedef uint64_t EP_NETERR_COOKIE;
-+
-+#define EP_PANIC_STRLEN		31
-+
-+typedef struct ep_node_state
-+{
-+    unsigned char	State;
-+    unsigned char	NetworkErrorState;
-+    EP_RAILMASK		Railmask;
-+} EP_NODE_STATE;
-+
-+#define EP_MANAGER_MSG_SIZE	(2 * EP_SYSTEMQ_MSG_ALIGN)
-+
-+typedef struct ep_manager_msg_hdr
-+{
-+    EP_XID		Xid;		/* Message transaction id */
-+
-+    uint16_t		NodeId;		/* Originating node number */
-+    uint16_t		DestId;		/* destination node id */
-+
-+    uint16_t		Checksum;	/* Message checksum */
-+    uint8_t		Rail;		/* Rail message associated with */
-+    uint8_t		Type;		/* Message type */
-+
-+    uint32_t		Pad;		/* pad to 32 bytes */
-+
-+    uint32_t		Version;	/* Message Version */
-+} EP_MANAGER_MSG_HDR;
-+
-+typedef union ep_manager_msg_body
-+{
-+    unsigned char	Space[EP_MANAGER_MSG_SIZE - sizeof (EP_MANAGER_MSG_HDR)];
-+
-+    EP_NETERR_COOKIE	Cookies[2];			/* EP_MSG_TYPE_NETERR */
-+    EP_MAP_NMD_BODY	MapNmd;				/* EP_MSG_TYPE_MAP_NMD */
-+    EP_FAILOVER_BODY	Failover;			/* EP_MSG_TYPE_FAILOVER_REQUEST */
-+    EP_FAILOVER_TXD	FailoverTxd;			/* EP_MSG_TYPE_FAILOVER_RESPONSE */
-+    unsigned char	PanicReason[EP_PANIC_STRLEN+1];	/* EP_MSG_TYPE_REMOTE_PANIC */
-+    EP_NODE_STATE	NodeState;			/* EP_MSG_TYPE_GET_NODE_STATE_RESPONSE */
-+    EP_SERVICE		Service;			/* EP_MSG_TYPE_GET_NODE_STATE */
-+} EP_MANAGER_MSG_BODY;
-+
-+typedef struct ep_manager_msg
-+{
-+    EP_MANAGER_MSG_BODY	Body;
-+    EP_MANAGER_MSG_HDR	Hdr;
-+} EP_MANAGER_MSG;
-+
-+#define EP_MANAGER_MSG_VERSION				0xcad01000
-+#define EP_MANAGER_MSG_TYPE_REMOTE_PANIC		0x00
-+#define EP_MANAGER_MSG_TYPE_NETERR_REQUEST		0x01
-+#define EP_MANAGER_MSG_TYPE_NETERR_RESPONSE		0x02
-+#define EP_MANAGER_MSG_TYPE_FLUSH_REQUEST		0x03
-+#define EP_MANAGER_MSG_TYPE_FLUSH_RESPONSE		0x04
-+#define EP_MANAGER_MSG_TYPE_MAP_NMD_REQUEST		0x05
-+#define EP_MANAGER_MSG_TYPE_MAP_NMD_RESPONSE		0x06
-+#define EP_MANAGER_MSG_TYPE_FAILOVER_REQUEST		0x07
-+#define EP_MANAGER_MSG_TYPE_FAILOVER_RESPONSE		0x08
-+#define EP_MANAGER_MSG_TYPE_GET_NODE_STATE		0x09
-+#define EP_MANAGER_MSG_TYPE_GET_NODE_STATE_RESPONSE	0x0a
-+
-+/* Message types which should only be sent when a rail is connected */
-+#define EP_MANAGER_MSG_TYPE_CONNECTED(type)	(((type) & 1) == 1)
-+
-+#define EP_MANAGER_OUTPUTQ_SLOTS	128	/* # entries in outputq */
-+#define EP_MANAGER_INPUTQ_SLOTS		128	/* # entries in inputq */
-+#define EP_MANAGER_OUTPUTQ_RETRIES	31	/* # retries for manager messages */
-+
-+/* XIDs are allocated from a cache, which doesn't
-+ * require locking since it relies on the caller to
-+ * manage the locking for us.
-+ */
-+typedef struct ep_xid_cache
-+{
-+    struct list_head	Link;
-+
-+    uint32_t		Handle;		/* my XID cache handle */
-+    uint64_t		Current;	/* range of XID.Unique we can allocate from */
-+    uint64_t		Last;
-+
-+    void	      (*MessageHandler)(void *arg, EP_MANAGER_MSG *);
-+    void		*Arg;
-+} EP_XID_CACHE;
-+
-+#define EP_XID_CACHE_CHUNKS	(10000)
-+
-+typedef struct ep_node_rail
-+{
-+    struct list_head	Link;			/* can be linked on work lists */
-+
-+    unsigned char	State;			/* node connection state */
-+    unsigned char	NetworkErrorState;	/* reasons for keeping the context filter up */
-+    unsigned char	MessageState;		/* state of messages during passivate/relocate */
-+
-+    EP_XID		MsgXid;			/* neterr/flush transaction id */
-+    long		NextRunTime;		/* time to drop context filter for destroyed dma packet, or to send next request */
-+    EP_NETERR_COOKIE	NetworkErrorCookies[2];	/* identify cookie for destroyed atomic packet */
-+
-+    uint32_t		Cookie;			/* per-node network error cookie */
-+    spinlock_t		CookieLock;		/* and spinlock for it. */
-+
-+    struct list_head	StalledDmas;		/* list of stalled DMAs */
-+} EP_NODE_RAIL;
-+
-+#define EP_NODE_DISCONNECTED		0	/* node is disconnected */
-+#define EP_NODE_CONNECTING		1	/* awaiting connection */
-+#define EP_NODE_CONNECTED		2	/* node is connected */
-+#define EP_NODE_LEAVING_CONNECTED	3	/* node is starting to disconnect */
-+#define EP_NODE_LOCAL_PASSIVATE		4	/* flushing context filter/run queues */
-+#define EP_NODE_REMOTE_PASSIVATE	5	/* stalling for neterr flush */
-+#define EP_NODE_PASSIVATED		6	/* relocating active/passive messages */
-+#define EP_NODE_DISCONNECTING		7	/* entering disconnected - abort remaining comms */
-+#define EP_NODE_NUM_STATES		8
-+
-+#define EP_NODE_NETERR_ATOMIC_PACKET	(1 << 0)
-+#define EP_NODE_NETERR_DMA_PACKET	(1 << 1)
-+
-+#define EP_NODE_PASSIVE_MESSAGES	(1 << 0)
-+#define EP_NODE_ACTIVE_MESSAGES		(1 << 1)
-+
-+/*
-+ * Kernel thread code is loaded as a table.
-+ */ -+typedef struct ep_symbol -+{ -+ char *name; -+ EP_ADDR value; -+} EP_SYMBOL; -+ -+typedef struct ep_code -+{ -+ u_char *text; -+ u_int text_size; -+ u_char *data; -+ u_int data_size; -+ u_char *rodata; -+ u_int rodata_size; -+ EP_SYMBOL *symbols; -+ -+ int ntext; -+ sdramaddr_t pptext; -+ EP_ADDR etext; -+ sdramaddr_t _stext; -+ sdramaddr_t _rodata; -+ -+ int ndata; -+ sdramaddr_t ppdata; -+ EP_ADDR edata; -+ sdramaddr_t _sdata; -+} EP_CODE; -+ -+typedef struct ep_switchstate -+{ -+ unsigned char linkid; -+ unsigned char LNR; -+ unsigned char bcast; -+ unsigned char uplink; -+} EP_SWITCHSTATE; -+ -+typedef struct ep_rail_ops -+{ -+ void (*DestroyRail) (EP_RAIL *rail); -+ -+ int (*StartRail) (EP_RAIL *rail); -+ void (*StallRail) (EP_RAIL *rail); -+ void (*StopRail) (EP_RAIL *rail); -+ -+ sdramaddr_t (*SdramAlloc) (EP_RAIL *rail, EP_ADDR addr, unsigned size); -+ void (*SdramFree) (EP_RAIL *rail, sdramaddr_t addr, unsigned size); -+ void (*SdramWriteb) (EP_RAIL *rail, sdramaddr_t addr, unsigned char val); -+ -+ void (*KaddrMap) (EP_RAIL *rail, EP_ADDR eaddr, virtaddr_t kaddr, unsigned len, unsigned int perm, int ep_attr); -+ void (*SdramMap) (EP_RAIL *rail, EP_ADDR eaddr, sdramaddr_t saddr, unsigned len, unsigned int perm, int ep_attr); -+ void (*Unmap) (EP_RAIL *rail, EP_ADDR eaddr, unsigned len); -+ -+ void *(*DvmaReserve) (EP_RAIL *rail, EP_ADDR eaddr, unsigned npages); -+ void (*DvmaRelease) (EP_RAIL *rail, EP_ADDR eaddr, unsigned npages, void *private); -+ void (*DvmaSetPte) (EP_RAIL *rail, void *private, unsigned index, physaddr_t phys, unsigned int perm); -+ physaddr_t (*DvmaReadPte) (EP_RAIL *rail, void *private, unsigned index); -+ void (*DvmaUnload)(EP_RAIL *rail, void *private, unsigned index, unsigned npages); -+ void (*FlushTlb) (EP_RAIL *rail); -+ -+ int (*ProbeRoute) (EP_RAIL *r, int level, int sw, int nodeid, int *linkup, -+ int *linkdown, int attempts, EP_SWITCH *lsw); -+ void (*PositionFound) (EP_RAIL *rail, ELAN_POSITION *pos); -+ int (*CheckPosition) (EP_RAIL *rail); -+ void (*NeterrFixup) (EP_RAIL *rail, unsigned int nodeId, EP_NETERR_COOKIE *cookies); -+ -+ void (*LoadSystemRoute) (EP_RAIL *rail, unsigned int vp, unsigned int lowNode, unsigned int highNode); -+ -+ void (*LoadNodeRoute) (EP_RAIL *rail, unsigned nodeId); -+ void (*UnloadNodeRoute) (EP_RAIL *rail, unsigned nodeId); -+ void (*LowerFilter) (EP_RAIL *rail, unsigned nodeId); -+ void (*RaiseFilter) (EP_RAIL *rail, unsigned nodeId); -+ void (*NodeDisconnected) (EP_RAIL *rail, unsigned nodeId); -+ -+ void (*FlushFilters) (EP_RAIL *rail); -+ void (*FlushQueues) (EP_RAIL *rail); -+ -+ -+ EP_INPUTQ *(*AllocInputQ) (EP_RAIL *rail, unsigned qnum, unsigned slotSize, unsigned slotCount, -+ void (*callback)(EP_RAIL *rail, void *arg), void *arg); -+ void (*FreeInputQ) (EP_RAIL *rail, EP_INPUTQ *q); -+ void (*EnableInputQ) (EP_RAIL *rail, EP_INPUTQ *q); -+ void (*DisableInputQ) (EP_RAIL *rail, EP_INPUTQ *q); -+ int (*PollInputQ) (EP_RAIL *rail, EP_INPUTQ *q, int maxCount, EP_INPUTQ_HANDLER *handler, void *arg); -+ -+ EP_OUTPUTQ *(*AllocOutputQ) (EP_RAIL *rail, unsigned slotSize, unsigned slotCount); -+ void (*FreeOutputQ) (EP_RAIL *rail, EP_OUTPUTQ *outputq); -+ void *(*OutputQMsg) (EP_RAIL *rail, EP_OUTPUTQ *outputq, unsigned slotNum); -+ int (*OutputQState) (EP_RAIL *rail, EP_OUTPUTQ *outputq, unsigned slotNum); -+ int (*OutputQSend) (EP_RAIL *rail, EP_OUTPUTQ *outputq, unsigned slotNum, unsigned size, -+ unsigned vp, unsigned qnum, unsigned retries); -+ -+ void (*FillOutStats) (EP_RAIL *rail, char *str); -+ void 
(*Debug) (EP_RAIL *rail); -+ -+} EP_RAIL_OPS; -+ -+#define ep_alloc_inputq(rail,qnum,slotSize,slotCount,callback,arg) \ -+ (rail)->Operations.AllocInputQ(rail,qnum,slotSize,slotCount,callback,arg) -+#define ep_free_inputq(rail,inputq) \ -+ (rail)->Operations.FreeInputQ(rail,inputq) -+#define ep_enable_inputq(rail,inputq) \ -+ (rail)->Operations.EnableInputQ(rail,inputq) -+#define ep_disable_inputq(rail,inputq) \ -+ (rail)->Operations.DisableInputQ(rail,inputq) -+#define ep_poll_inputq(rail,inputq,maxCount,handler,arg) \ -+ (rail)->Operations.PollInputQ(rail,inputq,maxCount,handler,arg) -+#define ep_alloc_outputq(rail,slotSize,slotCount)\ -+ (rail)->Operations.AllocOutputQ(rail,slotSize,slotCount) -+#define ep_free_outputq(rail,outputq)\ -+ (rail)->Operations.FreeOutputQ(rail,outputq) -+#define ep_outputq_msg(rail,outputq,slotNum)\ -+ (rail)->Operations.OutputQMsg(rail,outputq,slotNum) -+#define ep_outputq_state(rail,outputq,slotNum)\ -+ (rail)->Operations.OutputQState(rail,outputq,slotNum) -+#define ep_outputq_send(rail,outputq,slotNum,size,vp,qnum,retries)\ -+ (rail)->Operations.OutputQSend(rail,outputq,slotNum,size,vp,qnum,retries) -+ -+struct ep_rail -+{ -+ EP_SYS *System; /* "system" we've attached to */ -+ -+ unsigned char Number; /* Rail number */ -+ unsigned char State; /* Rail state */ -+ char Name[32]; /* Rail name */ -+ -+ struct list_head ManagerLink; /* linked on ManagedRails list */ -+ -+ ELAN_DEVINFO Devinfo; /* Device information for this rail */ -+ ELAN_POSITION Position; /* Position on switch device is connected to */ -+ -+ EP_RAIL_OPS Operations; /* device specific operations */ -+ EP_RAIL_STATS Stats; /* statistics */ -+ -+ EP_ALLOC ElanAllocator; /* per-rail elan memory allocator */ -+ EP_ALLOC MainAllocator; /* per-rail main memory allocator */ -+ -+ unsigned TlbFlushRequired; /* lazy TLB flushing */ -+ -+ int SwitchBroadcastLevel; /* current switch level ok for broadcast */ -+ unsigned long SwitchBroadcastLevelTick; -+ -+ int SwitchProbeLevel; /* result of last switch probe */ -+ EP_SWITCHSTATE SwitchState[ELAN_MAX_LEVELS]; -+ EP_SWITCHSTATE SwitchLast[ELAN_MAX_LEVELS]; -+ unsigned long SwitchProbeTick[ELAN_MAX_LEVELS]; -+ -+ /* Node disconnecting/connecting state */ -+ EP_CALLBACK *CallbackList[EP_CB_COUNT]; /* List of callbacks */ -+ kmutex_t CallbackLock; /* and lock for it. */ -+ unsigned CallbackStep; /* step through UpdateConnectionState. 
*/
-+
-+    /* back pointer for cluster membership */
-+    void		*ClusterRail;
-+
-+    /* Per node state for message passing */
-+    EP_NODE_RAIL	*Nodes;			/* array of per-node state */
-+    statemap_t		*NodeSet;		/* per-rail statemap of connected nodes */
-+    statemap_t		*NodeChangeMap;		/* statemap of nodes being connected/disconnected */
-+    statemap_t		*NodeChangeTmp;		/* and temporary copies */
-+
-+    struct list_head	NetworkErrorList;	/* list of nodes resolving network errors */
-+    struct list_head	LocalPassivateList;	/* list of nodes in state LOCAL_PASSIVATE */
-+    struct list_head	RemotePassivateList;	/* list of nodes waiting for remote network error flush */
-+    struct list_head	PassivatedList;		/* list of nodes performing message relocation */
-+    struct list_head	DisconnectingList;	/* list of nodes transitioning to disconnected */
-+
-+    EP_XID_CACHE	XidCache;		/* XID cache for node messages (single threaded access) */
-+
-+    /* Manager messages */
-+    EP_INPUTQ		*ManagerInputQ;
-+    EP_OUTPUTQ		*ManagerOutputQ;
-+    unsigned		ManagerOutputQNextSlot;
-+    spinlock_t		ManagerOutputQLock;
-+
-+    /* /proc entries */
-+    struct proc_dir_entry *ProcDir;
-+    struct proc_dir_entry *SvcIndicatorDir;
-+    int			CallbackRegistered;
-+};
-+
-+/* values for State */
-+#define EP_RAIL_STATE_UNINITIALISED	0	/* device uninitialised */
-+#define EP_RAIL_STATE_STARTED		1	/* device started but network position unknown */
-+#define EP_RAIL_STATE_RUNNING		2	/* device started and position known */
-+#define EP_RAIL_STATE_INCOMPATIBLE	3	/* device started, but position incompatible */
-+
-+typedef struct ep_rail_entry
-+{
-+    struct list_head	Link;
-+    EP_RAIL		*Rail;
-+} EP_RAIL_ENTRY;
-+
-+typedef struct ep_subsys
-+{
-+    EP_SYS		*Sys;
-+
-+    struct list_head	Link;			/* Linked on sys->Subsystems */
-+    char		*Name;			/* Name to lookup */
-+
-+    void	      (*Destroy) (struct ep_subsys *subsys, EP_SYS *sys);
-+
-+    int		      (*AddRail) (struct ep_subsys *subsys, EP_SYS *sys, EP_RAIL *rail);
-+    void	      (*RemoveRail) (struct ep_subsys *subsys, EP_SYS *sys, EP_RAIL *rail);
-+} EP_SUBSYS;
-+
-+typedef struct ep_node
-+{
-+    EP_RAILMASK		ConnectedRails;
-+} EP_NODE;
-+
-+struct ep_sys
-+{
-+    EP_RAIL		*Rails[EP_MAX_RAILS];	/* array of all available devices */
-+
-+    kmutex_t		StartStopLock;		/* lock for starting/stopping rails */
-+
-+    ELAN_POSITION	Position;		/* primary node position */
-+
-+    EP_NMH_TABLE	MappingTable;		/* Network mapping handle table */
-+
-+    EP_ALLOC		Allocator;		/* shared main memory allocator */
-+
-+    EP_DVMA_STATE	DvmaState;		/* dvma state */
-+
-+    kmutex_t		SubsysLock;		/* lock on the Subsystems list */
-+    struct list_head	Subsystems;		/* list of subsystems */
-+
-+    /* device manager state */
-+    struct list_head	ManagedRails;		/* list of managed devices */
-+    EP_KTHREAD		ManagerThread;		/* place for manager thread to sleep */
-+
-+    /* global node state */
-+    spinlock_t		NodeLock;		/* spinlock for node state (including per-device node state) */
-+    EP_NODE		*Nodes;			/* system wide node state */
-+    statemap_t		*NodeSet;		/* system wide nodeset */
-+    struct list_head	NodesetCallbackList;	/* list of "callbacks" */
-+
-+    /* Transaction Id */
-+    struct list_head	XidCacheList;		/* list of XID caches */
-+    uint32_t		XidGeneration;		/* XID generation number (distinguishes reboots) */
-+    uint32_t		XidHandle;		/* XID handles (distinguishes XID caches) */
-+    uint64_t		XidNext;		/* next XID to prime cache */
-+    spinlock_t		XidLock;		/* and its spinlock */
-+
-+    /* Shutdown/Panic */
-+    unsigned int	Shutdown;		/* node has shutdown/panic'd */
-+};
-+
-+#if defined(DEBUG_ASSERT)
-+extern int ep_assfail
(EP_RAIL *rail, const char *string, const char *func, const char *file, const int line); -+extern int sdram_assert; -+extern int assfail_mode; -+ -+#define EP_ASSERT(rail, EX) do { \ -+ if (!(EX) && ep_assfail ((EP_RAIL *) (rail), #EX, __FUNCTION__, __FILE__, __LINE__)) { \ -+ BUG(); \ -+ } \ -+} while (0) -+#define EP_ASSFAIL(rail,EX) do { \ -+ if (ep_assfail ((EP_RAIL *) (rail), EX, __FUNCTION__, __FILE__, __LINE__)) { \ -+ BUG(); \ -+ } \ -+} while (0) -+#define SDRAM_ASSERT(EX) (sdram_assert ? (EX) : 1) -+#else -+#define EP_ASSERT(rail, EX) ((void) 0) -+#define EP_ASSFAIL(rail,str) ((void) 0) -+#define SDRAM_ASSERT(EX) (1) -+#endif -+ -+/* conf_osdep.c */ -+extern EP_SYS *ep_system(void); -+extern void ep_mod_dec_usecount (void); -+extern void ep_mod_inc_usecount (void); -+ -+/* procfs_osdep.c */ -+extern struct proc_dir_entry *ep_procfs_root; -+extern struct proc_dir_entry *ep_config_root; -+ -+/* kcomm.c */ -+extern int ep_sys_init (EP_SYS *sys); -+extern void ep_sys_fini (EP_SYS *sys); -+extern void ep_shutdown (EP_SYS *sys); -+extern int ep_init_rail (EP_SYS *sys, EP_RAIL *rail); -+extern void ep_destroy_rail (EP_RAIL *rail); -+extern int ep_start_rail (EP_RAIL *rail); -+extern void ep_stop_rail (EP_RAIL *rail); -+ -+extern void ep_connect_node (EP_RAIL *rail, int nodeId); -+extern int ep_disconnect_node (EP_RAIL *rail, int nodeId); -+ -+extern EP_XID ep_xid_cache_alloc (EP_SYS *sys, EP_XID_CACHE *cache); -+extern void ep_xid_cache_init (EP_SYS *sys, EP_XID_CACHE *cache); -+extern void ep_xid_cache_destroy (EP_SYS *sys, EP_XID_CACHE *cache); -+ -+extern int ep_send_message (EP_RAIL *rail, int nodeId, int type, EP_XID xid, EP_MANAGER_MSG_BODY *body); -+ -+extern void ep_panic_node (EP_SYS *sys, int nodeId, unsigned char *reason); -+ -+extern void ep_subsys_add (EP_SYS *sys, EP_SUBSYS *subsys); -+extern void ep_subsys_del (EP_SYS *sys, EP_SUBSYS *subsys); -+extern EP_SUBSYS *ep_subsys_find (EP_SYS *sys, char *name); -+ -+extern void DisplayNodes (EP_RAIL *rail); -+ -+extern void ep_fillout_stats(EP_RAIL *rail, char *str); -+ -+/* neterr.c */ -+extern void ep_queue_network_error (EP_RAIL *rail, int nodeId, int what, int channel, EP_NETERR_COOKIE cookie); -+ -+/* kcomm_elan3.c */ -+extern unsigned int ep3_create_rails (EP_SYS *sys, unsigned int disabled); -+ -+/* kcomm_elan4.c */ -+extern unsigned int ep4_create_rails (EP_SYS *sys, unsigned int disabled); -+ -+/* probenetwork.c */ -+extern int ProbeNetwork (EP_RAIL *rail, ELAN_POSITION *pos); -+extern void CheckPosition (EP_RAIL *rail); -+ -+extern uint16_t CheckSum (char *msg, int nob); -+ -+/* threadcode.c */ -+extern EP_ADDR ep_symbol (EP_CODE *code, char *name); -+extern int ep_loadcode (EP_RAIL *rail, EP_CODE *code); -+extern void ep_unloadcode (EP_RAIL *rail, EP_CODE *code); -+ -+/* Public interface */ -+/* debug.c */ -+extern int ep_sprintf_bitmap (char *str, unsigned nbytes, bitmap_t *bitmap, int base, int count, int off); -+extern void ep_display_bitmap (char *prefix, char *tag, bitmap_t *bitmap, unsigned base, unsigned nbits); -+ -+/* epcomms.c */ -+extern int ep_waitfor_nodeid (EP_SYS *sys); -+extern int ep_nodeid (EP_SYS *sys); -+extern int ep_numnodes (EP_SYS *sys); -+ -+/* railhints.c */ -+extern int ep_pickRail(EP_RAILMASK railmask); -+ -+/* support.c */ -+extern int ep_register_nodeset_callback (EP_SYS *sys, void (*routine)(void *, statemap_t *), void *arg); -+extern void ep_remove_nodeset_callback (EP_SYS *sys, void (*routine)(void *, statemap_t *), void *arg); -+extern void ep_call_nodeset_callbacks (EP_SYS *sys, 
statemap_t *map); -+ -+extern int ep_register_callback (EP_RAIL *rail, unsigned idx, void (*routine)(void *, statemap_t *), void *arg); -+extern void ep_remove_callback (EP_RAIL *rail, unsigned idx, void (*routine)(void *, statemap_t *), void *arg); -+extern void ep_call_callbacks (EP_RAIL *rail, unsigned idx, statemap_t *); -+extern unsigned int ep_backoff (EP_BACKOFF *backoff, int type); -+ -+#endif /* !__ELAN__ */ -+ -+#endif /* __ELAN_KCOMM_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan/kcomm_stats.h linux-2.6.9/include/elan/kcomm_stats.h ---- clean/include/elan/kcomm_stats.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan/kcomm_stats.h 2005-05-31 07:42:43.000000000 -0400 -@@ -0,0 +1,153 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __EP_EPSTATS_H -+#define __EP_EPSTATS_H -+ -+#ident "$Id: kcomm_stats.h,v 1.5.2.1 2005/05/31 11:42:43 mike Exp $" -+/* $Source: /cvs/master/quadrics/epmod/kcomm_stats.h,v $ */ -+ -+#define EP_BUCKET_SLOTS 8 -+ -+#define BucketStat(obj,stat,size) ((size) < 128 ? (obj)->Stats.stat[0]++ : \ -+ (size) < 512 ? (obj)->Stats.stat[1]++ : \ -+ (size) < 1024 ? (obj)->Stats.stat[2]++ : \ -+ (size) < 8192 ? (obj)->Stats.stat[3]++ : \ -+ (size) < 16384 ? (obj)->Stats.stat[4]++ : \ -+ (size) < 32768 ? (obj)->Stats.stat[5]++ : \ -+ (size) < 65536 ? (obj)->Stats.stat[6]++ : \ -+ (obj)->Stats.stat[7]++) -+#define IncrStat(obj,stat) ((obj)->Stats.stat++) -+ -+ -+#define EP3_NUM_DMA_FAIL 11 /* NOTE - the same as EP_NUM_RETRIES */ -+ -+#define ADD_STAT(STATS,STAT,VALUE) { unsigned long now = lbolt;\ -+ STATS.STAT.total += VALUE; \ -+ if ( (long)( now - STATS.STAT.last_time) > HZ ){ \ -+ STATS.STAT.last_per_sec = ( STATS.STAT.total - STATS.STAT.last_count)/ ( ( (long)( now - STATS.STAT.last_time) + (HZ/2)) / HZ);\ -+ STATS.STAT.last_time = now; \ -+ STATS.STAT.last_count = STATS.STAT.total; \ -+ }} \ -+ -+#define INC_STAT(STATS,STAT) ADD_STAT(STATS,STAT,1) -+ -+#define GET_STAT_PER_SEC(STATS, STAT) ( (( lbolt - STATS.STAT.last_time ) < (HZ * 5)) ? 
STATS.STAT.last_per_sec : 0 ) -+#define GET_STAT_TOTAL(STATS, STAT) ( STATS.STAT.total ) -+ -+struct ep_stats_count -+{ -+ unsigned long total; -+ unsigned long last_time; -+ unsigned long last_count; -+ unsigned long last_per_sec; -+}; -+ -+typedef struct ep_stats_count EP_STATS_COUNT; -+ -+typedef struct ep3_rail_stats -+{ -+ unsigned long IssueDmaFail[EP3_NUM_DMA_FAIL]; -+ -+ unsigned long DmaQueueLength[EP_BUCKET_SLOTS]; -+ unsigned long CprocDmaQueueOverflow; -+ unsigned long DprocDmaQueueOverflow; -+ unsigned long IprocDmaQueueOverflow; -+ unsigned long CprocEventQueueOverflow; -+ unsigned long DprocEventQueueOverflow; -+ unsigned long IprocEventQueueOverflow; -+ -+ unsigned long QueueingPacketTrap; -+ unsigned long DmaIdentifyTrap; -+ unsigned long ThreadIdentifyTrap; -+ unsigned long DmaPacketTrap; -+} EP3_RAIL_STATS; -+ -+typedef struct ep4_rail_stats -+{ -+ unsigned long somestatsgohere; -+} EP4_RAIL_STATS; -+ -+typedef struct ep_rail_stats -+{ -+ unsigned long SendMessageFailed; -+ unsigned long NeterrAtomicPacket; -+ unsigned long NeterrDmaPacket; -+ -+ EP_STATS_COUNT rx; -+ EP_STATS_COUNT rx_len; -+ -+ EP_STATS_COUNT tx; -+ EP_STATS_COUNT tx_len; -+ -+} EP_RAIL_STATS; -+ -+typedef struct ep_cm_rail_stats -+{ -+ /* cluster membership statistics */ -+ unsigned long HeartbeatsSent; -+ unsigned long HeartbeatsRcvd; -+ -+ unsigned long RetryHeartbeat; -+ unsigned long RejoinRequest; -+ unsigned long RejoinTooSlow; -+ unsigned long LaunchMessageFail; -+ unsigned long MapChangesSent; -+ -+ /* Heartbeat scheduling stats */ -+ unsigned long HeartbeatOverdue; -+} EP_CM_RAIL_STATS; -+ -+typedef struct ep_comms_rail_stats -+{ -+ /* kernel comms large message statistics */ -+ unsigned long TxEnveEvent; -+ unsigned long TxDataEvent; -+ unsigned long TxDoneEvent; -+ unsigned long RxDoneEvent; -+ unsigned long MulticastTxDone; -+ unsigned long QueueReceive; -+ -+ unsigned long TxEnveRetry; -+ unsigned long TxDataRetry; -+ unsigned long TxDoneRetry; -+ unsigned long RxThrdEvent; -+ unsigned long RxDataRetry; -+ unsigned long RxDoneRetry; -+ unsigned long StallThread; -+ unsigned long ThrdWaiting; -+ unsigned long CompleteEnvelope; -+ -+ unsigned long NoFreeTxds; -+ unsigned long NoFreeRxds; -+ -+ unsigned long LockRcvrTrapped; -+} EP_COMMS_RAIL_STATS; -+ -+typedef struct ep_comms_stats -+{ -+ unsigned long DataXmit[8]; -+ unsigned long McastXmit[8]; -+ unsigned long RPCXmit[8]; -+ unsigned long RPCPut[8]; -+ unsigned long RPCGet[8]; -+ unsigned long CompleteRPC[8]; -+ unsigned long RxData[8]; -+ unsigned long RxMcast[8]; -+ -+ unsigned long NoFreeTxds; -+ unsigned long NoFreeRxds; -+} EP_COMMS_STATS; -+ -+#endif /* __EP_EPSTATS_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan/kmap.h linux-2.6.9/include/elan/kmap.h ---- clean/include/elan/kmap.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan/kmap.h 2004-12-14 05:19:23.000000000 -0500 -@@ -0,0 +1,68 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. 
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ifndef __ELAN_KMAP_H
-+#define __ELAN_KMAP_H
-+
-+#ident "$Id: kmap.h,v 1.4 2004/12/14 10:19:23 mike Exp $"
-+/* $Source: /cvs/master/quadrics/epmod/kmap.h,v $ */
-+
-+#include
-+
-+extern void ep_perrail_kaddr_map (EP_RAIL *rail, EP_ADDR eaddr, virtaddr_t vaddr, unsigned long len, unsigned int perm, int ep_attr);
-+extern void ep_perrail_sdram_map (EP_RAIL *rail, EP_ADDR eaddr, sdramaddr_t saddr, unsigned long len, unsigned int perm, int ep_attr);
-+extern void ep_perrail_unmap (EP_RAIL *rail, EP_ADDR eaddr, unsigned long len);
-+extern void ep_perrail_dvma_sync (EP_RAIL *rail);
-+
-+typedef struct ep_dvma_nmh
-+{
-+    EP_NMH		dvma_nmh;
-+
-+    struct list_head	dvma_link;			/* chained on ep_dvma_state */
-+    unsigned		dvma_perm;			/* permissions for region */
-+
-+    spinlock_t		dvma_lock;
-+    EP_RAILMASK		dvma_railmask;			/* bitmap of rails */
-+    EP_RAIL		*dvma_rails[EP_MAX_RAILS];	/* associated rails */
-+    void		*dvma_private[EP_MAX_RAILS];	/* pointers to rail private data */
-+    unsigned int	dvma_attrs[1];			/* bitmap of which rails pages are loaded NOTE - max 32 rails */
-+} EP_DVMA_NMH;
-+
-+/* values for dvma_perm */
-+#define EP_PERM_EXECUTE		0
-+#define EP_PERM_READ		1
-+#define EP_PERM_WRITE		2
-+#define EP_PERM_ALL		3
-+
-+typedef struct ep_dvma_state
-+{
-+    kmutex_t		dvma_lock;
-+    struct list_head	dvma_handles;
-+    struct list_head	dvma_rails;
-+    EP_RMAP		*dvma_rmap;
-+} EP_DVMA_STATE;
-+
-+extern void ep_dvma_init (EP_SYS *sys);
-+extern void ep_dvma_fini (EP_SYS *sys);
-+extern EP_NMH *ep_dvma_reserve (EP_SYS *sys, unsigned npages, unsigned perm);
-+extern void ep_dvma_release (EP_SYS *sys, EP_NMH *nmh);
-+extern void ep_dvma_load (EP_SYS *sys, void *map, caddr_t vaddr, unsigned len,
-+			  EP_NMH *nmh, unsigned index, EP_RAILMASK *hints, EP_NMD *subset);
-+extern void ep_dvma_unload (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd);
-+
-+extern void ep_dvma_remove_rail (EP_SYS *sys, EP_RAIL *rail);
-+extern int  ep_dvma_add_rail (EP_SYS *sys, EP_RAIL *rail);
-+
-+extern uint16_t rolling_check_sum (char *msg, int nob, uint16_t sum);
-+
-+#endif /* __ELAN_KMAP_H */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "stroustrup"
-+ * End:
-+ */
-diff -urN clean/include/elan/kmsg.h linux-2.6.9/include/elan/kmsg.h
---- clean/include/elan/kmsg.h	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/elan/kmsg.h	2003-09-23 09:55:12.000000000 -0400
-@@ -0,0 +1,14 @@
-+/*
-+ * Copyright (c) 2003 by Quadrics Ltd.
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ifndef __ELAN_KMSG_H
-+#define __ELAN_KMSG_H
-+
-+#ident "@(#)$Id: kmsg.h,v 1.1 2003/09/23 13:55:12 david Exp $"
-+/* $Source: /cvs/master/quadrics/epmod/kmsg.h,v $ */
-+
-+#endif /* __ELAN_KMSG_H */
-diff -urN clean/include/elan/kthread.h linux-2.6.9/include/elan/kthread.h
---- clean/include/elan/kthread.h	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/elan/kthread.h	2004-05-06 10:24:08.000000000 -0400
-@@ -0,0 +1,53 @@
-+/*
-+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ * Copyright (c) 2002-2003 by Quadrics Ltd.
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN3_KTHREAD_H -+#define __ELAN3_KTHREAD_H -+ -+#ident "@(#)$Id: kthread.h,v 1.4 2004/05/06 14:24:08 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $" -+/* $Source: /cvs/master/quadrics/epmod/kthread.h,v $*/ -+ -+typedef struct ep_kthread -+{ -+ kcondvar_t wait; /* place to sleep */ -+ spinlock_t lock; /* and lock */ -+ long next_run; /* tick when thread should next run */ -+ long running; /* tick when thread started to run */ -+ unsigned short should_stall; -+ unsigned char state; -+ unsigned int started:1; -+ unsigned int should_stop:1; -+ unsigned int stopped:1; -+} EP_KTHREAD; -+ -+#define KT_STATE_SLEEPING 0 -+#define KT_STATE_SCHEDULED 1 -+#define KT_STATE_RUNNING 2 -+#define KT_STATE_STALLED 3 -+ -+#define AFTER(a, b) ((((long)(a)) - ((long)(b))) > 0) -+#define BEFORE(a,b) ((((long)(a)) - ((long)(b))) < 0) -+ -+extern void ep_kthread_init (EP_KTHREAD *kt); -+extern void ep_kthread_destroy (EP_KTHREAD *kt); -+extern void ep_kthread_started (EP_KTHREAD *kt); -+extern void ep_kthread_stopped (EP_KTHREAD *kt); -+extern int ep_kthread_should_stall (EP_KTHREAD *kth); -+extern int ep_kthread_sleep (EP_KTHREAD *kth, long next_run); -+extern void ep_kthread_schedule (EP_KTHREAD *kt, long when); -+extern void ep_kthread_stall (EP_KTHREAD *kth); -+extern void ep_kthread_resume (EP_KTHREAD *kt); -+extern void ep_kthread_stop (EP_KTHREAD *kt); -+extern int ep_kthread_state (EP_KTHREAD *kt, long *time); -+#endif /* __ELAN3_KTHREAD_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "linux" -+ * End: -+ */ -diff -urN clean/include/elan/nmh.h linux-2.6.9/include/elan/nmh.h ---- clean/include/elan/nmh.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan/nmh.h 2004-01-06 05:29:55.000000000 -0500 -@@ -0,0 +1,95 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN3_NMH_H -+#define __ELAN3_NMH_H -+ -+#ident "@(#)$Id: nmh.h,v 1.7 2004/01/06 10:29:55 david Exp $" -+/* $Source: /cvs/master/quadrics/epmod/nmh.h,v $*/ -+ -+ -+/* Forward declarations */ -+typedef struct ep_nmd EP_NMD; -+typedef struct ep_nmh_ops EP_NMH_OPS; -+typedef struct ep_nmh EP_NMH; -+ -+/* Railmask held in 16 bit field (packs with nodeId into NMD */ -+typedef uint16_t EP_RAILMASK; -+ -+#define EP_RAIL2RAILMASK(rnum) (1 << (rnum)) -+#define EP_RAILMASK_ALL 0xffff -+ -+/* kernel comms elan network address */ -+typedef uint32_t EP_ADDR; -+ -+/* network mapping descriptor - this is returned to the user from a map operation, -+ * and is what is passed to all communication functions */ -+struct ep_nmd -+{ -+ EP_ADDR nmd_addr; /* base address */ -+ uint32_t nmd_len; /* size in bytes */ -+ uint32_t nmd_attr; /* nodeid << 16 | railmask */ -+}; -+ -+#define EP_NMD_ATTR(nodeid,railmask) (((nodeid) << 16) | (railmask)) -+#define EP_NMD_NODEID(nmd) ((nmd)->nmd_attr >> 16) -+#define EP_NMD_RAILMASK(nmd) ((nmd)->nmd_attr & EP_RAILMASK_ALL) -+ -+#if !defined(__ELAN__) -+ -+struct ep_nmh_ops -+{ -+ int (*op_map_rails) (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, EP_RAILMASK mask); /* add mappings to different rail(s) */ -+ -+#if ! 
defined(CONFIG_EP_NO_CHECK_SUM) -+ uint16_t (*op_calc_check_sum) (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, uint16_t check_sum); /* calculates check sum */ -+#endif -+}; -+ -+struct ep_nmh -+{ -+ EP_NMD nmh_nmd; /* public field */ -+ struct list_head nmh_link; /* linked on hash table */ -+ EP_NMH_OPS *nmh_ops; /* operations to perform on object */ -+}; -+ -+#define EP_NMH_NUMHASH (32 - 11 + 1) /* one hash table for each power of 2 above pagesize */ -+#define EP_NMH_HASHSIZE (64) /* max size of each hash table */ -+ -+typedef struct ep_nmh_table -+{ -+ struct list_head *tbl_hash[EP_NMH_NUMHASH]; -+ unsigned tbl_size[EP_NMH_NUMHASH]; -+} EP_NMH_TABLE; -+ -+extern int ep_nmh_init (EP_NMH_TABLE *tbl); -+extern void ep_nmh_fini (EP_NMH_TABLE *tbl); -+ -+extern void ep_nmh_insert (EP_NMH_TABLE *tbl, EP_NMH *nmd); -+extern void ep_nmh_remove (EP_NMH_TABLE *tbl, EP_NMH *nmd); -+extern EP_NMH *ep_nmh_find (EP_NMH_TABLE *tbl, EP_NMD *nmh); -+ -+#if ! defined(CONFIG_EP_NO_CHECK_SUM) -+extern uint32_t ep_nmd_calc_data_check_sum(EP_SYS *sys, EP_NMD *nmd, int nFrags); -+#endif -+ -+/* Public interface */ -+extern EP_RAILMASK ep_nmd2railmask (EP_NMD *frags, int nFrags); -+extern void ep_nmd_subset (EP_NMD *subset, EP_NMD *nmd, unsigned off, unsigned len); -+extern int ep_nmd_merge (EP_NMD *merged, EP_NMD *a, EP_NMD *b); -+extern int ep_nmd_map_rails (EP_SYS *sys, EP_NMD *nmd, unsigned railmask); -+ -+#endif /* __ELAN__ */ -+ -+#endif /* __ELAN3_NMH_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan/rmap.h linux-2.6.9/include/elan/rmap.h ---- clean/include/elan/rmap.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan/rmap.h 2004-05-19 06:24:40.000000000 -0400 -@@ -0,0 +1,49 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN_RMAP_H -+#define __ELAN_RMAP_H -+ -+#ident "$Id: rmap.h,v 1.8 2004/05/19 10:24:40 david Exp $" -+/* $Source: /cvs/master/quadrics/epmod/rmap.h,v $ */ -+ -+ -+typedef struct ep_rmap_entry -+{ -+ size_t m_size; -+ u_long m_addr; -+} EP_RMAP_ENTRY; -+ -+typedef struct ep_rmap -+{ -+ spinlock_t m_lock; -+ kcondvar_t m_wait; -+ u_int m_size; -+ u_int m_free; -+ u_int m_want; -+ char *m_name; -+ EP_RMAP_ENTRY m_map[1]; -+} EP_RMAP; -+ -+extern void ep_display_rmap (EP_RMAP *map); -+ -+extern void ep_rmapinit (EP_RMAP *rmap, char *name, u_int mapsize); -+extern unsigned long ep_rmalloc (EP_RMAP *rmap, size_t size, int cansleep); -+extern unsigned long ep_rmalloc_constrained (EP_RMAP *mp, size_t size, unsigned long alo, unsigned long ahi, unsigned long align, int cansleep); -+extern void ep_rmfree (EP_RMAP *rmap, size_t size, unsigned long addr); -+extern unsigned long ep_rmget (EP_RMAP *rmap, size_t size, unsigned long addr); -+extern EP_RMAP *ep_rmallocmap (size_t size, char *name, int cansleep); -+extern void ep_rmfreemap (EP_RMAP *map); -+ -+#endif /* __ELAN3_RMAP_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan/statemap.h linux-2.6.9/include/elan/statemap.h ---- clean/include/elan/statemap.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan/statemap.h 2003-10-07 09:22:38.000000000 -0400 -@@ -0,0 +1,52 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN_STATEMAP_H -+#define __ELAN_STATEMAP_H -+ -+#ident "$Id: statemap.h,v 1.8 2003/10/07 13:22:38 david Exp $" -+/* $Source: /cvs/master/quadrics/epmod/statemap.h,v $ */ -+ -+#include -+ -+/******************************** global state bitmap stuff **********************************/ -+typedef struct -+{ -+ unsigned int size; -+ unsigned int nob; -+ unsigned int changemap_nob; -+ unsigned int bitmap_nob; -+ bitmap_t *changemap0; -+ bitmap_t *changemap1; -+ bitmap_t *changemap2; -+ bitmap_t *bitmap; -+} statemap_t; -+ -+extern bitmap_t statemap_getseg (statemap_t *map, unsigned int offset); -+extern void statemap_setseg (statemap_t *map, unsigned int offset, bitmap_t seg); -+extern bitmap_t statemap_getbits (statemap_t *map, unsigned int offset, int nbits); -+extern void statemap_setbits (statemap_t *map, unsigned int offset, bitmap_t bits, int nbits); -+extern void statemap_zero (statemap_t *map); -+extern void statemap_setmap (statemap_t *dst, statemap_t *src); -+extern void statemap_ormap (statemap_t *dst, statemap_t *src); -+extern int statemap_findchange (statemap_t *map, bitmap_t *newseg, int clearchange); -+extern int statemap_changed (statemap_t *map); -+extern void statemap_reset (statemap_t *map); -+extern void statemap_copy (statemap_t *dst, statemap_t *src); -+extern void statemap_clearchanges (statemap_t *map); -+extern bitmap_t *statemap_tobitmap (statemap_t *map); -+extern statemap_t *statemap_create (int size); -+extern void statemap_destroy (statemap_t *map); -+ -+#endif /* __ELAN_STATEMAP_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan/stats.h linux-2.6.9/include/elan/stats.h ---- clean/include/elan/stats.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan/stats.h 2003-09-24 09:55:37.000000000 -0400 -@@ -0,0 +1,85 @@ -+/* -+ * Copyright (c) 2003 by Quadrics Limited. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: stats.h,v 1.5 2003/09/24 13:55:37 david Exp $" -+/* $Source: /cvs/master/quadrics/elanmod/modsrc/stats.h,v $*/ -+ -+#ifndef __ELAN_STATS_H -+#define __ELAN_STATS_H -+ -+ -+/* non-kernel headings */ -+#define ELAN_STATS_NAME_MAX_LEN ((uint)64) -+typedef unsigned int ELAN_STATS_IDX; -+ -+typedef struct elan_stats_map -+{ -+ char entry_name[ELAN_STATS_NAME_MAX_LEN]; -+ int index; -+} ELAN_STATS_MAP; -+ -+#if defined(__KERNEL__) -+ -+/* stats callbacks */ -+#define ELAN_STATS_OPS_VERSION ((u_int)1) -+typedef struct elan_stats_ops -+{ -+ u_int ops_version; -+ -+ int (*elan_stats_get_name) (void * arg, uint index, caddr_t name); -+ int (*elan_stats_get_block) (void * arg, uint entries, ulong *values); -+ int (*elan_stats_clear_block) (void * arg); -+ -+} ELAN_STATS_OPS; -+ -+typedef struct elan_stats_struct -+{ -+ struct list_head node; -+ -+ ELAN_STATS_IDX statidx; -+ char block_name[ELAN_STATS_NAME_MAX_LEN]; -+ uint num_entries; -+ ELAN_STATS_OPS *ops; -+ void *arg; -+ -+} ELAN_STATS_STRUCT; -+ -+/* stats.c */ -+extern int elan_stats_register (ELAN_STATS_IDX *statidx, -+ char *block_name, -+ uint num_entries, -+ ELAN_STATS_OPS *ops, -+ void *arg); -+ -+extern int elan_stats_deregister (ELAN_STATS_IDX statidx); -+extern ELAN_STATS_STRUCT *elan_stats_find (ELAN_STATS_IDX statidx); -+extern ELAN_STATS_STRUCT *elan_stats_find_by_name(caddr_t block_name); -+extern ELAN_STATS_STRUCT *elan_stats_find_next (ELAN_STATS_IDX statidx); -+ -+ -+/* elan_stats.c */ -+extern int elan_stats_get_next_index (ELAN_STATS_IDX statidx, ELAN_STATS_IDX *next_statidx); -+ -+extern int elan_stats_find_index (caddr_t block_name, ELAN_STATS_IDX *statidx, uint *num_entries); -+ -+extern int elan_stats_get_block_info (ELAN_STATS_IDX statidx, caddr_t block_name, uint *num_entries); -+ -+extern int elan_stats_get_index_name (ELAN_STATS_IDX statidx, uint index, caddr_t name); -+ -+extern int elan_stats_get_block (ELAN_STATS_IDX statidx, uint entries, ulong *values); -+ -+extern int elan_stats_clear_block (ELAN_STATS_IDX statidx); -+ -+#endif /* __KERNEL__ */ -+ -+#endif /* __ELAN_STATS_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "linux" -+ * End: -+ */ -diff -urN clean/include/elan3/compat.h linux-2.6.9/include/elan3/compat.h ---- clean/include/elan3/compat.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan3/compat.h 2004-06-09 05:07:03.000000000 -0400 -@@ -0,0 +1,177 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: compat.h,v 1.4 2004/06/09 09:07:03 mike Exp $ $Name: QSNETMODULES-5-11-3_20050907 $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/compat.h,v $*/ -+ -+#ifndef __ELAN3_COMPAT_H -+#define __ELAN3_COMPAT_H -+ -+/* compatibility header to allow Eagle branch QSNETLIBS -+ * to compile against head kernel */ -+ -+#define ELAN_EAGLE_COMPAT -+ -+/* vmseg.h */ -+#define ELAN_FLAGSTATS ELAN3_FLAGSTATS -+ -+/* uregs.h */ -+#define ELAN_STATS_NAME ELAN3_STATS_NAME -+#define elan3_stats_names elan_stats_names -+ -+/* spinlock.h */ -+#define ELAN_SPINLOCK ELAN3_SPINLOCK -+#define ELAN_SPINLOCK_MAIN ELAN3_SPINLOCK_MAIN -+#define ELAN_SPINLOCK_ELAN ELAN3_SPINLOCK_ELAN -+#define ELAN_ME_SPINENTER ELAN3_ME_SPINENTER -+#define ELAN_ME_FORCEENTER ELAN3_ME_FORCEENTER -+#define ELAN_ME_SPINEXIT ELAN3_ME_SPINEXIT -+#define ELAN_SPINENTER ELAN3_SPINENTER -+#define ELAN_SPINEXIT ELAN3_SPINEXIT -+#define elan3_me_spinblock elan_me_spinblock -+#define elan3_spinenter elan_spinenter -+ -+/* elanio.h */ -+#define ELANIO_CONTROL_PATHNAME ELAN3IO_CONTROL_PATHNAME -+#define ELANIO_USER_PATHNAME ELAN3IO_USER_PATHNAME -+#define ELANIO_SDRAM_PATHNAME ELAN3IO_SDRAM_PATHNAME -+#define ELANIO_MAX_PATHNAMELEN ELAN3IO_MAX_PATHNAMELEN -+ -+#define ELANIO_SET_BOUNDARY_SCAN ELAN3IO_SET_BOUNDARY_SCAN -+#define ELANIO_CLEAR_BOUNDARY_SCAN ELAN3IO_CLEAR_BOUNDARY_SCAN -+#define ELANIO_READ_LINKVAL ELAN3IO_READ_LINKVAL -+#define ELANIO_WRITE_LINKVAL ELAN3IO_WRITE_LINKVAL -+#define ELANIO_SET_DEBUG_STRUCT ELAN3IO_SET_DEBUG_STRUCT -+#define ELANIO_SET_DEBUG ELAN3IO_SET_DEBUG -+#define ELANIO_DEBUG_BUFFER_STRUCT ELAN3IO_DEBUG_BUFFER_STRUCT -+#define ELANIO_DEBUG_BUFFER ELAN3IO_DEBUG_BUFFER -+#define ELANIO_NETERR_SERVER_STRUCT ELAN3IO_NETERR_SERVER_STRUCT -+#define ELANIO_NETERR_SERVER ELAN3IO_NETERR_SERVER -+#define ELANIO_NETERR_FIXUP ELAN3IO_NETERR_FIXUP -+ -+#define ELANIO_FREE ELAN3IO_FREE -+#define ELANIO_ATTACH ELAN3IO_ATTACH -+#define ELANIO_DETACH ELAN3IO_DETACH -+#define ELANIO_ADDVP_STRUCT ELAN3IO_ADDVP_STRUCT -+#define ELANIO_ADDVP ELAN3IO_ADDVP -+#define ELANIO_REMOVEVP ELAN3IO_REMOVEVP -+#define ELANIO_BCASTVP_STRUCT ELAN3IO_BCASTVP_STRUCT -+#define ELANIO_BCASTVP ELAN3IO_BCASTVP -+#define ELANIO_LOAD_ROUTE_STRUCT ELAN3IO_LOAD_ROUTE_STRUCT -+#define ELANIO_LOAD_ROUTE ELAN3IO_LOAD_ROUTE -+#define ELANIO_PROCESS ELAN3IO_PROCESS -+#define ELANIO_SETPERM_STRUCT ELAN3IO_SETPERM_STRUCT -+#define ELANIO_SETPERM ELAN3IO_SETPERM -+#define ELANIO_CLEARPERM_STRUCT ELAN3IO_CLEARPERM_STRUCT -+#define ELANIO_CLEARPERM ELAN3IO_CLEARPERM -+#define ELANIO_CHANGEPERM_STRUCT ELAN3IO_CHANGEPERM_STRUCT -+#define ELANIO_CHANGEPERM ELAN3IO_CHANGEPERM -+#define ELANIO_HELPER_THREAD ELAN3IO_HELPER_THREAD -+#define ELANIO_WAITCOMMAND ELAN3IO_WAITCOMMAND -+#define ELANIO_BLOCK_INPUTTER ELAN3IO_BLOCK_INPUTTER -+#define ELANIO_SET_FLAGS ELAN3IO_SET_FLAGS -+#define ELANIO_WAITEVENT ELAN3IO_WAITEVENT -+#define ELANIO_ALLOC_EVENTCOOKIE ELAN3IO_ALLOC_EVENTCOOKIE -+#define ELANIO_FREE_EVENTCOOKIE ELAN3IO_FREE_EVENTCOOKIE -+#define ELANIO_ARM_EVENTCOOKIE ELAN3IO_ARM_EVENTCOOKIE -+#define ELANIO_WAIT_EVENTCOOKIE ELAN3IO_WAIT_EVENTCOOKIE -+#define ELANIO_SWAPSPACE ELAN3IO_SWAPSPACE -+#define ELANIO_EXCEPTION_SPACE ELAN3IO_EXCEPTION_SPACE -+#define ELANIO_GET_EXCEPTION ELAN3IO_GET_EXCEPTION -+#define ELANIO_UNLOAD_STRUCT ELAN3IO_UNLOAD_STRUCT -+#define ELANIO_UNLOAD ELAN3IO_UNLOAD -+#define ELANIO_GET_ROUTE_STRUCT ELAN3IO_GET_ROUTE_STRUCT -+#define 
ELANIO_GET_ROUTE ELAN3IO_GET_ROUTE -+#define ELANIO_RESET_ROUTE_STRUCT ELAN3IO_RESET_ROUTE_STRUCT -+#define ELANIO_RESET_ROUTE ELAN3IO_RESET_ROUTE -+#define ELANIO_CHECK_ROUTE_STRUCT ELAN3IO_CHECK_ROUTE_STRUCT -+#define ELANIO_CHECK_ROUTE ELAN3IO_CHECK_ROUTE -+#define ELANIO_VP2NODEID_STRUCT ELAN3IO_VP2NODEID_STRUCT -+#define ELANIO_VP2NODEID ELAN3IO_VP2NODEID -+#define ELANIO_SET_SIGNAL ELAN3IO_SET_SIGNAL -+#define ELANIO_PROCESS_2_LOCATION_STRUCT ELAN3IO_PROCESS_2_LOCATION_STRUCT -+#define ELANIO_PROCESS_2_LOCATION ELAN3IO_PROCESS_2_LOCATION -+#define ELANIO_GET_DEVINFO_STRUCT ELAN3IO_GET_DEVINFO_STRUCT -+#define ELANIO_GET_DEVINFO ELAN3IO_GET_DEVINFO -+#define ELANIO_GET_POSITION_STRUCT ELAN3IO_GET_POSITION_STRUCT -+#define ELANIO_GET_POSITION ELAN3IO_GET_POSITION -+#define ELANIO_STATS_STRUCT ELAN3IO_STATS_STRUCT -+#define ELANIO_STATS ELAN3IO_STATS -+# define ELAN_SYS_STATS_DEVICE ELAN3_SYS_STATS_DEVICE -+# define ELAN_SYS_STATS_ELAN3MMU ELAN3_SYS_STATS_MMU -+ -+#define ELANIO_OFF_FLAG_PAGE ELAN3IO_OFF_FLAG_PAGE -+#define ELANIO_OFF_UREG_PAGE ELAN3IO_OFF_UREG_PAGE -+#define ELANIO_OFF_COMMAND_PAGE ELAN3IO_OFF_COMMAND_PAGE -+ -+ -+/* elanvp.h */ -+#define ELAN_ROUTE_SUCCESS ELAN3_ROUTE_SUCCESS -+#define ELAN_ROUTE_SYSCALL_FAILED ELAN3_ROUTE_SYSCALL_FAILED -+#define ELAN_ROUTE_INVALID ELAN3_ROUTE_INVALID -+#define ELAN_ROUTE_TOO_LONG ELAN3_ROUTE_TOO_LONG -+#define ELAN_ROUTE_LOAD_FAILED ELAN3_ROUTE_LOAD_FAILED -+#define ELAN_ROUTE_PROC_RANGE ELAN3_ROUTE_PROC_RANGE -+#define ELAN_ROUTE_INVALID_LEVEL ELAN3_ROUTE_INVALID_LEVEL -+#define ELAN_ROUTE_OCILATES ELAN3_ROUTE_OCILATES -+#define ELAN_ROUTE_WRONG_DEST ELAN3_ROUTE_WRONG_DEST -+#define ELAN_ROUTE_TURN_LEVEL ELAN3_ROUTE_TURN_LEVEL -+#define ELAN_ROUTE_NODEID_UNKNOWN ELAN3_ROUTE_NODEID_UNKNOWN -+ -+/* elandev.h */ -+#define ELAN_STATS ELAN3_STATS -+#define ELAN_STATS_VERSION ELAN3_STATS_VERSION -+ -+/* perm.h */ -+#define ELAN_PERM_NOREMOTE ELAN3_PERM_NOREMOTE -+#define ELAN_PERM_LOCAL_READ ELAN3_PERM_LOCAL_READ -+#define ELAN_PERM_REMOTEALL ELAN3_PERM_REMOTEALL -+ -+/* threadsyscall.h */ -+#define ELAN_ABORT_TRAPNUM ELAN3_ABORT_TRAPNUM -+#define ELAN_ELANCALL_TRAPNUM ELAN3_ELANCALL_TRAPNUM -+#define ELAN_SYSCALL_TRAPNUM ELAN3_SYSCALL_TRAPNUM -+#define ELAN_SYS_close ELAN3_SYS_close -+#define ELAN_SYS_getpid ELAN3_SYS_getpid -+#define ELAN_SYS_ioctl ELAN3_SYS_ioctl -+#define ELAN_SYS_kill ELAN3_SYS_kill -+#define ELAN_SYS_lseek ELAN3_SYS_lseek -+#define ELAN_SYS_mmap ELAN3_SYS_mmap -+#define ELAN_SYS_munmap ELAN3_SYS_munmap -+#define ELAN_SYS_open ELAN3_SYS_open -+#define ELAN_SYS_poll ELAN3_SYS_poll -+#define ELAN_SYS_read ELAN3_SYS_read -+#define ELAN_SYS_write ELAN3_SYS_write -+#define ELAN_T_SYSCALL_CODE ELAN3_T_SYSCALL_CODE -+#define ELAN_T_SYSCALL_ERRNO ELAN3_T_SYSCALL_ERRNO -+ -+/* elansyscall.h */ -+#define ELAN_SYS_FLAG_DMA_BADVP ELAN3_SYS_FLAG_DMA_BADVP -+#define ELAN_SYS_FLAG_THREAD_BADVP ELAN3_SYS_FLAG_THREAD_BADVP -+#define ELAN_SYS_FLAG_DMAFAIL ELAN3_SYS_FLAG_DMAFAIL -+#define ELAN_SYS_FLAG_NETERR ELAN3_SYS_FLAG_NETERR -+ -+/* intrinsics.h */ -+#define elan_copy64w elan3_copy64w -+#define elan_read64dw elan3_read64dw -+#define elan_write64dw elan3_write64dw -+ -+#ifndef ELAN_POLL_EVENT -+#define ELAN_POLL_EVENT ELAN3_POLL_EVENT -+#endif -+#ifndef ELAN_WAIT_EVENT -+#define ELAN_WAIT_EVENT ELAN3_WAIT_EVENT -+#endif -+ -+#endif /* __ELAN3_COMPAT_H */ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -+ -diff -urN clean/include/elan3/dma.h linux-2.6.9/include/elan3/dma.h ---- clean/include/elan3/dma.h 
1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan3/dma.h 2002-08-21 08:43:27.000000000 -0400 -@@ -0,0 +1,213 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN3_DMA_H -+#define __ELAN3_DMA_H -+ -+#ident "$Id: dma.h,v 1.38 2002/08/21 12:43:27 david Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/dma.h,v $ */ -+ -+#include -+#include -+ -+/* Alignment for a DMA descriptor */ -+#define E3_DMA_ALIGN (32) -+ -+/* The maximum size a DMA can be (i.e. < 2GB) */ -+#define E3_MAX_DMA_SIZE 0x7fffffff -+ -+/* This macro returns TRUE if a fixup for the ELAN_REVB_BUG_2 problem is required -+ * i.e. if the DMA begins in the last 64-bytes of a page and its size causes it to enter the -+ * next page, hence causing the Elan to issue 2 (64-byte) block reads to different pages. -+ * See GNAT hw-elan3/3263 -+ */ -+#define E3_DMA_REVB_BUG_2(SIZE, ADDR, PAGESIZE) \ -+ ( (((int) (ADDR) & (PAGESIZE-64)) == (PAGESIZE-64)) && (-(((int) (ADDR) | ~(PAGESIZE-1))) < (SIZE)) ) -+ -+/* There is a point where a dma runs quicker from main memory than -+ * when running from sdram and having to copy all the data down -+ * first. -+ */ -+#define E3_DMA_SDRAM_CUTOFF 128 -+ -+typedef union _e3_DmaType -+{ -+ E3_uint32 type; -+ struct -+ { -+#if defined(__LITTLE_ENDIAN__) -+ E3_uint32 dataType:2; /* Bits 0 to 1 */ -+ E3_uint32 direction:3; /* Bit 4 to 2 */ -+ E3_uint32 opCode:4; /* Bits 5 to 8 */ -+ E3_uint32 failCount:6; /* Bits 9 to 14 */ -+ E3_uint32 isRemote:1; /* Bit 15 */ -+ E3_uint32 Context:13; /* Bits 16 to 28 */ -+ E3_uint32 :3; /* Bits 29 to 31 */ -+#else -+ E3_uint32 :3; /* Bits 29 to 31 */ -+ E3_uint32 Context:13; /* Bits 16 to 28 */ -+ E3_uint32 isRemote:1; /* Bit 15 */ -+ E3_uint32 failCount:6; /* Bits 9 to 14 */ -+ E3_uint32 opCode:4; /* Bits 5 to 8 */ -+ E3_uint32 direction:3; /* Bit 4 to 2 */ -+ E3_uint32 dataType:2; /* Bits 0 to 1 */ -+#endif -+ } s; -+} E3_DmaType; -+ -+#define E3_DMA_CONTEXT_MASK (ALL_CONTEXT_BITS << 16) -+ -+#define E3_DMA_CONTEXT(type) (((type) >> 16) & ALL_CONTEXT_BITS) -+#define E3_DMA_ISREMOTE(type) (((type) >> 15) & 1) -+#define E3_DMA_FAILCOUNT(type) (((type) >> 9) & 0x3F) -+#define E3_DMA_OPCODE(type) (((type) >> 5) & 0xF) -+#define E3_DMA_DIRECTION(type) (((type) >> 2) & 0x7) -+#define EP_DMA_DATATYPE(type) (((type) >> 0) & 0x3) -+ -+#define E3_DMA_TYPE(dataType, direction, opCode, failCount) \ -+ (((dataType) & 0x3) | (((direction) & 7) << 2) | (((opCode) & 0xF) << 5) | (((failCount) & 0x3F) << 9)) -+ -+ -+typedef union _e3_CookieVProc -+{ -+ E3_uint32 cookie_vproc; -+ struct -+ { -+#if defined(__LITTLE_ENDIAN__) -+ E3_uint32 vproc:16; /* Bit 15 to 0 */ -+ E3_uint32 cookie:16; /* Bits 31 to 16 */ -+#else -+ E3_uint32 cookie:16; /* Bits 31 to 16 */ -+ E3_uint32 vproc:16; /* Bit 15 to 0 */ -+#endif -+ } s; -+} E3_CookieVProc; -+ -+#define E3_DMA_COOKIE_PROC(Cookie, VProc) (((VProc) & 0xffff) | (((Cookie) << 16))) -+ -+#define DMA_COOKIE_MASK (0xffff0000) -+#define DMA_PROCESS_MASK (0x0000ffff) -+ -+/* We use the bottom bit of the cookie to -+ * distinguish main/thread generated cookies -+ */ -+#define DMA_COOKIE_THREAD (0x01 << 16) -+ -+/* We use the next bit of the cookie to -+ * distinguish locally/remotely generated cookies -+ */ -+#define DMA_COOKIE_REMOTE (0x02 << 16) -+ -+/* Assign and increment cookie (NB: we have reserved the bottom two bits) -+ */ -+#define DMA_COOKIE(COOKIE, VPROC) ((((COOKIE) += (0x4 << 16)) & 
DMA_COOKIE_MASK) | VPROC)
-+#define DMA_REMOTE_COOKIE(COOKIE, VPROC) ((((COOKIE) += (0x4 << 16)) & DMA_COOKIE_MASK) | DMA_COOKIE_REMOTE | VPROC)
-+
-+#define DMA_COOKIE_REFRESH(COOKIEVP, COOKIE)                         \
-+do {                                                                 \
-+    COOKIEVP &= ~DMA_COOKIE_MASK;         /* Clear cookie */         \
-+    COOKIEVP |= DMA_COOKIE(COOKIE,0);     /* Assign new cookie */    \
-+} while (0)
-+
-+typedef struct e3_dma
-+{
-+    E3_DmaType     dma_u;
-+    E3_uint32      dma_size;
-+    E3_Addr        dma_source;
-+    E3_Addr        dma_dest;
-+    E3_Addr        dma_destEvent;
-+    E3_CookieVProc dma_destCookieProc;
-+    E3_Addr        dma_srcEvent;
-+    E3_CookieVProc dma_srcCookieProc;
-+} E3_DMA;
-+
-+
-+/*
-+ * Word-swapped version of DMA descriptor.
-+ * This is used by the UltraSPARC code to format the descriptor
-+ * in main memory before block-copying it down to Elan SDRAM.
-+ * In the process it does a dword (64-bit) conversion and so swaps
-+ * the word order on a double-word pair basis.
-+ */
-+typedef struct e3_dma_swapped
-+{
-+    E3_uint32      dma_size;
-+    E3_DmaType     dma_u;
-+    E3_Addr        dma_dest;
-+    E3_Addr        dma_source;
-+    E3_CookieVProc dma_destCookieProc;
-+    E3_Addr        dma_destEvent;
-+    E3_CookieVProc dma_srcCookieProc;
-+    E3_Addr        dma_srcEvent;
-+} E3_DMA_SWAPPED;
-+
-+/* Define a Main memory structure for DMA desc based on Endianness of machine */
-+#if defined(__LITTLE_ENDIAN__)
-+#define E3_DMA_MAIN E3_DMA
-+#else
-+#define E3_DMA_MAIN E3_DMA_SWAPPED
-+#endif
-+
-+#define dma_type            dma_u.type
-+#define dma_failCount       dma_u.s.failCount
-+#define dma_isRemote        dma_u.s.isRemote
-+#define dma_opCode          dma_u.s.opCode
-+#define dma_direction       dma_u.s.direction
-+#define dma_dataType        dma_u.s.dataType
-+#define dma_queueContext    dma_u.s.Context
-+
-+#define dma_destCookieVProc dma_destCookieProc.cookie_vproc
-+#define dma_destVProc       dma_destCookieProc.s.vproc
-+#define dma_destCookie      dma_destCookieProc.s.cookie
-+#define dma_srcCookieVProc  dma_srcCookieProc.cookie_vproc
-+#define dma_srcVProc        dma_srcCookieProc.s.vproc
-+#define dma_srcCookie       dma_srcCookieProc.s.cookie
-+
-+/*
-+ * Values for dma_opCode
-+ */
-+#define DMA_NORMAL                  0
-+#define DMA_QUEUED                  1
-+#define DMA_NORMAL_BROADCAST        2
-+#define DMA_QUEUED_BROADCAST        3
-+#define DMA_NORMAL_UNSAFE           4
-+#define DMA_QUEUED_UNSAFE           5
-+#define DMA_NORMAL_BROADCAST_UNSAFE 6
-+#define DMA_QUEUED_BROADCAST_UNSAFE 7
-+
-+/*
-+ * Values for dma_direction
-+ */
-+#define DMA_WRITE           0
-+#define DMA_READ_REQUEUE    1
-+#define DMA_READ            3
-+#define DMA_READ_BROADCAST  7
-+
-+/*
-+ * Values for dma_dataType
-+ */
-+#define DMA_BYTE            0
-+#define DMA_HALFWORD        1
-+#define DMA_WORD            2
-+#define DMA_DOUBLE          3
-+
-+/* OUT OF DATE ?
-+   #define DMA_OPCODE_SHIFT    3
-+   #define DMA_FAILCOUNT_SHIFT 9
-+*/
-+#define DMA_TYPE_ISREMOTE       (1 << 15)
-+#define DMA_TYPE_READ           (3 << 2)
-+#define DMA_TYPE_READ_REQUEUE   (1 << 2)
-+#define DMA_TYPE_DIRECTION_MASK (3 << 2)
-+
-+#endif /* __ELAN3_DMA_H */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "stroustrup"
-+ * End:
-+ */
-diff -urN clean/include/elan3/e3types.h linux-2.6.9/include/elan3/e3types.h
---- clean/include/elan3/e3types.h	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/elan3/e3types.h	2002-08-09 07:23:33.000000000 -0400
-@@ -0,0 +1,82 @@
-+/*
-+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN3_E3TYPES_H -+#define __ELAN3_E3TYPES_H -+ -+#ident "$Id: e3types.h,v 1.18 2002/08/09 11:23:33 addy Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/e3types.h,v $ */ -+ -+#include -+/* -+ * "flip" values for correctly indexing into -+ * block data which was copied from the Elan -+ * using 64 bit accesses. -+ */ -+#if defined(__LITTLE_ENDIAN__) -+# define ByteEndianFlip 0 -+# define ShortEndianFlip 0 -+# define WordEndianFlip 0 -+#else -+# define ByteEndianFlip 7 -+# define ShortEndianFlip 3 -+# define WordEndianFlip 1 -+#endif -+ -+ -+#ifndef _ASM -+ -+typedef signed int E3_int; -+typedef unsigned int E3_uint; -+ -+typedef signed char E3_int8; -+typedef unsigned char E3_uint8; -+ -+typedef signed short E3_int16; -+typedef unsigned short E3_uint16; -+ -+typedef signed int E3_int32; -+typedef unsigned int E3_uint32; -+ -+#ifdef __ELAN3__ -+typedef signed long long E3_int64; -+typedef unsigned long long E3_uint64; -+#ifdef _MAIN_LP64 -+/* NOTE: If the Main is 64-bit we declare the Elan thread's -+ * E3_uintptr to be 64-bits too -+ */ -+typedef unsigned long long E3_uintptr; -+#else -+typedef unsigned long E3_uintptr; -+#endif -+ -+#else -+ -+#ifdef _LP64 -+typedef signed long E3_int64; -+typedef unsigned long E3_uint64; -+typedef unsigned long E3_uintptr; -+#else /* _ILP32 */ -+typedef signed long long E3_int64; -+typedef unsigned long long E3_uint64; -+typedef unsigned long E3_uintptr; -+#endif -+ -+#endif /* __ELAN3__ */ -+ -+/* 32-bit Elan3 address */ -+typedef E3_uint32 E3_Addr; -+ -+#endif /* _ASM */ -+ -+#endif /* __ELAN3_E3TYPES_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan3/elan3mmu.h linux-2.6.9/include/elan3/elan3mmu.h ---- clean/include/elan3/elan3mmu.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan3/elan3mmu.h 2004-12-14 05:19:32.000000000 -0500 -@@ -0,0 +1,346 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN3_ELAN3MMU_H -+#define __ELAN3_ELAN3MMU_H -+ -+#ident "$Id: elan3mmu.h,v 1.41 2004/12/14 10:19:32 mike Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elan3mmu.h,v $*/ -+ -+ -+#include -+ -+#ifdef __cplusplus -+extern "C" { -+#endif -+ -+typedef struct elan3mmu_global_stats -+{ -+ int version; -+ int pteload; -+ int pteunload; -+ int ptereload; -+ -+ int streamable_alloc; -+ int streamable_free; -+ int streamable_alloc_failed; -+ -+ int num_ptbl_level[4]; /* number of level N ptbls */ -+ -+ int create_ptbl_failed; /* count of ptbl creation failure */ -+ -+ int lX_alloc_l3; /* count of l3 ptbls used as lX */ -+ int lX_freed_l3; /* count of lX ptbls freed as l3 */ -+ -+ int l2_alloc_l3; /* count of l3 ptbls used as l2 */ -+ int l2_freed_l3; /* count of l2 ptbls freed as l3 */ -+ -+ int stolen_ptbls; /* count of l3 ptbls stolen */ -+} ELAN3MMU_GLOBAL_STATS; -+ -+#define ELAN3MMU_STATS_VERSION 1 -+ -+#define ELAN3MMU_STAT(what) (elan3mmu_global_stats.what++) -+#define ELAN3MMU_SET_STAT(what,count) (elan3mmu_global_stats.what = count) -+ -+#ifdef __KERNEL__ -+ -+#define ELAN3_PT_SHIFT (ELAN3_L2_SHIFT + 2) -+ -+typedef struct elan3_ptbl -+{ -+ struct elan3_ptbl *ptbl_parent; /* Parent page table, or next on freelist */ -+ struct elan3mmu *ptbl_elan3mmu; /* elan3mmu we're allocated for */ -+ E3_Addr ptbl_base; /* Virtual address we're mapping */ -+ u_char ptbl_index; /* Index in ptbl group */ -+ u_char ptbl_valid; /* Number of valid entries */ -+ u_char ptbl_flags; /* Flags, defined below. */ -+ u_char ptbl_spare; -+} ELAN3_PTBL; -+ -+#define ptbl_next ptbl_parent /* Parent pointer is next pointer when on free list */ -+ -+#define PTBL_LEVEL_X 0x00 -+#define PTBL_LEVEL_1 0x01 -+#define PTBL_LEVEL_2 0x02 -+#define PTBL_LEVEL_3 0x03 -+#define PTBL_LEVEL_MASK 0x03 -+#define PTBL_LOCKED 0x04 /* Page table is locked, protects all fields */ -+#define PTBL_KEEP 0x08 /* This ptbl is not to be stolen */ -+#define PTBL_ALLOCED 0x10 /* This ptbl has been allocated, and is not free */ -+#define PTBL_GROUPED 0x20 /* This ptbl is a member of a group of ptbls */ -+#define PTBL_KERNEL 0x80 /* This ptbl is allocated for the kernel */ -+ -+#define PTBL_LEVEL(flags) ((flags) & PTBL_LEVEL_MASK) -+#define PTBL_IS_LOCKED(flags) (((flags) & (PTBL_LOCKED|PTBL_ALLOCED)) == (PTBL_LOCKED|PTBL_ALLOCED)) -+ -+#if ELAN3_PAGE_SHIFT == 13 -+# define PTBL_GROUP_SIZE 8192 /* page table groups are 8k bytes */ -+# define PTBLS_PER_GROUP_L1 8 /* Number of level 1 tables in a group */ -+# define PTBLS_PER_GROUP_L2 32 /* ... level 2 */ -+# define PTBLS_PER_GROUP_L3 32 /* ... level 3 */ -+# define PTBLS_PER_GROUP_LX 32 /* ... level X */ -+# define PTBLS_PER_GROUP_MAX 32 /* max of l1,l2,l3,lX */ -+#else -+# define PTBL_GROUP_SIZE 4096 /* page table groups are 4k bytes */ -+# define PTBLS_PER_GROUP_L1 4 /* Number of level 1 tables in a group */ -+# define PTBLS_PER_GROUP_L2 16 /* ... level 2 */ -+# define PTBLS_PER_GROUP_L3 8 /* ... level 3 */ -+# define PTBLS_PER_GROUP_LX 16 /* ... 
level X */
-+# define PTBLS_PER_GROUP_MAX 16   /* max of l1,l2,l3,lX */
-+#endif
-+
-+#define HMES_PER_GROUP (PTBLS_PER_GROUP_L3*ELAN3_L3_ENTRIES)
-+
-+#if ELAN3_PAGE_SHIFT == 13
-+# define PTBLS_PER_PTBL_L1 4   /* 256 PTPs */
-+# define PTBLS_PER_PTBL_L2 1   /* 64 PTPs */
-+# define PTBLS_PER_PTBL_L3 1   /* 32 PTEs */
-+#else
-+# define PTBLS_PER_PTBL_L1 4   /* 256 PTPs */
-+# define PTBLS_PER_PTBL_L2 1   /* 64 PTPs */
-+# define PTBLS_PER_PTBL_L3 2   /* 64 PTEs */
-+#endif
-+
-+#define ELAN3_LX_ENTRIES   (32)
-+#define PTBLS_PER_PTBL_LX  (1)
-+
-+#define L1_VA_PER_PTBL (ELAN3_L1_SIZE*(ELAN3_L1_ENTRIES/PTBLS_PER_PTBL_L1))   /* 4 ptbl for L1 */
-+#define L2_VA_PER_PTBL (ELAN3_L2_SIZE*(ELAN3_L2_ENTRIES/PTBLS_PER_PTBL_L2))   /* 1 ptbl for L2 */
-+#define L3_VA_PER_PTBL (ELAN3_L3_SIZE*(ELAN3_L3_ENTRIES/PTBLS_PER_PTBL_L3))   /* 1 ptbl for L3 */
-+
-+typedef struct elan3_ptbl_gr
-+{
-+    struct elan3_ptbl_gr *pg_next;                      /* Next in list. */
-+    int                   pg_level;                     /* Level PG allocated for */
-+    sdramaddr_t           pg_addr;                      /* sdram offset of ptes/ptps */
-+    ELAN3_PTBL            pg_ptbls[PTBLS_PER_GROUP_MAX]; /* The actual page tables */
-+} ELAN3_PTBL_GR;
-+
-+
-+/*
-+ * The elan3mmu structure is the mmu dependent hardware address translation
-+ * structure linked to the address space structure to show the translations
-+ * provided by the elan for an address space.
-+ *
-+ * We also have a doubly linked list of 'regions' which allow the
-+ * elan3mmu code to determine the access permissions for the elan
-+ * dependent on the virtual address that the translation is being
-+ * loaded at.
-+ */
-+
-+typedef struct elan3mmu_rgn
-+{
-+    struct elan3mmu_rgn *rgn_mnext;      /* Doubly linked list of regions */
-+    struct elan3mmu_rgn *rgn_mprev;      /*   sorted on main address */
-+    caddr_t              rgn_mbase;      /* main address of base of region */
-+
-+    struct elan3mmu_rgn *rgn_enext;      /* Doubly linked list of regions */
-+    struct elan3mmu_rgn *rgn_eprev;      /*   sorted on elan address */
-+    E3_Addr              rgn_ebase;      /* elan address of base of region */
-+
-+    u_int                rgn_len;        /* length of region */
-+    u_int                rgn_perm;       /* elan access permission */
-+} ELAN3MMU_RGN;
-+
-+typedef struct elan3mmu
-+{
-+    spinlock_t         elan3mmu_lock;         /* spinlock lock for regions */
-+    ELAN3MMU_RGN      *elan3mmu_mrgns;        /* Doubly linked list of memory regions */
-+    ELAN3MMU_RGN      *elan3mmu_mtail;        /* Last memory region on list */
-+    ELAN3MMU_RGN      *elan3mmu_mrgnlast;     /* Last region 'hit' */
-+
-+    ELAN3MMU_RGN      *elan3mmu_ergns;        /* Doubly linked list of memory regions */
-+    ELAN3MMU_RGN      *elan3mmu_etail;        /* Last memory region on list */
-+    ELAN3MMU_RGN      *elan3mmu_ergnlast;     /* Last region 'hit' */
-+
-+    struct elan3_dev  *elan3mmu_dev;          /* Elan device we're using. */
-+    struct elan3_ctxt *elan3mmu_ctxt;         /* Elan ctxt we're associated with */
-+
-+    sdramaddr_t        elan3mmu_ctp;          /* Context table entry for our context */
-+    ELAN3_PTBL        *elan3mmu_l1ptbl;       /* Level 1 Page table (first of 4) */
-+
-+    spinlock_t         elan3mmu_lXptbl_lock;  /* spinlock for level X table list */
-+    ELAN3_PTBL        *elan3mmu_lXptbl;       /* Level X Page table list */
-+
-+#ifdef LINUX
-+    struct mm_struct  *elan3mmu_coproc_mm;    /* Linux mm we're mapping */
-+#endif
-+} ELAN3MMU;
-+
-+_NOTE(LOCK_ORDER(elan3mmu::elan3mmu_lock elan3_dev::IntrLock))
-+
-+_NOTE(MUTEX_PROTECTS_DATA(elan3mmu::elan3mmu_lock,
-+                          elan3mmu::elan3mmu_mrgns elan3mmu::elan3mmu_mtail
-+                          elan3mmu::elan3mmu_ergns elan3mmu::elan3mmu_etail))
-+/* protected by dev->IntrLock for read by device driver */
-+_NOTE(DATA_READABLE_WITHOUT_LOCK(elan3mmu::elan3mmu_mrgns elan3mmu::elan3mmu_mtail
-+                                 elan3mmu::elan3mmu_ergns elan3mmu::elan3mmu_etail))
-+
-+_NOTE(SCHEME_PROTECTS_DATA("only set to valid region",
-+                           elan3mmu::elan3mmu_ergnlast elan3mmu::elan3mmu_mrgnlast))
-+
-+_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::IntrLock,
-+                          elan3mmu::elan3mmu_l1ptbl
-+                          elan3mmu::elan3mmu_ctp
-+                          elan3mmu::elan3mmu_dev))
-+
-+_NOTE(DATA_READABLE_WITHOUT_LOCK(elan3mmu::elan3mmu_l1ptbl
-+                                 elan3mmu::elan3mmu_ctp
-+                                 elan3mmu::elan3mmu_dev))
-+
-+/*
-+ * Macros for accessing ptes/ptbls/ptbl_grs
-+ */
-+
-+#define OFFSETOF(object,member)   /* calculate offset of structure member */ \
-+	((size_t) (&(((object *)0)->member)))
-+#define PTBL_TO_GR(ptbl)          /* convert ptbl to ptbl group */ \
-+	((ELAN3_PTBL_GR *) ((caddr_t) ((ptbl) - (ptbl)->ptbl_index) - OFFSETOF(ELAN3_PTBL_GR,pg_ptbls[0])))
-+#define PTBL_TO_PTADDR(ptbl)      /* convert ptbl to a ptp pointing at it */ \
-+	(PTBL_TO_GR(ptbl)->pg_addr + ((ptbl)->ptbl_index<<ELAN3_PT_SHIFT))
-+#define PTE_TO_HME(ptbl,pte)      /* convert pte to corresponding hme */ \
-+	(PTBL_TO_GR(ptbl)->pg_hmes + ((pte) - (ELAN3_PTE *) PTBL_TO_GR(ptbl)->pg_vaddr))
-+#define HME_TO_PTE(ptbl,hme)      /* convert hme to corresponding pte */ \
-+	((ELAN3_PTE *) PTBL_TO_GR(ptbl)->pg_vaddr + ((hme) - (PTBL_TO_GR(ptbl)->pg_hmes)))
-+
-+
-+/* Flags for lock_ptbl */
-+#define LK_PTBL_NOWAIT      0x1
-+#define LK_PTBL_FAILOK      0x2
-+
-+/* Return values for lock_ptbl */
-+#define LK_PTBL_OK          0x0
-+#define LK_PTBL_MISMATCH    0x1
-+#define LK_PTBL_FAILED      0x2
-+
-+/* Flags for elan3mmu_ptesync */
-+#define NO_MLIST_LOCK   0
-+#define MLIST_LOCKED    1
-+
-+/* Flags for elan3mmu_pteload */
-+#define PTE_LOAD                0x00
-+#define PTE_LOAD_LOCK           0x01    /* translation should be locked */
-+#define PTE_LOAD_NOSYNC         0x02    /* ref/mod bits should not be sync'ed to page */
-+#define PTE_NO_SLEEP            0x04    /* true if we can't sleep */
-+#define PTE_NO_STEAL            0x08    /* true if we don't want to steal ptbls */
-+
-+#define PTE_LOAD_ENDIAN_MASK    0x10    /* mask for endian-ness */
-+#define PTE_LOAD_LITTLE_ENDIAN  0x00    /* translation is to little-endian memory */
-+#define PTE_LOAD_BIG_ENDIAN     0x10    /* translation is to big-endian memory */
-+
-+
-+/* Flags for elan3mmu_unload */
-+#define PTE_UNLOAD              0x00
-+#define PTE_UNLOAD_UNLOCK       0x01
-+#define PTE_UNLOAD_NOFLUSH      0x02
-+#define PTE_UNLOAD_NOSYNC       0x04
-+
-+extern int elan3mmu_debug;
-+#ifdef DEBUG_PRINTF
-+# define HAT_PRINTF0(n,msg)             ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg) : (void) 0)
-+# define HAT_PRINTF1(n,msg,a)           ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg,a) : (void) 0)
-+# define HAT_PRINTF2(n,msg,a,b)         ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg,a,b) : (void) 0)
-+# define HAT_PRINTF3(n,msg,a,b,c)       ((elan3mmu_debug & n) ?
(void) elan3_debugf (NULL, DBG_HAT, msg,a,b,c) : (void) 0) -+# define HAT_PRINTF4(n,msg,a,b,c,d) ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg,a,b,c,d) : (void) 0) -+# define HAT_PRINTF5(n,msg,a,b,c,d,e) ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg,a,b,c,d,e) : (void) 0) -+# define HAT_PRINTF6(n,msg,a,b,c,d,e,f) ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg,a,b,c,d,e,f) : (void) 0) -+# ifdef LINUX -+# define HAT_PRINTF(n,args...) ((elan3mmu_debug & n) ? (void) elan3_debugf(NULL, DBG_HAT, ##args) : (void) 0) -+# endif -+#else -+# define HAT_PRINTF0(n,msg) -+# define HAT_PRINTF1(n,msg,a) -+# define HAT_PRINTF2(n,msg,a,b) -+# define HAT_PRINTF3(n,msg,a,b,c) -+# define HAT_PRINTF4(n,msg,a,b,c,d) -+# define HAT_PRINTF5(n,msg,a,b,c,d,e) -+# define HAT_PRINTF6(n,msg,a,b,c,d,e,f) -+# ifdef LINUX -+# define HAT_PRINTF(n,args...) -+# endif -+#endif -+ -+/* elan3mmu_generic.c */ -+extern ELAN3MMU_GLOBAL_STATS elan3mmu_global_stats; -+ -+extern void elan3mmu_init (void); -+extern void elan3mmu_fini (void); -+ -+extern ELAN3MMU *elan3mmu_alloc (struct elan3_ctxt *ctxt); -+extern void elan3mmu_free (ELAN3MMU *elan3mmu); -+ -+extern void elan3mmu_set_context_filter (ELAN3_DEV *dev, int ctx, int disabled, E3_uint32 Pend, E3_uint32 *Maskp); -+extern int elan3mmu_attach (ELAN3_DEV *dev, int ctx, ELAN3MMU *elan3mmu, sdramaddr_t routeTable, E3_uint32 routeMask); -+extern void elan3mmu_detach (ELAN3_DEV *dev, int ctx); -+ -+extern ELAN3MMU_RGN *elan3mmu_findrgn_elan (ELAN3MMU *elan3mmu, E3_Addr addr, int tail); -+extern int elan3mmu_addrgn_elan (ELAN3MMU *elan3mmu, ELAN3MMU_RGN *nrgn); -+extern ELAN3MMU_RGN *elan3mmu_removergn_elan (ELAN3MMU *elan3mmu, E3_Addr addr); -+extern ELAN3MMU_RGN *elan3mmu_rgnat_elan (ELAN3MMU *elan3mmu, E3_Addr addr); -+extern ELAN3MMU_RGN *elan3mmu_findrgn_main (ELAN3MMU *elan3mmu, caddr_t addr, int tail); -+extern int elan3mmu_addrgn_main (ELAN3MMU *elan3mmu, ELAN3MMU_RGN *nrgn); -+extern ELAN3MMU_RGN *elan3mmu_removergn_main (ELAN3MMU *elan3mmu, caddr_t addr); -+extern ELAN3MMU_RGN *elan3mmu_rgnat_main (ELAN3MMU *elan3mmu, caddr_t addr); -+ -+extern int elan3mmu_setperm (ELAN3MMU *elan3mmu, caddr_t maddr, E3_Addr eaddr, u_int len, u_int perm); -+extern void elan3mmu_clrperm (ELAN3MMU *elan3mmu, E3_Addr addr, u_int len); -+extern int elan3mmu_checkperm (ELAN3MMU *elan3mmu, E3_Addr addr, u_int len, u_int access); -+extern caddr_t elan3mmu_mainaddr (ELAN3MMU *elan3mmu, E3_Addr addr); -+extern E3_Addr elan3mmu_elanaddr (ELAN3MMU *elan3mmu, caddr_t addr); -+ -+extern void elan3mmu_expand (ELAN3MMU *elan3mmu, E3_Addr addr, int len, int level, int attr); -+extern void elan3mmu_reserve (ELAN3MMU *elan3mmu, E3_Addr addr, u_int npages, sdramaddr_t *); -+extern void elan3mmu_release (ELAN3MMU *elan3mmu, E3_Addr addr, u_int npages, sdramaddr_t *); -+ -+extern void elan3mmu_pteload (ELAN3MMU *elan3mmu, int level, E3_Addr addr, physaddr_t paddr, int perm, int attr); -+extern void elan3mmu_unload (ELAN3MMU *elan3mmu, E3_Addr addr, u_int len, int flags); -+extern void elan3mmu_sync (ELAN3MMU *elan3mmu, E3_Addr addr, u_int len, u_int clearflag); -+extern void elan3mmu_pteunload (ELAN3_PTBL *ptbl, sdramaddr_t pte, int flags, int got_mlist_lock); -+extern void elan3mmu_ptesync (ELAN3_PTBL *ptbl, sdramaddr_t pte, int flags, int got_mlist_lock); -+extern sdramaddr_t elan3mmu_ptp2pte (ELAN3MMU *elan3mmu, sdramaddr_t ptp, int level); -+extern sdramaddr_t elan3mmu_ptefind (ELAN3MMU *elan3mmu, E3_Addr, int *level, ELAN3_PTBL **pptbl, spinlock_t 
**plock, unsigned long *flags); -+extern sdramaddr_t elan3mmu_ptealloc (ELAN3MMU *elan3mmu, E3_Addr, int level, ELAN3_PTBL **pptbl, spinlock_t **plock, int attr, unsigned long *flags); -+extern void elan3mmu_l1inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l1ptbl, int flags); -+extern int elan3mmu_l2inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l2ptbl, int flags, E3_Addr addr, spinlock_t **pl2lock, unsigned long *lock_flags); -+extern int elan3mmu_l3inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l3ptbl, int flags, E3_Addr addr, spinlock_t **pl3lock, unsigned long *lock_flags); -+ -+extern void elan3mmu_free_l1ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags); -+extern void elan3mmu_free_l2ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags); -+extern void elan3mmu_free_l3ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags); -+ -+extern int elan3mmu_lock_this_ptbl (ELAN3_PTBL *ptbl, int flag, spinlock_t **plock, unsigned long *flags); -+extern int elan3mmu_lock_ptbl (ELAN3_PTBL *ptbl, u_int flag, ELAN3MMU *elan3mmu, E3_Addr va, int level, spinlock_t **plock, unsigned long *flags); -+extern void elan3mmu_unlock_ptbl (ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags); -+ -+/* elan3mmu_osdep.c */ -+extern void elan3mmu_init_osdep (void); -+extern void elan3mmu_fini_osdep (void); -+extern void elan3mmu_alloc_osdep (ELAN3MMU *elan3mmu); -+extern void elan3mmu_free_osdep (ELAN3MMU *elan3mmu); -+extern ELAN3_PTE elan3mmu_phys_to_pte (ELAN3_DEV *dev, physaddr_t paddr, int perm); -+extern ELAN3_PTE elan3mmu_kernel_invalid_pte (ELAN3MMU *elan3mmu); -+ -+#if defined (DIGITAL_UNIX) -+# include -+#elif defined (LINUX) -+# include -+#endif -+ -+#endif /* __KERNEL__ */ -+ -+#ifdef __cplusplus -+} -+#endif -+ -+#endif /* __ELAN3_ELAN3MMU_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan3/elan3mmu_linux.h linux-2.6.9/include/elan3/elan3mmu_linux.h ---- clean/include/elan3/elan3mmu_linux.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan3/elan3mmu_linux.h 2003-09-24 09:57:24.000000000 -0400 -@@ -0,0 +1,39 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. 
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ifndef __ELAN3_MMU_LINUX_H
-+#define __ELAN3_MMU_LINUX_H
-+
-+#ident "$Id: elan3mmu_linux.h,v 1.12 2003/09/24 13:57:24 david Exp $"
-+/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elan3mmu_linux.h,v $*/
-+
-+/* XXX copy of elan3mmu_dunix.h */
-+
-+#define ALLOC_ELAN3MMU(ptr,cansleep)       KMEM_ALLOC(ptr, ELAN3MMU *, sizeof (ELAN3MMU), cansleep)
-+#define ALLOC_PTBL_GR(ptr,cansleep)        KMEM_ALLOC(ptr, ELAN3_PTBL_GR *, sizeof (ELAN3_PTBL_GR), cansleep)
-+#define ALLOC_ELAN3MMU_RGN(ptr,cansleep)   KMEM_ALLOC(ptr, ELAN3MMU_RGN *, sizeof (ELAN3MMU_RGN), cansleep)
-+#define ALLOC_HMENTS(ptr,cansleep)         KMEM_ALLOC(ptr, ELAN3_HMENT *, sizeof (ELAN3_HMENT), cansleep)
-+
-+#define FREE_ELAN3MMU(ptr)                 KMEM_FREE(ptr,sizeof (ELAN3MMU))
-+#define FREE_PTBL_GR(ptr)                  KMEM_FREE(ptr,sizeof (ELAN3_PTBL_GR))
-+#define FREE_ELAN3MMU_RGN(ptr)             KMEM_FREE(ptr,sizeof (ELAN3MMU_RGN))
-+#define FREE_HMENTS(ptr)                   KMEM_FREE(ptr,sizeof (ELAN3_HMENT))
-+
-+extern void elan3mmu_init_osdep(void);
-+extern void elan3mmu_fini_osdep(void);
-+
-+extern void elan3mmu_pte_range_unload (ELAN3MMU *elan3mmu, struct mm_struct *mm, caddr_t addr, unsigned long len);
-+extern void elan3mmu_pte_range_update (ELAN3MMU *elan3mmu, struct mm_struct *mm, caddr_t addr, unsigned long len);
-+extern void elan3mmu_pte_ctxt_unload(ELAN3MMU *elan3mmu);
-+
-+#endif
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "stroustrup"
-+ * End:
-+ */
-diff -urN clean/include/elan3/elan3ops.h linux-2.6.9/include/elan3/elan3ops.h
---- clean/include/elan3/elan3ops.h	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/elan3/elan3ops.h	2003-09-24 09:57:24.000000000 -0400
-@@ -0,0 +1,42 @@
-+/*
-+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+/* $Id: elan3ops.h,v 1.3 2003/09/24 13:57:24 david Exp $ */
-+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elan3ops.h,v $ */
-+
-+#ifndef _ELAN3_OPS_H
-+#define _ELAN3_OPS_H
-+
-+int get_position (void *arg, ELAN_POSITION *position);
-+int set_position (void *arg, unsigned short nodeId, unsigned short numNodes);
-+
-+int elan3mod_create_cap  (void *arg, ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap);
-+int elan3mod_destroy_cap (void *arg, ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap);
-+
-+int elan3mod_create_vp  (void *arg, ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
-+int elan3mod_destroy_vp (void *arg, ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
-+
-+int elan3mod_attach_cap (void *arg_ctxt, ELAN_CAPABILITY *cap);
-+int elan3mod_detach_cap (void *arg_ctxt);
-+
-+extern ELAN_DEV_OPS elan3_dev_ops;
-+
-+int stats_get_index_name (void *arg, uint index, caddr_t name);
-+int stats_get_block      (void *arg, uint entries, ulong *value);
-+int stats_clear_block    (void *arg);
-+
-+int  elan3_register_dev_stats   (ELAN3_DEV *dev);
-+void elan3_deregister_dev_stats (ELAN3_DEV *dev);
-+
-+
-+#endif /* _ELAN3_OPS_H */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * End:
-+ */
-diff -urN clean/include/elan3/elanctxt.h linux-2.6.9/include/elan3/elanctxt.h
---- clean/include/elan3/elanctxt.h	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/elan3/elanctxt.h	2003-09-24 09:57:24.000000000 -0400
-@@ -0,0 +1,856 @@
-+/*
-+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef _ELAN3_ELANCTXT_H -+#define _ELAN3_ELANCTXT_H -+ -+#ident "$Id: elanctxt.h,v 1.81 2003/09/24 13:57:24 david Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elanctxt.h,v $*/ -+ -+#ifdef __cplusplus -+extern "C" { -+#endif -+ -+#include -+#include -+ -+#define BumpUserStat(ctxt, stat) ((ctxt)->FlagPage->stat++) -+ -+#if defined(__LITTLE_ENDIAN__) -+ -+typedef union _CProcTrapBuf -+{ -+ E3_uint64 Align64; -+ struct -+ { -+ E3_uint32 Areg; -+ E3_uint32 Breg; -+ } r; -+ struct -+ { -+ E3_uint32 Addr; -+ E3_uint32 ContextType; -+ } s; -+} CProcTrapBuf_BE; -+ -+typedef E3_EventInt E3_EventInt_BE; -+typedef E3_IprocTrapHeader E3_IprocTrapHeader_BE; -+typedef E3_IprocTrapData E3_IprocTrapData_BE; -+typedef E3_FaultSave E3_FaultSave_BE; -+ -+typedef union -+{ -+ E3_uint64 Align64; -+ E3_DMA s; -+} E3_DMA_BE; -+ -+typedef E3_ThreadQueue E3_ThreadQueue_BE; -+ -+#else -+ -+/* "Big-Endian" data structures copied by 64 bit loads, these are 32 bit word flipped */ -+/* from the corresponding data structure. */ -+ -+typedef union _CProcTrapBuf -+{ -+ E3_uint64 Align64; -+ struct -+ { -+ E3_uint32 Breg; -+ E3_uint32 Areg; -+ } r; -+ struct -+ { -+ E3_uint32 ContextType; -+ E3_uint32 Addr; -+ } s; -+} CProcTrapBuf_BE; -+ -+typedef union _E3_EventInt_BE -+{ -+ E3_uint64 Align64; -+ struct { -+ E3_uint32 EventContext; /* Bits 16 to 28 */ -+ E3_uint32 IntCookie; -+ } s; -+} E3_EventInt_BE; -+ -+typedef union _E3_IprocTrapHeader_BE -+{ -+ E3_uint64 Align64; -+ -+ struct -+ { -+ E3_uint32 TrAddr; -+ E3_TrTypeCntx TrTypeCntx; -+ union -+ { -+ E3_IProcStatus_Reg u_IProcStatus; -+ E3_uint32 u_TrData1; -+ } ipsotd; -+ E3_uint32 TrData0; -+ } s; -+} E3_IprocTrapHeader_BE; -+ -+typedef E3_IprocTrapData E3_IprocTrapData_BE; -+ -+typedef union _E3_FaultSave_be -+{ -+ E3_uint64 Align64; -+ struct { -+ volatile E3_uint32 FaultContext; -+ E3_FaultStatusReg FSR; -+ volatile E3_uint32 EventAddress; -+ volatile E3_uint32 FaultAddress; -+ } s; -+} E3_FaultSave_BE; -+ -+typedef union _e3_dma_be -+{ -+ E3_uint64 Align64; -+ struct { -+ E3_uint32 dma_size; -+ E3_DmaType dma_u; -+ E3_Addr dma_dest; -+ E3_Addr dma_source; -+ E3_CookieVProc dma_destCookieProc; -+ E3_Addr dma_destEvent; -+ E3_CookieVProc dma_srcCookieProc; -+ E3_Addr dma_srcEvent; -+ } s; -+} E3_DMA_BE; -+ -+typedef union _E3_ThreadQueue_BE -+{ -+ E3_uint64 Align64; -+ struct -+ { -+ /* copied by 64 bit copy from elan to main */ -+ E3_uint32 :3; /* Bits 29 to 31 */ -+ E3_uint32 Context:13; /* Bits 16 to 28 */ -+ E3_uint32 :16; /* Bits 0 to 15 */ -+ E3_Addr Thread; /* Bits 32 to 63 */ -+ } s; -+} E3_ThreadQueue_BE; -+ -+#endif /* defined(LITTLE_ENDIAN) || defined(__LITTLE_ENDIAN__) */ -+ -+typedef struct neterr_msg -+{ -+ E3_uint32 Rail; /* Rail error received on */ -+ ELAN_CAPABILITY SrcCapability; /* Capability of source of packet */ -+ ELAN_CAPABILITY DstCapability; /* Capability of dest of packet */ -+ -+ E3_uint32 DstProcess; /* Virtual Process of dest of packet */ -+ E3_Addr CookieAddr; /* Cookie Address (or NULL for DMA) */ -+ E3_uint32 CookieVProc; /* Cookie and VP (identifies DMA) */ -+ E3_uint32 NextCookie; /* Next Cookie value (for thread) */ -+ E3_uint32 WaitForEop; /* Wait for EOP transaction */ -+} NETERR_MSG; -+ -+#ifdef __KERNEL__ -+ -+/* -+ * Associated with each input channel can be a network error -+ * resolver structure, which can be queued on the network -+ * error resolver threads to perform RPCs to the other kernels -+ * when a network 
error occurs with an identify transaction -+ * included -+ */ -+typedef struct neterr_resolver -+{ -+ struct neterr_resolver *Next; -+ -+ spinlock_t Lock; -+ -+ struct elan3_ctxt *Ctxt; -+ ELAN_LOCATION Location; -+ -+ int Completed; -+ int Status; -+ long Timestamp; -+ -+ NETERR_MSG Message; -+} NETERR_RESOLVER; -+ -+ -+typedef struct neterr_fixup -+{ -+ struct neterr_fixup *Next; -+ -+ kcondvar_t Wait; -+ int Completed; -+ int Status; -+ -+ NETERR_MSG Message; -+} NETERR_FIXUP; -+ -+#endif /* __KERNEL__ */ -+ -+/* Each of the following structures must be padded to a whole */ -+/* number of 64 bit words since the kernel uses 64 bit load/stores */ -+/* to transfer the elan register state. */ -+typedef struct command_trap -+{ -+ E3_Status_Reg Status; /* 4 bytes */ -+ E3_uint32 Pad; /* 4 bytes */ -+ E3_FaultSave_BE FaultSave; /* 16 bytes */ -+ CProcTrapBuf_BE TrapBuf; /* 8 bytes */ -+} COMMAND_TRAP; -+ -+typedef struct thread_trap -+{ -+ E3_uint32 Registers[32]; /* 128 bytes */ -+#define REG_GLOBALS 0 -+#define REG_OUTS 8 -+#define REG_LOCALS 16 -+#define REG_INS 24 -+ -+ E3_FaultSave_BE FaultSave; /* 16 bytes */ -+ E3_FaultSave_BE DataFaultSave; /* 16 bytes */ -+ E3_FaultSave_BE InstFaultSave; /* 16 bytes */ -+ E3_FaultSave_BE OpenFaultSave; /* 16 bytes */ -+ -+ E3_Status_Reg Status; /* 4 bytes */ -+ -+ E3_Addr pc; /* 4 bytes */ -+ E3_Addr npc; /* 4 bytes */ -+ E3_Addr StartPC; /* 4 bytes */ -+ E3_Addr sp; /* 4 bytes */ -+ E3_uint32 mi; /* 4 bytes */ -+ E3_TrapBits TrapBits; /* 4 bytes */ -+ E3_DirtyBits DirtyBits; /* 4 bytes */ -+} THREAD_TRAP; -+ -+typedef struct dma_trap -+{ -+ E3_DMA_BE Desc; /* 32 bytes */ -+ E3_FaultSave_BE FaultSave; /* 16 bytes */ -+ E3_FaultSave_BE Data0; /* 16 bytes */ -+ E3_FaultSave_BE Data1; /* 16 bytes */ -+ E3_FaultSave_BE Data2; /* 16 bytes */ -+ E3_FaultSave_BE Data3; /* 16 bytes */ -+ E3_Status_Reg Status; /* 4 bytes */ -+ E3_DmaInfo PacketInfo; /* 4 bytes */ -+} DMA_TRAP; -+ -+typedef struct input_trap -+{ -+ E3_uint32 State; /* 4 bytes */ -+ E3_Status_Reg Status; /* 4 bytes */ -+ E3_FaultSave_BE FaultSave; /* 16 bytes */ -+ -+ u_int NumTransactions; /* 4 bytes */ -+ u_int Overflow; /* 4 bytes */ -+ u_int AckSent; /* 4 bytes */ -+ u_int BadTransaction; /* 4 bytes */ -+ -+ E3_IprocTrapHeader_BE *TrappedTransaction; /* 4 bytes */ -+ E3_IprocTrapData_BE *TrappedDataBuffer; /* 4 bytes */ -+ E3_IprocTrapHeader_BE *WaitForEopTransaction; /* 4 bytes */ -+ E3_IprocTrapData_BE *WaitForEopDataBuffer; /* 4 bytes */ -+ E3_IprocTrapHeader_BE *DmaIdentifyTransaction; /* 4 bytes */ -+ E3_IprocTrapHeader_BE *ThreadIdentifyTransaction; /* 4 bytes */ -+ E3_Addr LockQueuePointer; /* 4 bytes */ -+ E3_Addr UnlockQueuePointer; /* 4 bytes */ -+ -+ E3_IprocTrapHeader_BE Transactions[MAX_TRAPPED_TRANS]; /* n * 8 bytes */ -+ E3_IprocTrapData_BE DataBuffers[MAX_TRAPPED_TRANS]; /* n * 64 bytes */ -+} INPUT_TRAP; -+ -+typedef struct input_fault_save -+{ -+ struct input_fault_save *Next; -+ E3_Addr Addr; -+ E3_uint32 Count; -+} INPUT_FAULT_SAVE; -+ -+#define NUM_INPUT_FAULT_SAVE 32 -+#define MIN_INPUT_FAULT_PAGES 8 -+#define MAX_INPUT_FAULT_PAGES 128 -+ -+typedef E3_uint32 EVENT_COOKIE; -+ -+#ifdef __KERNEL__ -+ -+typedef struct event_cookie_entry -+{ -+ struct event_cookie_entry *ent_next; -+ struct event_cookie_entry *ent_prev; -+ -+ spinlock_t ent_lock; -+ unsigned ent_ref; -+ -+ EVENT_COOKIE ent_cookie; -+ EVENT_COOKIE ent_fired; -+ kcondvar_t ent_wait; -+} EVENT_COOKIE_ENTRY; -+ -+typedef struct event_cookie_table -+{ -+ struct event_cookie_table *tbl_next; -+ struct 
event_cookie_table *tbl_prev;
-+
-+    unsigned long        tbl_task;
-+    unsigned long        tbl_handle;
-+
-+    spinlock_t           tbl_lock;
-+    unsigned             tbl_ref;
-+    EVENT_COOKIE_ENTRY  *tbl_entries;
-+} EVENT_COOKIE_TABLE;
-+
-+#define NBYTES_PER_SMALL_ROUTE  8
-+#define NBYTES_PER_LARGE_ROUTE  16
-+
-+#define ROUTE_BLOCK_SIZE        ELAN3_PAGE_SIZE
-+#define NROUTES_PER_BLOCK       (ROUTE_BLOCK_SIZE/NBYTES_PER_LARGE_ROUTE)
-+
-+typedef struct elan3_routes
-+{
-+    struct elan3_routes *Next;                                  /* Can be chained together */
-+
-+    sdramaddr_t          Routes;                                /* sdram offset of route entries */
-+    bitmap_t             Bitmap[BT_BITOUL(NROUTES_PER_BLOCK)];  /* Bitmap of which entries are used */
-+} ELAN3_ROUTES;
-+
-+
-+typedef struct elan3_route_table
-+{
-+    spinlock_t    Lock;           /* Route lock */
-+    sdramaddr_t   Table;          /* Kernel address for route table */
-+    u_int         Size;           /* # entries in route table */
-+
-+    ELAN3_ROUTES *LargeRoutes;    /* Large routes */
-+} ELAN3_ROUTE_TABLE;
-+
-+typedef struct elan3_vpseg
-+{
-+    struct elan3_vpseg *Next;
-+    int                 Process;        /* Virtual process */
-+    int                 Entries;        /*   and # processes */
-+    int                 Type;           /* Type of cookie */
-+
-+    union
-+    {
-+
-+	ELAN_CAPABILITY Capability;     /* Capability of remote segment */
-+# define SegCapability  SegUnion.Capability
-+	struct {
-+	    u_short     LowProc;        /* Base process number */
-+	    u_short     HighProc;       /*   and high process number */
-+# define SegLowProc     SegUnion.BROADCAST.LowProc
-+# define SegHighProc    SegUnion.BROADCAST.HighProc
-+	} BROADCAST;
-+    } SegUnion;
-+} ELAN3_VPSEG;
-+
-+#define ELAN3_VPSEG_UNINT       0       /* Uninitialised */
-+#define ELAN3_VPSEG_P2P         1       /* Point to Point */
-+#define ELAN3_VPSEG_BROADCAST   2       /* Broadcast */
-+
-+#define NUM_LISTS       7               /* Number of "swap" lists */
-+
-+typedef struct elan3_ctxt
-+{
-+    struct elan3_ctxt   *Next;                  /* can be queued on a task */
-+    struct elan3_ctxt   *Prev;
-+
-+    CtxtHandle           Handle;                /* user handle */
-+    int                  RefCnt;                /* reference count */
-+
-+    ELAN3MMU            *Elan3mmu;              /* elan3mmu allocated for Elan translations */
-+
-+    struct elan3_ops    *Operations;            /* User supplied helper functions */
-+    void                *Private;               /* Users private pointer */
-+
-+    int                  Status;                /* Status (guarded by dev_mutex) */
-+    int                  OthersState;           /* State of halt queueing for dma/thread */
-+    int                  LwpCount;              /* Number of lwp's running */
-+
-+    ELAN3_DEV           *Device;                /* Elan device */
-+
-+    ELAN_CAPABILITY      Capability;            /* Capability I've attached as */
-+    ELAN_POSITION        Position;              /* Position when I was created */
-+
-+    ELAN3_VPSEG         *VpSegs;                /* List of virtual process segments */
-+    ELAN3_ROUTE_TABLE   *RouteTable;
-+
-+    krwlock_t            VpLock;                /* Reader/writer lock for vp list */
-+    kmutex_t             SwapListsLock;         /* mutex to lock swap lists */
-+    kmutex_t             CmdLock;               /* mutex to lock trapped dma command */
-+    kmutex_t             CmdPortLock;           /* mutex to load/unload commandport xlation */
-+
-+    kcondvar_t           Wait;                  /* Condition variable to sleep on */
-+    kcondvar_t           CommandPortWait;       /* Condition variable to wait for commandport */
-+    kcondvar_t           LwpWait;               /* Condition variable to wait for lwps to stop */
-+    kcondvar_t           HaltWait;              /* Condition variable to wait for halt */
-+    int                  Halted;                /*   and flag for halt cv */
-+
-+    caddr_t              CommandPageMapping;    /* user virtual address for command page mapping */
-+    ioaddr_t             CommandPage;           /* Elan command port mapping page */
-+    DeviceMappingHandle  CommandPageHandle;     /* DDI Handle */
-+    ioaddr_t             CommandPort;           /* Elan command port */
-+    void                *CommandPortItem;       /* Item we're re-issuing to commandport */
-+
-+    ELAN3_FLAGSTATS     *FlagPage;              /* Page visible to user process */
-+
-+    COMMAND_TRAP        *CommandTraps;          /* Command port traps */
-+    ELAN3_SPLIT_QUEUE    CommandTrapQ;
-+
-+    CProcTrapBuf_BE     *Commands;              /* Overflowed commands */
-+    ELAN3_QUEUE          CommandQ;
-+
-+    THREAD_TRAP         *ThreadTraps;           /* Thread processor traps */
-+    ELAN3_QUEUE          ThreadTrapQ;
-+
-+    DMA_TRAP            *DmaTraps;              /* Dma processor traps */
-+    ELAN3_QUEUE          DmaTrapQ;
-+
-+    INPUT_TRAP           Input0Trap;            /* Inputter channel 0 trap */
-+    INPUT_TRAP           Input1Trap;            /* Inputter channel 1 trap */
-+    NETERR_RESOLVER     *Input0Resolver;        /* Inputter channel 0 network error resolver */
-+    NETERR_RESOLVER     *Input1Resolver;        /* Inputter channel 1 network error resolver */
-+
-+    INPUT_FAULT_SAVE     InputFaults[NUM_INPUT_FAULT_SAVE]; /* stored writeblock addresses */
-+    INPUT_FAULT_SAVE    *InputFaultList;        /* organized in list for LRU */
-+    spinlock_t           InputFaultLock;        /*   and lock for list */
-+
-+    kmutex_t             NetworkErrorLock;
-+    NETERR_FIXUP        *NetworkErrorFixups;
-+
-+    EVENT_COOKIE        *EventCookies;          /* Event cookies. */
-+    ELAN3_QUEUE          EventCookieQ;
-+
-+    E3_Addr             *SwapThreads;           /* Swapped Thread Queue */
-+    ELAN3_QUEUE          SwapThreadQ;
-+
-+    E3_DMA_BE           *SwapDmas;              /* Swapped Dmas Queue */
-+    ELAN3_QUEUE          SwapDmaQ;
-+
-+    int                  ItemCount[NUM_LISTS];  /* Count of items on each swap list */
-+    int                  inhibit;               /* if set, lwp does not reload translations */
-+
-+    int                  Disabled;
-+} ELAN3_CTXT;
-+
-+_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::IntrLock,
-+			  elan3_ctxt::Status elan3_ctxt::OthersState
-+			  elan3_ctxt::CommandTrapQ elan3_ctxt::CommandQ elan3_ctxt::ThreadTrapQ elan3_ctxt::DmaTrapQ
-+			  elan3_ctxt::Input0Trap elan3_ctxt::Input1Trap elan3_ctxt::EventCookieQ elan3_ctxt::SwapThreadQ
-+			  elan3_ctxt::SwapDmaQ elan3_ctxt::CommandPortItem elan3_ctxt::LwpCount))
-+_NOTE(MUTEX_PROTECTS_DATA(elan3_ctxt::SwapListsLock,
-+			  elan3_ctxt::ItemCount))
-+_NOTE(RWLOCK_PROTECTS_DATA(elan3_ctxt::VpLock,
-+			   elan3_ctxt::VpSegs elan3_vpseg::Next elan3_vpseg::Process
-+			   elan3_vpseg::Entries elan3_vpseg::Type))
-+
-+_NOTE(DATA_READABLE_WITHOUT_LOCK(elan3_ctxt::ItemCount elan3_ctxt::Status elan3_ctxt::CommandPortItem))
-+
-+_NOTE(LOCK_ORDER(elan3_ctxt::SwapListsLock elan3_ctxt::CmdLock elan3_dev::IntrLock))
-+_NOTE(LOCK_ORDER(elan3_ctxt::SwapListsLock as::a_lock))   /* implicit by pagefault */
-+
-+#define CTXT_DETACHED                   (1 << 0)    /* Context is detached. */
-+#define CTXT_NO_LWPS                    (1 << 1)    /* No lwp's to handle faults */
-+#define CTXT_EXITING                    (1 << 2)    /* User process is exiting */
-+
-+#define CTXT_SWAPPING_OUT               (1 << 3)    /* Context is swapping out */
-+#define CTXT_SWAPPED_OUT                (1 << 4)    /* Context is swapped out */
-+
-+#define CTXT_SWAP_FREE                  (1 << 5)    /* Swap buffer is free */
-+#define CTXT_SWAP_VALID                 (1 << 6)    /* Swap buffer has queue entries in it */
-+
-+#define CTXT_DMA_QUEUE_FULL             (1 << 7)    /* Dma trap queue is full */
-+#define CTXT_THREAD_QUEUE_FULL          (1 << 8)    /* Thread trap queue is full */
-+#define CTXT_EVENT_QUEUE_FULL           (1 << 9)    /* Event interrupt queue is full */
-+#define CTXT_COMMAND_OVERFLOW_ERROR     (1 << 10)   /* Trap queue overflow */
-+
-+#define CTXT_SWAP_WANTED                (1 << 11)   /* Someone wanted to swap */
-+#define CTXT_WAITING_SWAPIN             (1 << 12)   /* Someone waiting on swapin */
-+
-+#define CTXT_WAITING_COMMAND            (1 << 13)   /* segelan waiting on command port */
-+#define CTXT_COMMAND_MAPPED_MAIN        (1 << 14)   /* segelan has mapped command port */
-+
-+#define CTXT_QUEUES_EMPTY               (1 << 15)   /* dma/thread run queues are empty */
-+#define CTXT_QUEUES_EMPTYING            (1 << 16)   /* dma/thread run queues are being emptied */
-+
-+#define CTXT_USER_FILTERING             (1 << 17)   /* user requested context filter */
-+
-+#define CTXT_KERNEL                     (1 << 18)   /* context is a kernel context */
-+#define CTXT_COMMAND_MAPPED_ELAN        (1 << 19)   /* command port is mapped for elan */
-+#define CTXT_FIXUP_NETERR               (1 << 20)   /* fixing up a network error */
-+
-+
-+#define CTXT_SWAPPED_REASONS            (CTXT_NO_LWPS           | \
-+					 CTXT_DETACHED           | \
-+					 CTXT_EXITING            | \
-+					 CTXT_FIXUP_NETERR)
-+
-+#define CTXT_OTHERS_REASONS             (CTXT_EVENT_QUEUE_FULL  | \
-+					 CTXT_DMA_QUEUE_FULL     | \
-+					 CTXT_THREAD_QUEUE_FULL  | \
-+					 CTXT_COMMAND_OVERFLOW_ERROR | \
-+					 CTXT_SWAPPED_REASONS)
-+
-+#define CTXT_INPUTTER_REASONS           (CTXT_USER_FILTERING    | \
-+					 CTXT_OTHERS_REASONS)
-+
-+#define CTXT_COMMAND_MAPPED             (CTXT_COMMAND_MAPPED_MAIN | \
-+					 CTXT_COMMAND_MAPPED_ELAN)
-+
-+#define CTXT_IS_KERNEL(ctxt)            ((ctxt)->Status & CTXT_KERNEL)
-+
-+/*
-+ * State values for ctxt_inputterState/ctxt_commandportStats
-+ */
-+#define CTXT_STATE_OK                   0
-+#define CTXT_STATE_TRAPPED              1   /* Inputter channel 0 trapped */
-+#define CTXT_STATE_RESOLVING            2   /* An LWP is resolving the trap */
-+#define CTXT_STATE_NEEDS_RESTART        3   /* The trapped packet needs to be executed */
-+#define CTXT_STATE_NETWORK_ERROR        4   /* We're waiting on an RPC for the identify transaction */
-+#define CTXT_STATE_EXECUTING            5   /* An LWP is executing the trapped packet */
-+
-+/*
-+ * State values for OthersState.
-+ */
-+#define CTXT_OTHERS_RUNNING             0
-+#define CTXT_OTHERS_HALTING             1
-+#define CTXT_OTHERS_SWAPPING            2
-+#define CTXT_OTHERS_HALTING_MORE        3
-+#define CTXT_OTHERS_SWAPPING_MORE       4
-+#define CTXT_OTHERS_SWAPPED             5
-+
-+typedef struct elan3_ops
-+{
-+    u_int       Version;
-+
-+    int         (*Exception)      (ELAN3_CTXT *ctxt, int type, int proc, void *trap, va_list ap);
-+
-+    /* swap item list functions */
-+    int         (*GetWordItem)    (ELAN3_CTXT *ctxt, int list, void **itemp, E3_uint32 *valuep);
-+    int         (*GetBlockItem)   (ELAN3_CTXT *ctxt, int list, void **itemp, E3_Addr *valuep);
-+    void        (*PutWordItem)    (ELAN3_CTXT *ctxt, int list, E3_Addr value);
-+    void        (*PutBlockItem)   (ELAN3_CTXT *ctxt, int list, E3_uint32 *ptr);
-+    void        (*PutbackItem)    (ELAN3_CTXT *ctxt, int list, void *item);
-+    void        (*FreeWordItem)   (ELAN3_CTXT *ctxt, void *item);
-+    void        (*FreeBlockItem)  (ELAN3_CTXT *ctxt, void *item);
-+    int         (*CountItems)     (ELAN3_CTXT *ctxt, int list);
-+
-+    /* event interrupt cookie */
-+    int         (*Event)          (ELAN3_CTXT *ctxt, E3_uint32 cookie, int flag);
-+
-+    /* swapin/swapout functions. */
-+    void        (*Swapin)         (ELAN3_CTXT *ctxt);
-+    void        (*Swapout)        (ELAN3_CTXT *ctxt);
-+
-+    /* Free of private data */
-+    void        (*FreePrivate)    (ELAN3_CTXT *ctxt);
-+
-+    /* Fixup a network error */
-+    int         (*FixupNetworkError) (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef);
-+
-+    /* Interrupt handler trap interface */
-+    int         (*DProcTrap)      (ELAN3_CTXT *ctxt, DMA_TRAP *trap);
-+    int         (*TProcTrap)      (ELAN3_CTXT *ctxt, THREAD_TRAP *trap);
-+    int         (*IProcTrap)      (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, int chan);
-+    int         (*CProcTrap)      (ELAN3_CTXT *ctxt, COMMAND_TRAP *trap);
-+    int         (*CProcReissue)   (ELAN3_CTXT *ctxt, CProcTrapBuf_BE *TrapBuf);
-+
-+    /* User memory access functions */
-+    int         (*StartFaultCheck)(ELAN3_CTXT *ctxt);
-+    void        (*EndFaultCheck)  (ELAN3_CTXT *ctxt);
-+
-+    E3_uint8    (*Load8)          (ELAN3_CTXT *ctxt, E3_Addr addr);
-+    void        (*Store8)         (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint8 val);
-+    E3_uint16   (*Load16)         (ELAN3_CTXT *ctxt, E3_Addr addr);
-+    void        (*Store16)        (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint16 val);
-+    E3_uint32   (*Load32)         (ELAN3_CTXT *ctxt, E3_Addr addr);
-+    void        (*Store32)        (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint32 val);
-+    E3_uint64   (*Load64)         (ELAN3_CTXT *ctxt, E3_Addr addr);
-+    void        (*Store64)        (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint64 val);
-+
-+} ELAN3_OPS;
-+
-+#define ELAN3_OPS_VERSION       0xdeef0001
-+
-+/*
-+ * Flags for ops_event.
-+ */
-+#define OP_INTR         0       /* Called from interrupt handler */
-+#define OP_LWP          1       /* Called from "lwp" */
-+
-+/*
-+ * Return codes for "ops" functions.
-+ */
-+#define OP_DEFER        0       /* Defer to next lower interrupt */
-+#define OP_IGNORE       1       /* No event handler, so ignore it */
-+#define OP_HANDLED      2       /* Handled event (resume thread) */
-+#define OP_FAILED       3       /* Failed */
-+
-+#define ELAN3_CALL_OP(ctxt,fn)  ((ctxt)->Operations && (ctxt)->Operations->fn) ? (ctxt)->Operations->fn
-+
-+#define ELAN3_OP_EXCEPTION(ctxt,type,proc,trap,ap)      (ELAN3_CALL_OP(ctxt,Exception) (ctxt,type,proc,trap,ap) : OP_IGNORE)
-+#define ELAN3_OP_GET_WORD_ITEM(ctxt,list,itemp,valuep)  (ELAN3_CALL_OP(ctxt,GetWordItem) (ctxt,list,itemp,valuep) : 0)
-+#define ELAN3_OP_GET_BLOCK_ITEM(ctxt,list,itemp,valuep) (ELAN3_CALL_OP(ctxt,GetBlockItem) (ctxt,list,itemp,valuep) : 0)
-+#define ELAN3_OP_PUT_WORD_ITEM(ctxt,list,value)         (ELAN3_CALL_OP(ctxt,PutWordItem) (ctxt,list,value) : (void)0)
-+#define ELAN3_OP_PUT_BLOCK_ITEM(ctxt,list,ptr)          (ELAN3_CALL_OP(ctxt,PutBlockItem) (ctxt,list,ptr) : (void)0)
-+#define ELAN3_OP_PUTBACK_ITEM(ctxt,list,item)           (ELAN3_CALL_OP(ctxt,PutbackItem) (ctxt,list,item) : (void)0)
-+#define ELAN3_OP_FREE_WORD_ITEM(ctxt,item)              (ELAN3_CALL_OP(ctxt,FreeWordItem) (ctxt,item) : (void)0)
-+#define ELAN3_OP_FREE_BLOCK_ITEM(ctxt,item)             (ELAN3_CALL_OP(ctxt,FreeBlockItem)(ctxt,item) : (void)0)
-+#define ELAN3_OP_COUNT_ITEMS(ctxt,list)                 (ELAN3_CALL_OP(ctxt,CountItems)(ctxt,list) : 0)
-+#define ELAN3_OP_EVENT(ctxt,cookie,flag)                (ELAN3_CALL_OP(ctxt,Event)(ctxt,cookie,flag) : OP_IGNORE)
-+#define ELAN3_OP_SWAPIN(ctxt)                           (ELAN3_CALL_OP(ctxt,Swapin)(ctxt) : (void)0)
-+#define ELAN3_OP_SWAPOUT(ctxt)                          (ELAN3_CALL_OP(ctxt,Swapout)(ctxt) : (void)0)
-+#define ELAN3_OP_FREE_PRIVATE(ctxt)                     (ELAN3_CALL_OP(ctxt,FreePrivate)(ctxt) : (void)0)
-+#define ELAN3_OP_FIXUP_NETWORK_ERROR(ctxt, nef)         (ELAN3_CALL_OP(ctxt,FixupNetworkError)(ctxt,nef) : OP_FAILED)
-+
-+#define ELAN3_OP_DPROC_TRAP(ctxt, trap)                 (ELAN3_CALL_OP(ctxt,DProcTrap)(ctxt,trap) : OP_DEFER)
-+#define ELAN3_OP_TPROC_TRAP(ctxt, trap)                 (ELAN3_CALL_OP(ctxt,TProcTrap)(ctxt,trap) : OP_DEFER)
-+#define ELAN3_OP_IPROC_TRAP(ctxt, trap, chan)           (ELAN3_CALL_OP(ctxt,IProcTrap)(ctxt,trap,chan) : OP_DEFER)
-+#define ELAN3_OP_CPROC_TRAP(ctxt, trap)                 (ELAN3_CALL_OP(ctxt,CProcTrap)(ctxt,trap) : OP_DEFER)
-+#define ELAN3_OP_CPROC_REISSUE(ctxt,tbuf)               (ELAN3_CALL_OP(ctxt,CProcReissue)(ctxt, tbuf) : OP_DEFER)
-+
-+#define ELAN3_OP_START_FAULT_CHECK(ctxt)                (ELAN3_CALL_OP(ctxt,StartFaultCheck)(ctxt) : 0)
-+#define ELAN3_OP_END_FAULT_CHECK(ctxt)                  (ELAN3_CALL_OP(ctxt,EndFaultCheck)(ctxt) : (void)0)
-+#define ELAN3_OP_LOAD8(ctxt,addr)                       (ELAN3_CALL_OP(ctxt,Load8)(ctxt,addr) : 0)
-+#define ELAN3_OP_STORE8(ctxt,addr,val)                  (ELAN3_CALL_OP(ctxt,Store8)(ctxt,addr,val) : (void)0)
-+#define ELAN3_OP_LOAD16(ctxt,addr)                      (ELAN3_CALL_OP(ctxt,Load16)(ctxt,addr) : 0)
-+#define ELAN3_OP_STORE16(ctxt,addr,val)                 (ELAN3_CALL_OP(ctxt,Store16)(ctxt,addr,val) : (void)0)
-+#define ELAN3_OP_LOAD32(ctxt,addr)                      (ELAN3_CALL_OP(ctxt,Load32)(ctxt,addr) : 0)
-+#define ELAN3_OP_STORE32(ctxt,addr,val)                 (ELAN3_CALL_OP(ctxt,Store32)(ctxt,addr,val) : (void)0)
-+#define ELAN3_OP_LOAD64(ctxt,addr)                      (ELAN3_CALL_OP(ctxt,Load64)(ctxt,addr) : 0)
-+#define ELAN3_OP_STORE64(ctxt,addr,val)                 (ELAN3_CALL_OP(ctxt,Store64)(ctxt,addr,val) : (void)0)
-+
-+#endif /* __KERNEL__ */
-+
-+/* "list" argument to ops functions */
-+#define LIST_DMA_PTR    0
-+#define LIST_DMA_DESC   1
-+#define LIST_THREAD     2
-+#define LIST_COMMAND    3
-+#define LIST_SETEVENT   4
-+#define LIST_FREE_WORD  5
-+#define LIST_FREE_BLOCK 6
-+
-+#define MAX_LISTS       7
-+
-+#if defined(__KERNEL__) && MAX_LISTS != NUM_LISTS
-+# error Check NUM_LISTS == MAX_LISTS
-+#endif
-+
-+/*
-+ * Values for the 'type' field to PostException().
-+ */ -+#define EXCEPTION_INVALID_ADDR 1 /* FaultArea, res */ -+#define EXCEPTION_UNIMP_INSTR 2 /* instr */ -+#define EXCEPTION_INVALID_PROCESS 3 /* proc, res */ -+#define EXCEPTION_SIMULATION_FAILED 4 /* */ -+#define EXCEPTION_UNIMPLEMENTED 5 /* */ -+#define EXCEPTION_SWAP_FAULT 6 /* */ -+#define EXCEPTION_SWAP_FAILED 7 /* */ -+#define EXCEPTION_BAD_PACKET 8 /* */ -+#define EXCEPTION_FAULTED 9 /* addr */ -+#define EXCEPTION_QUEUE_OVERFLOW 10 /* FaultArea, TrapType */ -+#define EXCEPTION_COMMAND_OVERFLOW 11 /* count */ -+#define EXCEPTION_DMA_RETRY_FAIL 12 /* */ -+#define EXCEPTION_CHAINED_EVENT 13 /* EventAddr */ -+#define EXCEPTION_THREAD_KILLED 14 /* */ -+#define EXCEPTION_CANNOT_SAVE_THREAD 15 -+#define EXCEPTION_BAD_SYSCALL 16 /* */ -+#define EXCEPTION_DEBUG 17 -+#define EXCEPTION_BAD_EVENT 18 /* */ -+#define EXCEPTION_NETWORK_ERROR 19 /* rvp */ -+#define EXCEPTION_BUS_ERROR 20 -+#define EXCEPTION_COOKIE_ERROR 21 -+#define EXCEPTION_PACKET_TIMEOUT 22 -+#define EXCEPTION_BAD_DMA 23 /* */ -+#define EXCEPTION_ENOMEM 24 -+ -+/* -+ * Values for the 'proc' field to ElanException(). -+ */ -+#define COMMAND_PROC 1 -+#define THREAD_PROC 2 -+#define DMA_PROC 3 -+#define INPUT_PROC 4 -+#define EVENT_PROC 5 -+ -+/* Flags to IssueDmaCommand */ -+#define ISSUE_COMMAND_FOR_CPROC 1 -+#define ISSUE_COMMAND_CANT_WAIT 2 -+ -+/* Return code from IssueDmaCommand.*/ -+#define ISSUE_COMMAND_OK 0 -+#define ISSUE_COMMAND_TRAPPED 1 -+#define ISSUE_COMMAND_RETRY 2 -+#define ISSUE_COMMAND_WAIT 3 -+ -+#ifdef __KERNEL__ -+ -+extern ELAN3_CTXT *elan3_alloc(ELAN3_DEV *dev, int kernel); -+extern void elan3_free (ELAN3_CTXT *ctxt); -+ -+extern int elan3_attach (ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap); -+extern int elan3_doattach (ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap); -+extern void elan3_detach (ELAN3_CTXT *ctxt); -+extern void elan3_dodetach (ELAN3_CTXT *ctxt); -+ -+extern int elan3_addvp (ELAN3_CTXT *ctxt, int process, ELAN_CAPABILITY *cap); -+extern int elan3_removevp (ELAN3_CTXT *ctxt, int process); -+extern int elan3_addbcastvp(ELAN3_CTXT *ctxt, int process, int base, int count); -+ -+extern int elan3_process (ELAN3_CTXT *ctxt); -+ -+extern int elan3_load_route (ELAN3_CTXT *ctxt, int process, E3_uint16 *flits); -+extern int elan3_check_route(ELAN3_CTXT *ctxt, int process, E3_uint16 *flits, E3_uint32 *routeError); -+ -+extern int elan3_lwp (ELAN3_CTXT *ctxt); -+ -+extern void elan3_swapin (ELAN3_CTXT *ctxt, int reason); -+extern void elan3_swapout (ELAN3_CTXT *ctxt, int reason); -+extern int elan3_pagefault (ELAN3_CTXT *ctxt, E3_FaultSave_BE *FaultSave, int npages); -+extern void elan3_block_inputter (ELAN3_CTXT *ctxt, int block); -+ -+ -+extern E3_Addr elan3_init_thread (ELAN3_DEV *dev, E3_Addr fn, E3_Addr addr, sdramaddr_t stack, int stackSize, int nargs, ...); -+ -+extern void SetInputterState (ELAN3_CTXT *ctxt, E3_uint32 Pend, E3_uint32 *Maskp); -+extern void SetInputterStateForContext (ELAN3_CTXT *ctxt, E3_uint32 Pend, E3_uint32 *Maskp); -+extern void UnloadCommandPageMapping (ELAN3_CTXT *ctxt); -+extern void StartSwapoutContext (ELAN3_CTXT *ctxt, E3_uint32 Pend, E3_uint32 *Maskp); -+ -+extern int HandleExceptions (ELAN3_CTXT *ctxt, unsigned long *flags); -+extern int RestartContext (ELAN3_CTXT *ctxt, unsigned long *flags); -+extern int CheckCommandQueueFlushed (ELAN3_CTXT *ctxt, E3_uint32 cflags, int how, unsigned long *flags); -+extern int IssueCommand (ELAN3_CTXT *ctxt, unsigned cmdoff, E3_Addr value, int flags); -+extern int IssueDmaCommand (ELAN3_CTXT *ctxt, E3_Addr value, void *item, int flags); 
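-+
-+/*
-+ * Editor's illustrative sketch (not part of the original Quadrics source):
-+ * one plausible way a kernel caller could drive IssueDmaCommand() above,
-+ * using the ISSUE_COMMAND_* return codes.  issue_dma_sketch() is a
-+ * hypothetical helper, and the choice of the ISSUE_COMMAND_CANT_WAIT flag
-+ * and of spinning on ISSUE_COMMAND_RETRY are assumptions, not the driver's
-+ * documented behaviour.
-+ */
-+static inline int
-+issue_dma_sketch (ELAN3_CTXT *ctxt, E3_Addr dma, void *item)
-+{
-+    int res;
-+
-+    do {
-+	res = IssueDmaCommand (ctxt, dma, item, ISSUE_COMMAND_CANT_WAIT);
-+    } while (res == ISSUE_COMMAND_RETRY);       /* command port busy - re-issue */
-+
-+    /* ISSUE_COMMAND_OK, _TRAPPED or _WAIT: the caller decides whether to
-+     * block (see WaitForDmaCommand() below) or leave it to the trap handler. */
-+    return (res);
-+}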
-+extern int WaitForDmaCommand (ELAN3_CTXT *ctxt, void *item, int flags); -+extern void FixupEventTrap (ELAN3_CTXT *ctxt, int proc, void *trap, E3_uint32 TrapType, -+ E3_FaultSave_BE *FaultSaveArea, int flags); -+extern int SimulateBlockCopy (ELAN3_CTXT *ctxt, E3_Addr EventAddress); -+extern void ReissueEvent (ELAN3_CTXT *ctxt, E3_Addr addr,int flags); -+extern int SetEventsNeedRestart (ELAN3_CTXT *ctxt); -+extern void RestartSetEvents (ELAN3_CTXT *ctxt); -+extern int RunEventType (ELAN3_CTXT *ctxt, E3_FaultSave_BE *FaultSaveArea, E3_uint32 EventType); -+extern void WakeupLwp (ELAN3_DEV *dev, void *arg); -+extern void QueueEventInterrupt (ELAN3_CTXT *ctxt, E3_uint32 cookie); -+extern int WaitForCommandPort (ELAN3_CTXT *ctxt); -+ -+extern int ElanException (ELAN3_CTXT *ctxt, int type, int proc, void *trap, ...); -+ -+/* context_osdep.c */ -+extern int LoadElanTranslation (ELAN3_CTXT *ctxt, E3_Addr elanAddr, int len, int protFault, int writeable); -+extern void LoadCommandPortTranslation (ELAN3_CTXT *ctxt); -+ -+#if defined(DIGITAL_UNIX) -+/* seg_elan.c */ -+extern caddr_t elan3_segelan3_create (ELAN3_CTXT *ctxt); -+extern void elan3_segelan3_destroy (ELAN3_CTXT *ctxt); -+extern int elan3_segelan3_map (ELAN3_CTXT *ctxt); -+extern void elan3_segelan3_unmap (ELAN3_CTXT *ctxt); -+ -+/* seg_elanmem.c */ -+extern int elan3_segelanmem_create (ELAN3_DEV *dev, unsigned object, unsigned off, vm_offset_t *addrp, int len); -+#endif /* defined(DIGITAL_UNIX) */ -+ -+/* route_table.c */ -+extern ELAN3_ROUTE_TABLE *AllocateRouteTable (ELAN3_DEV *dev, int size); -+extern void FreeRouteTable (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl); -+extern int LoadRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int vp, int ctxnum, int nflits, E3_uint16 *flits); -+extern int GetRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process, E3_uint16 *flits); -+extern void InvalidateRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int vp); -+extern void ValidateRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int vp); -+extern void ClearRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int vp); -+ -+extern int GenerateRoute (ELAN_POSITION *pos, E3_uint16 *flits, int lowid, int highid, int timeout, int highPri); -+extern int GenerateProbeRoute (E3_uint16 *flits, int nodeid, int level, int *linkup, int *linkdown, int adaptive); -+extern int GenerateCheckRoute (ELAN_POSITION *pos, E3_uint16 *flits, int level, int adaptive); -+ -+/* virtual_process.c */ -+extern ELAN_LOCATION ProcessToLocation (ELAN3_CTXT *ctxt, ELAN3_VPSEG *seg, int process, ELAN_CAPABILITY *cap); -+extern int ResolveVirtualProcess (ELAN3_CTXT *ctxt, int process); -+extern caddr_t CapabilityString (ELAN_CAPABILITY *cap); -+extern void UnloadVirtualProcess (ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap); -+ -+extern int elan3_get_route (ELAN3_CTXT *ctxt, int process, E3_uint16 *flits); -+extern int elan3_reset_route (ELAN3_CTXT *ctxt, int process); -+ -+/* cproc.c */ -+extern int NextCProcTrap (ELAN3_CTXT *ctxt, COMMAND_TRAP *trap); -+extern void ResolveCProcTrap (ELAN3_CTXT *ctxt); -+extern int RestartCProcTrap (ELAN3_CTXT *ctxt); -+ -+/* iproc.c */ -+extern void InspectIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap); -+extern void ResolveIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, NETERR_RESOLVER **rvp); -+extern int RestartIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap); -+extern char *IProcTrapString (E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData *datap); -+extern void SimulateUnlockQueue (ELAN3_CTXT *ctxt, E3_Addr QueuePointer, int SentAck); -+ -+/* tproc.c */ -+extern int 
NextTProcTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap); -+extern void ResolveTProcTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap); -+extern int TProcNeedsRestart (ELAN3_CTXT *ctxt); -+extern void RestartTProcItems (ELAN3_CTXT *ctxt); -+extern E3_Addr SaveThreadToStack (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int SkipInstruction); -+extern void ReissueStackPointer (ELAN3_CTXT *ctxt, E3_Addr StackPointer); -+ -+/* tprocinsts.c */ -+extern int RollThreadToClose (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, E3_uint32 PAckVal); -+ -+/* tproc_osdep.c */ -+extern int ThreadSyscall (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int *skip); -+extern int ThreadElancall (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int *skip); -+ -+/* dproc.c */ -+extern int NextDProcTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap); -+extern void ResolveDProcTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap); -+extern int DProcNeedsRestart (ELAN3_CTXT *ctxt); -+extern void RestartDProcItems (ELAN3_CTXT *ctxt); -+extern void RestartDmaDesc (ELAN3_CTXT *ctxt, E3_DMA_BE *desc); -+extern void RestartDmaTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap); -+extern void RestartDmaPtr (ELAN3_CTXT *ctxt, E3_Addr ptr); -+ -+/* network_error.c */ -+extern void InitialiseNetworkErrorResolver (void); -+extern void FinaliseNetworkErrorResolver (void); -+extern int QueueNetworkErrorResolver (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, NETERR_RESOLVER **rvpp); -+extern void FreeNetworkErrorResolver (NETERR_RESOLVER *rvp); -+extern void CancelNetworkErrorResolver (NETERR_RESOLVER *rvp); -+extern int ExecuteNetworkErrorFixup (NETERR_MSG *msg); -+extern void CompleteNetworkErrorFixup (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef, int status); -+ -+extern int AddNeterrServerSyscall (int elanId, void *configp, void *addrp, char *namep); -+ -+/* eventcookie.c */ -+extern void cookie_init(void); -+extern void cookie_fini(void); -+extern EVENT_COOKIE_TABLE *cookie_alloc_table (unsigned long task, unsigned long handle); -+extern void cookie_free_table (EVENT_COOKIE_TABLE *tbl); -+extern int cookie_alloc_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie); -+extern int cookie_free_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie); -+extern int cookie_fire_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie); -+extern int cookie_wait_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie); -+extern int cookie_arm_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie); -+ -+/* routecheck.c */ -+extern int elan3_route_check (ELAN3_CTXT *ctxt, E3_uint16 *flits, int destNode); -+extern int elan3_route_broadcast_check(ELAN3_CTXT *ctxt, E3_uint16 *flitsA, int lowNode, int highNode); -+ -+ -+#endif /* __KERNEL__ */ -+ -+#ifdef __cplusplus -+} -+#endif -+ -+#endif /* _ELAN3_ELANCTXT_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan3/elandebug.h linux-2.6.9/include/elan3/elandebug.h ---- clean/include/elan3/elandebug.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan3/elandebug.h 2003-09-24 09:57:24.000000000 -0400 -@@ -0,0 +1,106 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef _ELAN3_ELANDEBUG_H -+#define _ELAN3_ELANDEBUG_H -+ -+#ident "$Id: elandebug.h,v 1.38 2003/09/24 13:57:24 david Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elandebug.h,v $ */ -+ -+#if defined(__KERNEL__) -+ -+extern u_int elan3_debug; -+extern u_int elan3_debug_console; -+extern u_int elan3_debug_buffer; -+extern u_int elan3_debug_ignore_dev; -+extern u_int elan3_debug_ignore_kcomm; -+extern u_int elan3_debug_ignore_ctxt; -+extern u_int elan3_debug_display_ctxt; -+ -+#define DBG_CONFIG 0x00000001 /* Module configuration */ -+#define DBG_HAT 0x00000002 -+#define DBG_FN 0x00000004 -+#define DBG_SEG 0x00000008 -+#define DBG_INTR 0x00000010 -+#define DBG_LWP 0x00000020 -+#define DBG_FAULT 0x00000040 -+#define DBG_EVENT 0x00000080 -+#define DBG_CPROC 0x00000100 -+#define DBG_TPROC 0x00000200 -+#define DBG_DPROC 0x00000400 -+#define DBG_IPROC 0x00000800 -+#define DBG_SWAP 0x00001000 -+#define DBG_CMD 0x00002000 -+#define DBG_VP 0x00004000 -+#define DBG_SYSCALL 0x00008000 -+#define DBG_BSCAN 0x00010000 -+#define DBG_LINKERR 0x00020000 -+#define DBG_NETERR 0x00040000 -+#define DBG_NETRPC 0x00080000 -+#define DBG_EVENTCOOKIE 0x00100000 -+#define DBG_SDRAM 0x00200000 -+ -+#define DBG_EP 0x10000000 -+#define DBG_EPCONSOLE 0x20000000 -+ -+#define DBG_EIP 0x40000000 -+#define DBG_EIPFAIL 0x80000000 -+ -+#define DBG_ALL 0xffffffff -+ -+/* values to pass as "ctxt" rather than a "ctxt" pointer */ -+#define DBG_DEVICE ((void *) 0) -+#define DBG_KCOMM ((void *) 1) -+#define DBG_ICS ((void *) 2) -+#define DBG_USER ((void *) 3) -+#define DBG_NTYPES 64 -+ -+#if defined(DEBUG_PRINTF) -+# define DBG(m,fn) ((elan3_debug&(m)) ? (void)(fn) : (void)0) -+# define PRINTF0(ctxt,m,fmt) ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt) : (void)0) -+# define PRINTF1(ctxt,m,fmt,a) ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a) : (void)0) -+# define PRINTF2(ctxt,m,fmt,a,b) ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a,b) : (void)0) -+# define PRINTF3(ctxt,m,fmt,a,b,c) ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a,b,c) : (void)0) -+# define PRINTF4(ctxt,m,fmt,a,b,c,d) ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a,b,c,d) : (void)0) -+# define PRINTF5(ctxt,m,fmt,a,b,c,d,e) ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a,b,c,d,e) : (void)0) -+# define PRINTF6(ctxt,m,fmt,a,b,c,d,e,f) ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a,b,c,d,e,f) : (void)0) -+#ifdef __GNUC__ -+# define PRINTF(ctxt,m,args...) ((elan3_debug&(m)) ? elan3_debugf(ctxt,m, ##args) : (void)0) -+#endif -+ -+#else -+ -+# define DBG(m, fn) do { ; } while (0) -+# define PRINTF0(ctxt,m,fmt) do { ; } while (0) -+# define PRINTF1(ctxt,m,fmt,a) do { ; } while (0) -+# define PRINTF2(ctxt,m,fmt,a,b) do { ; } while (0) -+# define PRINTF3(ctxt,m,fmt,a,b,c) do { ; } while (0) -+# define PRINTF4(ctxt,m,fmt,a,b,c,d) do { ; } while (0) -+# define PRINTF5(ctxt,m,fmt,a,b,c,d,e) do { ; } while (0) -+# define PRINTF6(ctxt,m,fmt,a,b,c,d,e,f) do { ; } while (0) -+#ifdef __GNUC__ -+# define PRINTF(ctxt,m,args...) do { ; } while (0) -+#endif -+ -+#endif /* DEBUG_PRINTF */ -+ -+#ifdef __GNUC__ -+extern void elan3_debugf (void *ctxt, unsigned int mode, char *fmt, ...) 
-+ __attribute__ ((format (printf,3,4))); -+#else -+extern void elan3_debugf (void *ctxt, unsigned int mode, char *fmt, ...); -+#endif -+ -+ -+#endif /* __KERNEL__ */ -+#endif /* _ELAN3_ELANDEBUG_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan3/elandev.h linux-2.6.9/include/elan3/elandev.h ---- clean/include/elan3/elandev.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan3/elandev.h 2005-07-20 07:35:20.000000000 -0400 -@@ -0,0 +1,581 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN3_ELANDEV_H -+#define __ELAN3_ELANDEV_H -+ -+#ident "$Id: elandev.h,v 1.76.2.1 2005/07/20 11:35:20 mike Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elandev.h,v $ */ -+ -+#include -+#include -+#include -+ -+#if defined(DIGITAL_UNIX) -+# include -+#elif defined(LINUX) -+# include -+#elif defined(SOLARIS) -+# include -+#endif -+ -+#ifndef TRUE -+# define TRUE 1 -+#endif -+#ifndef FALSE -+# define FALSE 0 -+#endif -+ -+/* -+ * Elan base address registers defined as follows : -+ */ -+#define ELAN3_BAR_SDRAM 0 -+#define ELAN3_BAR_COMMAND_PORT 1 -+#define ELAN3_BAR_REGISTERS 2 -+#define ELAN3_BAR_EBUS 3 -+ -+/* Macro to generate 'offset' to mmap "mem" device */ -+#define OFF_TO_SPACE(off) ((off) >> 28) -+#define OFF_TO_OFFSET(off) ((off) & 0x0FFFFFFF) -+#define GEN_OFF(space,off) (((space) << 28) | ((off) & 0x0FFFFFFF)) -+ -+#ifdef __KERNEL__ -+ -+/* -+ * Elan EBUS is configured as follows : -+ */ -+#define ELAN3_EBUS_ROM_OFFSET 0x000000 /* rom */ -+#define ELAN3_EBUS_INTPAL_OFFSET 0x180000 /* interrupt pal (write only) */ -+ -+#define ELAN3_EBUS_ROM_SIZE 0x100000 -+ -+/* -+ * Elan SDRAM is arranged as follows : -+ */ -+#define ELAN3_TANDQ_SIZE 0x0020000 /* Trap And Queue Size */ -+#define ELAN3_CONTEXT_SIZE 0x0010000 /* Context Table Size */ -+#define ELAN3_COMMAND_TRAP_SIZE 0x0010000 /* Command Port Trap Size */ -+ -+#ifdef MPSAS -+#define ELAN3_LN2_NUM_CONTEXTS 8 /* Support 256 contexts */ -+#else -+#define ELAN3_LN2_NUM_CONTEXTS 12 /* Support 4096 contexts */ -+#endif -+#define ELAN3_NUM_CONTEXTS (1 << ELAN3_LN2_NUM_CONTEXTS) /* Entries in context table */ -+ -+#define ELAN3_SDRAM_NUM_BANKS 4 /* Elan supports 4 Banks of Sdram */ -+#define ELAN3_SDRAM_BANK_SHIFT 26 /* each of which can be 64 mbytes ? */ -+#define ELAN3_SDRAM_BANK_SIZE (1 << ELAN3_SDRAM_BANK_SHIFT) -+ -+#define ELAN3_MAX_CACHE_SIZE (64 * 1024) /* Maximum cache size */ -+#define ELAN3_CACHE_SIZE (64 * 4 * E3_CACHELINE_SIZE) /* Elan3 has 8K cache */ -+ -+#ifndef offsetof -+#define offsetof(s, m) (size_t)(&(((s *)0)->m)) -+#endif -+ -+/* -+ * circular queue and macros to access members. 
-+ */ -+typedef struct -+{ -+ u_int q_back; /* Next free space */ -+ u_int q_front; /* First object to remove */ -+ u_int q_size; /* Size of queue */ -+ u_int q_count; /* Current number of entries */ -+ u_int q_slop; /* FULL <=> (count+slop) == size */ -+} ELAN3_QUEUE; -+ -+typedef struct -+{ -+ u_int q_back; /* Next free space */ -+ u_int q_middle; /* Middle pointer */ -+ u_int q_front; /* First object to remove */ -+ u_int q_size; /* Size of queue */ -+ u_int q_count; /* Current number of entries */ -+ u_int q_slop; /* FULL <=> (count+slop) == size */ -+} ELAN3_SPLIT_QUEUE; -+ -+#define ELAN3_QUEUE_INIT(q,num,slop) ((q).q_size = (num), (q).q_slop = (slop)+1, (q).q_front = (q).q_back = 0, (q).q_count = 0) -+#define ELAN3_QUEUE_FULL(q) ((q).q_count == ((q).q_size - (q).q_slop)) -+#define ELAN3_QUEUE_REALLY_FULL(q) ((q).q_count == (q).q_size - 1) -+#define ELAN3_QUEUE_EMPTY(q) ((q).q_count == 0) -+#define ELAN3_QUEUE_FRONT_EMPTY(q) ((q).q_front == (q).q_middle) -+#define ELAN3_QUEUE_BACK_EMPTY(q) ((q).q_middle == (q).q_back) -+#define ELAN3_QUEUE_ADD(q) ((q).q_back = ((q).q_back+1) % (q).q_size, (q).q_count++) -+#define ELAN3_QUEUE_REMOVE(q) ((q).q_front = ((q).q_front+1) % (q).q_size, (q).q_count--) -+#define ELAN3_QUEUE_ADD_FRONT(q) ((q).q_front = ((q).q_front-1) % (q).q_size, (q).q_count++) -+#define ELAN3_QUEUE_CONSUME(q) ((q).q_middle = ((q).q_middle+1) % (q).q_size) -+#define ELAN3_QUEUE_FRONT(q,qArea) (&(qArea)[(q).q_front]) -+#define ELAN3_QUEUE_MIDDLE(q,qArea) (&(qArea)[(q).q_middle]) -+#define ELAN3_QUEUE_BACK(q,qArea) (&(qArea)[(q).q_back]) -+ -+#define SDRAM_MIN_BLOCK_SHIFT 10 -+#define SDRAM_NUM_FREE_LISTS 17 /* allows max 64Mb block */ -+#define SDRAM_MIN_BLOCK_SIZE (1 << SDRAM_MIN_BLOCK_SHIFT) -+#define SDRAM_MAX_BLOCK_SIZE (SDRAM_MIN_BLOCK_SIZE << (SDRAM_NUM_FREE_LISTS-1)) -+#define SDRAM_FREELIST_TRIGGER 32 -+ -+typedef struct elan3_sdram_bank -+{ -+ u_int Size; /* Size of bank of memory */ -+ -+ ioaddr_t Mapping; /* Where mapped in the kernel */ -+ DeviceMappingHandle Handle; /* and mapping handle */ -+ -+ struct elan3_ptbl_gr **PtblGroups; -+ -+ bitmap_t *Bitmaps[SDRAM_NUM_FREE_LISTS]; -+} ELAN3_SDRAM_BANK; -+ -+typedef struct elan3_haltop -+{ -+ struct elan3_haltop *Next; /* Chain to next in list. 
*/
-+ E3_uint32 Mask; /* Interrupt mask to see before calling function */
-+
-+ void (*Function)(void *, void *); /* Function to call */
-+ void *Arguement; /* Argument to pass to function */
-+} ELAN3_HALTOP;
-+
-+#define HALTOP_BATCH 32
-+
-+#endif /* __KERNEL__ */
-+
-+typedef struct elan3_stats
-+{
-+ u_long Version; /* version field */
-+ u_long Interrupts; /* count of elan interrupts */
-+ u_long TlbFlushes; /* count of tlb flushes */
-+ u_long InvalidContext; /* count of traps with invalid context */
-+ u_long ComQueueHalfFull; /* count of interrupts due to com queue being half full */
-+
-+ u_long CProcTraps; /* count of cproc traps */
-+ u_long DProcTraps; /* count of dproc traps */
-+ u_long TProcTraps; /* count of tproc traps */
-+ u_long IProcTraps; /* count of iproc traps */
-+ u_long EventInterrupts; /* count of event interrupts */
-+
-+ u_long PageFaults; /* count of elan page faults */
-+
-+ /* inputter related */
-+ u_long EopBadAcks; /* count of EOP_BAD_ACKs */
-+ u_long EopResets; /* count of EOP_ERROR_RESET */
-+ u_long InputterBadLength; /* count of BadLength */
-+ u_long InputterCRCDiscards; /* count of CRC_STATUS_DISCARD */
-+ u_long InputterCRCErrors; /* count of CRC_STATUS_ERROR */
-+ u_long InputterCRCBad; /* count of CRC_STATUS_BAD */
-+ u_long DmaNetworkErrors; /* count of errors in dma data */
-+ u_long DmaIdentifyNetworkErrors; /* count of errors after dma identify */
-+ u_long ThreadIdentifyNetworkErrors; /* count of errors after thread identify */
-+
-+ /* dma related */
-+ u_long DmaRetries; /* count of dma retries (due to retry fail count) */
-+ u_long DmaOutputTimeouts; /* count of dma output timeouts */
-+ u_long DmaPacketAckErrors; /* count of dma packet ack errors */
-+
-+ /* thread related */
-+ u_long ForcedTProcTraps; /* count of forced tproc traps */
-+ u_long TrapForTooManyInsts; /* count of too many instruction traps */
-+ u_long ThreadOutputTimeouts; /* count of thread output timeouts */
-+ u_long ThreadPacketAckErrors; /* count of thread packet ack errors */
-+
-+ /* link related */
-+ u_long LockError; /* count of RegPtr->Exts.LinkErrorTypes:LS_LockError */
-+ u_long DeskewError; /* count of RegPtr->Exts.LinkErrorTypes:LS_DeskewError */
-+ u_long PhaseError; /* count of RegPtr->Exts.LinkErrorTypes:LS_PhaseError */
-+ u_long DataError; /* count of RegPtr->Exts.LinkErrorTypes:LS_DataError */
-+ u_long FifoOvFlow0; /* count of RegPtr->Exts.LinkErrorTypes:LS_FifoOvFlow0 */
-+ u_long FifoOvFlow1; /* count of RegPtr->Exts.LinkErrorTypes:LS_FifoOvFlow1 */
-+ u_long LinkErrorValue; /* link error value on data error */
-+
-+ /* memory related */
-+ u_long CorrectableErrors; /* count of correctable ecc errors */
-+ u_long UncorrectableErrors; /* count of uncorrectable ecc errors */
-+ u_long MultipleErrors; /* count of multiple ecc errors */
-+ u_long SdramBytesFree; /* count of sdram bytes free */
-+
-+ /* Interrupt related */
-+ u_long LongestInterrupt; /* length of longest interrupt in ticks */
-+
-+ u_long EventPunts; /* count of punts of event interrupts to thread */
-+ u_long EventRescheds; /* count of reschedules of event interrupt thread */
-+} ELAN3_STATS;
-+
-+#define ELAN3_STATS_VERSION (ulong)2
-+#define ELAN3_NUM_STATS (sizeof (ELAN3_STATS)/sizeof (u_long))
-+
-+#define ELAN3_STATS_DEV_FMT "elan3_stats_dev_%d"
-+
-+#ifdef __KERNEL__
-+
-+#define BumpStat(dev,stat) ((dev)->Stats.stat++)
-+
-+typedef struct elan3_level_ptbl_block
-+{
-+ spinlock_t PtblLock; /* Page table freelist lock */
-+ int PtblTotal; /* Count of level N page tables
allocated */ -+ int PtblFreeCount; /* Count of free level N page tables */ -+ struct elan3_ptbl *PtblFreeList; /* Free level N page tables */ -+ struct elan3_ptbl_gr *PtblGroupList; /* List of Groups of level N page tables */ -+} ELAN3_LEVEL_PTBL_BLOCK; -+ -+typedef struct elan3_dev -+{ -+ ELAN3_DEV_OSDEP Osdep; /* OS specific entries */ -+ int Instance; /* Device number */ -+ ELAN_DEVINFO Devinfo; -+ ELAN_POSITION Position; /* position in switch network (for user code) */ -+ ELAN_DEV_IDX DeviceIdx; /* device index registered with elanmod */ -+ -+ int ThreadsShouldStop; /* flag that kernel threads should stop */ -+ -+ spinlock_t IntrLock; -+ spinlock_t TlbLock; -+ spinlock_t CProcLock; -+ kcondvar_t IntrWait; /* place event interrupt thread sleeps */ -+ unsigned EventInterruptThreadStarted:1; /* event interrupt thread started */ -+ unsigned EventInterruptThreadStopped:1; /* event interrupt thread stopped */ -+ -+ DeviceMappingHandle RegHandle; /* DDI Handle */ -+ ioaddr_t RegPtr; /* Elan Registers */ -+ -+ volatile E3_uint32 InterruptMask; /* copy of RegPtr->InterruptMask */ -+ volatile E3_uint32 Event_Int_Queue_FPtr; /* copy of RegPtr->Event_Int_Queue_FPtr */ -+ volatile E3_uint32 SchCntReg; /* copy of RegPtr->SchCntReg */ -+ volatile E3_uint32 Cache_Control_Reg; /* true value for RegPtr->Cache_Control_Reg */ -+ -+ ELAN3_SDRAM_BANK SdramBanks[ELAN3_SDRAM_NUM_BANKS]; /* Elan sdram banks */ -+ spinlock_t SdramLock; /* Sdram allocator */ -+ sdramaddr_t SdramFreeLists[SDRAM_NUM_FREE_LISTS]; -+ unsigned SdramFreeCounts[SDRAM_NUM_FREE_LISTS]; -+ -+ sdramaddr_t TAndQBase; /* Trap and Queue area */ -+ sdramaddr_t ContextTable; /* Elan Context Table */ -+ u_int ContextTableSize; /* # entries in context table */ -+ -+ struct elan3_ctxt **CtxtTable; /* array of ctxt pointers or nulls */ -+ -+ sdramaddr_t CommandPortTraps[2]; /* Command port trap overflow */ -+ int CurrentCommandPortTrap; /* Which overflow queue we're using */ -+ -+ u_int HaltAllCount; /* Count of reasons to halt context 0 queues */ -+ u_int HaltNonContext0Count; /* Count of reasons to halt non-context 0 queues */ -+ u_int HaltDmaDequeueCount; /* Count of reasons to halt dma from dequeuing */ -+ u_int HaltThreadCount; /* Count of reasons to halt the thread processor */ -+ u_int FlushCommandCount; /* Count of reasons to flush command queues */ -+ u_int DiscardAllCount; /* Count of reasons to discard context 0 */ -+ u_int DiscardNonContext0Count; /* Count of reasons to discard non context 0 */ -+ -+ struct thread_trap *ThreadTrap; /* Thread Processor trap space */ -+ struct dma_trap *DmaTrap; /* DMA Processor trap space */ -+ -+ spinlock_t FreeHaltLock; /* Lock for haltop free list */ -+ ELAN3_HALTOP *FreeHaltOperations; /* Free list of haltops */ -+ u_int NumHaltOperations; /* Number of haltops allocated */ -+ u_int ReservedHaltOperations; /* Number of haltops reserved */ -+ -+ ELAN3_HALTOP *HaltOperations; /* List of operations to call */ -+ ELAN3_HALTOP **HaltOperationsTailpp; /* Pointer to last "next" pointer in list */ -+ E3_uint32 HaltOperationsMask; /* Or of all bits in list of operations */ -+ -+ physaddr_t SdramPhysBase; /* Physical address of SDRAM */ -+ physaddr_t SdramPhysMask; /* and mask of significant bits */ -+ -+ physaddr_t PciPhysBase; /* physical address of local PCI segment */ -+ physaddr_t PciPhysMask; /* and mask of significant bits */ -+ -+ long ErrorTime; /* lbolt at last error (link,ecc etc) */ -+ long ErrorsPerTick; /* count of errors for this tick */ -+ timer_fn_t ErrorTimeoutId; /* id of timeout when errors 
masked out */ -+ timer_fn_t DmaPollTimeoutId; /* id of timeout to poll for "bad" dmas */ -+ int FilterHaltQueued; -+ -+ /* -+ * HAT layer specific entries. -+ */ -+ ELAN3_LEVEL_PTBL_BLOCK Level[4]; -+ spinlock_t PtblGroupLock; /* Lock for Page Table group lists */ -+ struct elan3_ptbl_gr *Level3PtblGroupHand; /* Hand for ptbl stealing */ -+ -+ /* -+ * Per-Context Information structures. -+ */ -+ struct elan3_info *Infos; /* List of "infos" for this device */ -+ -+ char LinkShutdown; /* link forced into reset by panic/shutdown/dump */ -+ -+ /* -+ * Device statistics. -+ */ -+ ELAN3_STATS Stats; -+ ELAN_STATS_IDX StatsIndex; -+ -+ struct { -+ E3_Regs *RegPtr; -+ char *Sdram[ELAN3_SDRAM_NUM_BANKS]; -+ } PanicState; -+} ELAN3_DEV; -+ -+#define ELAN3_DEV_CTX_TABLE(dev,ctxtn) ( (dev)->CtxtTable[ (ctxtn) & MAX_ROOT_CONTEXT_MASK] ) -+ -+/* macros for accessing dev->RegPtr.Tags/Sets. */ -+#define write_cache_tag(dev,what,val) writeq (val, (void *) (dev->RegPtr + offsetof (E3_Regs, Tags.what))) -+#define read_cache_tag(dev,what) readq ((void *) (dev->RegPtr + offsetof (E3_Regs, Tags.what))) -+#define write_cache_set(dev,what,val) writeq (val, (void *) (dev->RegPtr + offsetof (E3_Regs, Sets.what))) -+#define read_cache_set(dev,what) readq ((void *) (dev->RegPtr + offsetof (E3_Regs, Sets.what))) -+ -+/* macros for accessing dev->RegPtr.Regs. */ -+#define write_reg64(dev,what,val) writeq (val, (void *) (dev->RegPtr + offsetof (E3_Regs, Regs.what))) -+#define write_reg32(dev,what,val) writel (val, (void *) (dev->RegPtr + offsetof (E3_Regs, Regs.what))) -+#define read_reg64(dev,what) readq ((void *) (dev->RegPtr + offsetof (E3_Regs, Regs.what))) -+#define read_reg32(dev,what) readl ((void *) (dev->RegPtr + offsetof (E3_Regs, Regs.what))) -+ -+/* macros for accessing dev->RegPtr.uRegs. 
*/ -+#define write_ureg64(dev,what,val) writeq (val, (void *) (dev->RegPtr + offsetof (E3_Regs, URegs.what))) -+#define write_ureg32(dev,what,val) writel (val, (void *) (dev->RegPtr + offsetof (E3_Regs, URegs.what))) -+#define read_ureg64(dev,what) readq ((void *) (dev->RegPtr + offsetof (E3_Regs, URegs.what))) -+#define read_ureg32(dev,what) readl ((void *) (dev->RegPtr + offsetof (E3_Regs, URegs.what))) -+ -+/* macros for accessing dma descriptor/thread regs */ -+#define copy_dma_regs(dev, desc) \ -+MACRO_BEGIN \ -+ register int i; \ -+ for (i = 0; i < sizeof (E3_DMA)/sizeof(E3_uint64); i++) \ -+ ((E3_uint64 *) desc)[i] = readq ((void *)(dev->RegPtr + offsetof (E3_Regs, Regs.Dma_Desc) + i*sizeof (E3_uint64))); \ -+MACRO_END -+ -+#define copy_thread_regs(dev, regs) \ -+MACRO_BEGIN \ -+ register int i; \ -+ for (i = 0; i < (32*sizeof (E3_uint32))/sizeof(E3_uint64); i++) \ -+ ((E3_uint64 *) regs)[i] = readq ((void *)(dev->RegPtr + offsetof (E3_Regs, Regs.Globals[0]) + i*sizeof (E3_uint64))); \ -+MACRO_END -+ -+_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::IntrLock, -+ _E3_DataBusMap::Exts _E3_DataBusMap::Input_Context_Fil_Flush -+ elan3_dev::CurrentCommandPortTrap elan3_dev::HaltAllCount elan3_dev::HaltDmaDequeueCount -+ elan3_dev::FlushCommandCount elan3_dev::DiscardAllCount elan3_dev::DiscardNonContext0Count -+ elan3_dev::HaltOperations elan3_dev::HaltOperationsMask)) -+_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::TlbLock, -+ _E3_DataBusMap::Cache_Control_Reg)) -+_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::InfoLock, -+ elan3_dev::Infos elan3_dev::InfoTable)) -+_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::FreeHaltLock, -+ elan3_dev::FreeHaltOperations elan3_dev::NumHaltOperations elan3_dev::ReservedHaltOperations)) -+_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::PageFreeListLock, -+ elan3_dev::PageFreeList elan3_dev::PageFreeListSize)) -+_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::Level1PtblLock, -+ elan3_dev::Level1PtblTotal elan3_dev::Level1PtblFreeCount elan3_dev::Level1PtblFreeList)) -+_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::Level2PtblLock, -+ elan3_dev::Level2PtblTotal elan3_dev::Level2PtblFreeCount elan3_dev::Level2PtblFreeList)) -+_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::Level3PtblLock, -+ elan3_dev::Level3PtblTotal elan3_dev::Level3PtblFreeCount elan3_dev::Level3PtblFreeList)) -+_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::PtblGroupLock, -+ elan3_dev::Level1PtblGroupList elan3_dev::Level2PtblGroupList elan3_dev::Level3PtblGroupList)) -+ -+_NOTE(DATA_READABLE_WITHOUT_LOCK(elan3_dev::InfoTable elan3_dev::Level1PtblFreeList -+ elan3_dev::Level2PtblFreeList elan3_dev::Level3PtblFreeList)) -+ -+_NOTE(LOCK_ORDER(elan3_dev::InfoLock elan3_dev::IntrLock)) -+_NOTE(LOCK_ORDER(as::a_lock elan3_dev::InfoLock)) -+_NOTE(LOCK_ORDER(as::a_lock elan3_dev::IntrLock)) -+ -+#define SET_INT_MASK(dev,Mask) MACRO_BEGIN write_reg32 (dev, Exts.InterruptMask, ((dev)->InterruptMask = (Mask))); mmiob(); MACRO_END -+#define ENABLE_INT_MASK(dev, bits) MACRO_BEGIN write_reg32 (dev, Exts.InterruptMask, ((dev->InterruptMask |= (bits)))); mmiob(); MACRO_END -+#define DISABLE_INT_MASK(dev, bits) MACRO_BEGIN write_reg32 (dev, Exts.InterruptMask, ((dev->InterruptMask &= ~(bits)))); mmiob(); MACRO_END -+ -+#define INIT_SCHED_STATUS(dev, val) \ -+MACRO_BEGIN \ -+ (dev)->SchCntReg = (val); \ -+ write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg); \ -+ mmiob(); \ -+MACRO_END -+ -+#define SET_SCHED_STATUS(dev, val) \ -+MACRO_BEGIN \ -+ ASSERT (((val) & HaltStopAndExtTestMask) == (val)); \ -+ (dev)->SchCntReg |= (val); \ -+ write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg); \ -+ 
mmiob (); \ -+MACRO_END -+ -+#define CLEAR_SCHED_STATUS(dev, val) \ -+MACRO_BEGIN \ -+ ASSERT (((val) & HaltStopAndExtTestMask) == (val)); \ -+ (dev)->SchCntReg &= ~(val); \ -+ write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg); \ -+ mmiob(); \ -+MACRO_END -+ -+#define MODIFY_SCHED_STATUS(dev, SetBits, ClearBits) \ -+MACRO_BEGIN \ -+ ASSERT ((((SetBits)|(ClearBits)) & HaltStopAndExtTestMask) == ((SetBits)|(ClearBits))); \ -+ (dev)->SchCntReg = (((dev)->SchCntReg | (SetBits)) & ~(ClearBits)); \ -+ write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg); \ -+ mmiob(); \ -+MACRO_END -+ -+#define PULSE_SCHED_STATUS(dev, RestartBits) \ -+MACRO_BEGIN \ -+ ASSERT (((RestartBits) & HaltStopAndExtTestMask) == 0); \ -+ write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg | (RestartBits)); \ -+ mmiob(); \ -+MACRO_END -+ -+#define SET_SCHED_LINK_VALUE(dev, enabled, val) \ -+MACRO_BEGIN \ -+ (dev)->SchCntReg = (((dev)->SchCntReg & HaltAndStopMask) | ((enabled) ? LinkBoundaryScan : 0) | LinkSetValue(val, 0)); \ -+ write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg); \ -+ mmiob(); \ -+MACRO_END -+ -+#ifdef DEBUG_ASSERT -+# define ELAN3_ASSERT(dev, EX) ((void)((EX) || elan3_assfail(dev, #EX, __FILE__, __LINE__))) -+#else -+# define ELAN3_ASSERT(dev, EX) -+#endif -+ -+/* elandev_generic.c */ -+extern int InitialiseElan (ELAN3_DEV *dev, ioaddr_t CmdPort); -+extern void FinaliseElan (ELAN3_DEV *dev); -+extern int InterruptHandler (ELAN3_DEV *dev); -+extern void PollForDmaHungup (void *arg); -+ -+extern int SetLinkBoundaryScan (ELAN3_DEV *dev); -+extern void ClearLinkBoundaryScan (ELAN3_DEV *dev); -+extern int WriteBoundaryScanValue (ELAN3_DEV *dev, int value); -+extern int ReadBoundaryScanValue(ELAN3_DEV *dev, int link); -+ -+extern int ReadVitalProductData (ELAN3_DEV *dev, int *CasLatency); -+ -+extern struct elan3_ptbl_gr *ElanGetPtblGr (ELAN3_DEV *dev, sdramaddr_t offset); -+extern void ElanSetPtblGr (ELAN3_DEV *dev, sdramaddr_t offset, struct elan3_ptbl_gr *ptg); -+ -+extern void ElanFlushTlb (ELAN3_DEV *dev); -+ -+extern void SetSchedStatusRegister (ELAN3_DEV *dev, E3_uint32 Pend, volatile E3_uint32 *Maskp); -+extern void FreeHaltOperation (ELAN3_DEV *dev, ELAN3_HALTOP *op); -+extern int ReserveHaltOperations (ELAN3_DEV *dev, int count, int cansleep); -+extern void ReleaseHaltOperations (ELAN3_DEV *dev, int count); -+extern void ProcessHaltOperations (ELAN3_DEV *dev, E3_uint32 Pend); -+extern void QueueHaltOperation (ELAN3_DEV *dev, E3_uint32 Pend, volatile E3_uint32 *Maskp, -+ E3_uint32 ReqMask, void (*Function)(ELAN3_DEV *, void *), void *Arguement); -+ -+extern int ComputePosition (ELAN_POSITION *pos, unsigned NodeId, unsigned NumNodes, unsigned numDownLinksVal); -+ -+extern caddr_t MiToName (int mi); -+extern void ElanBusError (ELAN3_DEV *dev); -+ -+extern void TriggerLsa (ELAN3_DEV *dev); -+ -+extern ELAN3_DEV *elan3_device (int instance); -+extern int DeviceRegisterSize (ELAN3_DEV *dev, int rnumber, int *sizep); -+extern int MapDeviceRegister (ELAN3_DEV *dev, int rnumber, ioaddr_t *addrp, int offset, -+ int len, DeviceMappingHandle *handlep); -+extern void UnmapDeviceRegister (ELAN3_DEV *dev, DeviceMappingHandle *handlep); -+ -+ -+/* sdram.c */ -+/* sdram accessing functions - define 4 different types for 8,16,32,64 bit accesses */ -+extern unsigned char elan3_sdram_readb (ELAN3_DEV *dev, sdramaddr_t ptr); -+extern unsigned short elan3_sdram_readw (ELAN3_DEV *dev, sdramaddr_t ptr); -+extern unsigned int elan3_sdram_readl (ELAN3_DEV *dev, sdramaddr_t ptr); -+extern unsigned long long elan3_sdram_readq 
(ELAN3_DEV *dev, sdramaddr_t ptr); -+extern void elan3_sdram_writeb (ELAN3_DEV *dev, sdramaddr_t ptr, unsigned char val); -+extern void elan3_sdram_writew (ELAN3_DEV *dev, sdramaddr_t ptr, unsigned short val); -+extern void elan3_sdram_writel (ELAN3_DEV *dev, sdramaddr_t ptr, unsigned int val); -+extern void elan3_sdram_writeq (ELAN3_DEV *dev, sdramaddr_t ptr, unsigned long long val); -+ -+extern void elan3_sdram_zerob_sdram (ELAN3_DEV *dev, sdramaddr_t ptr, int nbytes); -+extern void elan3_sdram_zerow_sdram (ELAN3_DEV *dev, sdramaddr_t ptr, int nbytes); -+extern void elan3_sdram_zerol_sdram (ELAN3_DEV *dev, sdramaddr_t ptr, int nbytes); -+extern void elan3_sdram_zeroq_sdram (ELAN3_DEV *dev, sdramaddr_t ptr, int nbytes); -+ -+extern void elan3_sdram_copyb_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes); -+extern void elan3_sdram_copyw_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes); -+extern void elan3_sdram_copyl_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes); -+extern void elan3_sdram_copyq_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes); -+extern void elan3_sdram_copyb_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes); -+extern void elan3_sdram_copyw_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes); -+extern void elan3_sdram_copyl_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes); -+extern void elan3_sdram_copyq_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes); -+ -+extern void elan3_sdram_init (ELAN3_DEV *dev); -+extern void elan3_sdram_fini (ELAN3_DEV *dev); -+extern void elan3_sdram_add (ELAN3_DEV *dev, sdramaddr_t base, sdramaddr_t top); -+extern sdramaddr_t elan3_sdram_alloc (ELAN3_DEV *dev, int nbytes); -+extern void elan3_sdram_free (ELAN3_DEV *dev, sdramaddr_t ptr, int nbytes); -+extern physaddr_t elan3_sdram_to_phys (ELAN3_DEV *dev, sdramaddr_t addr); -+ -+/* cproc.c */ -+extern void HandleCProcTrap (ELAN3_DEV *dev, E3_uint32 Pend, E3_uint32 *Mask); -+ -+/* iproc.c */ -+extern void HandleIProcTrap (ELAN3_DEV *dev, int Channel, E3_uint32 Pend, sdramaddr_t FaultSaveOff, -+ sdramaddr_t TransactionsOff, sdramaddr_t DataOff); -+ -+/* tproc.c */ -+extern int HandleTProcTrap (ELAN3_DEV *dev, E3_uint32 *RestartBits); -+extern void DeliverTProcTrap (ELAN3_DEV *dev, struct thread_trap *threadTrap, E3_uint32 Pend); -+ -+/* dproc.c */ -+extern int HandleDProcTrap (ELAN3_DEV *dev, E3_uint32 *RestartBits); -+extern void DeliverDProcTrap (ELAN3_DEV *dev, struct dma_trap *dmaTrap, E3_uint32 Pend); -+ -+#if defined(LINUX) -+/* procfs_linux.h */ -+extern struct proc_dir_entry *elan3_procfs_root; -+extern struct proc_dir_entry *elan3_config_root; -+ -+extern void elan3_procfs_init(void); -+extern void elan3_procfs_fini(void); -+extern void elan3_procfs_device_init (ELAN3_DEV *dev); -+extern void elan3_procfs_device_fini (ELAN3_DEV *dev); -+#endif /* defined(LINUX) */ -+ -+/* elan3_osdep.c */ -+extern int BackToBackMaster; -+extern int BackToBackSlave; -+ -+#define ELAN_REG_REC_MAX (100) -+#define ELAN_REG_REC(REG) { \ -+elan_reg_rec_file [elan_reg_rec_index] = __FILE__; \ -+elan_reg_rec_line [elan_reg_rec_index] = __LINE__; \ -+elan_reg_rec_reg [elan_reg_rec_index] = REG; \ -+elan_reg_rec_cpu [elan_reg_rec_index] = smp_processor_id(); \ -+elan_reg_rec_lbolt[elan_reg_rec_index] = lbolt; \ -+elan_reg_rec_index = ((elan_reg_rec_index+1) % ELAN_REG_REC_MAX);} -+ -+extern char * elan_reg_rec_file [ELAN_REG_REC_MAX]; -+extern int elan_reg_rec_line 
[ELAN_REG_REC_MAX]; -+extern long elan_reg_rec_lbolt[ELAN_REG_REC_MAX]; -+extern int elan_reg_rec_cpu [ELAN_REG_REC_MAX]; -+extern E3_uint32 elan_reg_rec_reg [ELAN_REG_REC_MAX]; -+extern int elan_reg_rec_index; -+ -+#endif /* __KERNEL__ */ -+ -+ -+#define ELAN3_PROCFS_ROOT "/proc/qsnet/elan3" -+#define ELAN3_PROCFS_VERSION "/proc/qsnet/elan3/version" -+#define ELAN3_PROCFS_DEBUG "/proc/qsnet/elan3/config/elandebug" -+#define ELAN3_PROCFS_DEBUG_CONSOLE "/proc/qsnet/elan3/config/elandebug_console" -+#define ELAN3_PROCFS_DEBUG_BUFFER "/proc/qsnet/elan3/config/elandebug_buffer" -+#define ELAN3_PROCFS_MMU_DEBUG "/proc/qsnet/elan3/config/elan3mmu_debug" -+#define ELAN3_PROCFS_PUNT_LOOPS "/proc/qsnet/elan3/config/eventint_punt_loops" -+ -+#define ELAN3_PROCFS_DEVICE_STATS_FMT "/proc/qsnet/elan3/device%d/stats" -+#define ELAN3_PROCFS_DEVICE_POSITION_FMT "/proc/qsnet/elan3/device%d/position" -+#define ELAN3_PROCFS_DEVICE_NODESET_FMT "/proc/qsnet/elan3/device%d/nodeset" -+ -+#endif /* __ELAN3_ELANDEV_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan3/elandev_linux.h linux-2.6.9/include/elan3/elandev_linux.h ---- clean/include/elan3/elandev_linux.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan3/elandev_linux.h 2005-04-05 11:28:37.000000000 -0400 -@@ -0,0 +1,74 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELANDEV_LINUX_H -+#define __ELANDEV_LINUX_H -+ -+#ident "$Id: elandev_linux.h,v 1.14 2005/04/05 15:28:37 robin Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elandev_linux.h,v $*/ -+ -+#ifdef __KERNEL__ -+#include -+#include -+#include -+ -+#include -+ -+#if !defined(NO_COPROC) /* The older coproc kernel patch is applied */ -+#include -+ -+#define ioproc_ops coproc_ops_struct -+#define ioproc_register_ops register_coproc_ops -+#define ioproc_unregister_ops unregister_coproc_ops -+ -+#define IOPROC_MM_STRUCT_ARG 1 -+#define IOPROC_PATCH_APPLIED 1 -+ -+#elif !defined(NO_IOPROC) /* The new ioproc kernel patch is applied */ -+#include -+ -+#define IOPROC_PATCH_APPLIED 1 -+#endif -+#endif -+ -+#define ELAN3_MAJOR 60 -+#define ELAN3_NAME "elan3" -+#define ELAN3_MAX_CONTROLLER 16 /* limited to 4 bits */ -+ -+#define ELAN3_MINOR_DEVNUM(m) ((m) & 0x0f) /* card number */ -+#define ELAN3_MINOR_DEVFUN(m) (((m) >> 4) & 0x0f) /* function */ -+#define ELAN3_MINOR_CONTROL 0 /* function values */ -+#define ELAN3_MINOR_MEM 1 -+#define ELAN3_MINOR_USER 2 -+ -+typedef void *DeviceMappingHandle; -+ -+/* task and ctxt handle types */ -+typedef struct mm_struct *TaskHandle; -+typedef int CtxtHandle; -+ -+#define ELAN3_MY_TASK_HANDLE() (current->mm) -+#define KERNEL_TASK_HANDLE() (get_kern_mm()) -+ -+/* -+ * OS-dependent component of ELAN3_DEV struct. -+ */ -+typedef struct elan3_dev_osdep -+{ -+ struct pci_dev *pci; /* PCI config data */ -+ int ControlDeviceOpen; /* flag to indicate control */ -+ /* device open */ -+ struct proc_dir_entry *procdir; -+} ELAN3_DEV_OSDEP; -+ -+#endif /* __ELANDEV_LINUX_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan3/elanio.h linux-2.6.9/include/elan3/elanio.h ---- clean/include/elan3/elanio.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan3/elanio.h 2003-12-08 10:40:26.000000000 -0500 -@@ -0,0 +1,226 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN3_ELAN3IO_H -+#define __ELAN3_ELAN3IO_H -+ -+#ident "$Id: elanio.h,v 1.19 2003/12/08 15:40:26 mike Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elanio.h,v $*/ -+ -+#define ELAN3IO_CONTROL_PATHNAME "/dev/elan3/control%d" -+#define ELAN3IO_MEM_PATHNAME "/dev/elan3/mem%d" -+#define ELAN3IO_USER_PATHNAME "/dev/elan3/user%d" -+#define ELAN3IO_SDRAM_PATHNAME "/dev/elan3/sdram%d" -+#define ELAN3IO_MAX_PATHNAMELEN 32 -+ -+/* ioctls on /dev/elan3/control */ -+#define ELAN3IO_CONTROL_BASE 0 -+ -+#define ELAN3IO_SET_BOUNDARY_SCAN _IO ('e', ELAN3IO_CONTROL_BASE + 0) -+#define ELAN3IO_CLEAR_BOUNDARY_SCAN _IO ('e', ELAN3IO_CONTROL_BASE + 1) -+#define ELAN3IO_READ_LINKVAL _IOWR ('e', ELAN3IO_CONTROL_BASE + 2, E3_uint32) -+#define ELAN3IO_WRITE_LINKVAL _IOWR ('e', ELAN3IO_CONTROL_BASE + 3, E3_uint32) -+ -+typedef struct elanio_set_debug_struct -+{ -+ char what[32]; -+ u_long value; -+} ELAN3IO_SET_DEBUG_STRUCT; -+#define ELAN3IO_SET_DEBUG _IOW ('e', ELAN3IO_CONTROL_BASE + 4, ELAN3IO_SET_DEBUG_STRUCT) -+ -+typedef struct elanio_debug_buffer_struct -+{ -+ caddr_t addr; -+ size_t len; -+} ELAN3IO_DEBUG_BUFFER_STRUCT; -+#define ELAN3IO_DEBUG_BUFFER _IOWR ('e', ELAN3IO_CONTROL_BASE + 5, ELAN3IO_DEBUG_BUFFER_STRUCT) -+ -+typedef struct elanio_neterr_server_struct -+{ -+ u_int elanid; -+ void *addr; -+ char *name; -+} ELAN3IO_NETERR_SERVER_STRUCT; -+#define ELAN3IO_NETERR_SERVER _IOW ('e', ELAN3IO_CONTROL_BASE + 6, ELAN3IO_NETERR_SERVER_STRUCT) -+#define ELAN3IO_NETERR_FIXUP _IOWR ('e', ELAN3IO_CONTROL_BASE + 7, NETERR_MSG) -+ -+typedef struct elanio_set_position_struct -+{ -+ u_int device; -+ unsigned short nodeId; -+ unsigned short numNodes; -+} ELAN3IO_SET_POSITION_STRUCT; -+#define ELAN3IO_SET_POSITION _IOW ('e', ELAN3IO_CONTROL_BASE + 8, ELAN3IO_SET_POSITION_STRUCT) -+ -+#if defined(LINUX) -+ -+/* ioctls on /dev/elan3/sdram */ -+#define ELAN3IO_SDRAM_BASE 20 -+ -+/* ioctls on /dev/elan3/user */ -+#define ELAN3IO_USER_BASE 30 -+ -+#define ELAN3IO_FREE _IO ('e', ELAN3IO_USER_BASE + 0) -+ -+#define ELAN3IO_ATTACH _IOWR('e', ELAN3IO_USER_BASE + 1, ELAN_CAPABILITY) -+#define ELAN3IO_DETACH _IO ('e', ELAN3IO_USER_BASE + 2) -+ -+typedef struct elanio_addvp_struct -+{ -+ u_int process; -+ ELAN_CAPABILITY capability; -+} ELAN3IO_ADDVP_STRUCT; -+#define ELAN3IO_ADDVP _IOWR('e', ELAN3IO_USER_BASE + 3, ELAN3IO_ADDVP_STRUCT) -+#define ELAN3IO_REMOVEVP _IOW ('e', ELAN3IO_USER_BASE + 4, int) -+ -+typedef struct elanio_bcastvp_struct -+{ -+ u_int process; -+ u_int lowvp; -+ u_int highvp; -+} ELAN3IO_BCASTVP_STRUCT; -+#define ELAN3IO_BCASTVP _IOW ('e', ELAN3IO_USER_BASE + 5, ELAN3IO_BCASTVP_STRUCT) -+ -+typedef struct elanio_loadroute_struct -+{ -+ u_int process; -+ E3_uint16 flits[MAX_FLITS]; -+} ELAN3IO_LOAD_ROUTE_STRUCT; -+#define ELAN3IO_LOAD_ROUTE _IOW ('e', ELAN3IO_USER_BASE + 6, ELAN3IO_LOAD_ROUTE_STRUCT) -+ -+#define ELAN3IO_PROCESS _IO ('e', ELAN3IO_USER_BASE + 7) -+ -+typedef struct elanio_setperm_struct -+{ -+ caddr_t maddr; -+ E3_Addr eaddr; -+ size_t len; -+ int perm; -+} ELAN3IO_SETPERM_STRUCT; -+#define ELAN3IO_SETPERM _IOW ('e', ELAN3IO_USER_BASE + 8, ELAN3IO_SETPERM_STRUCT) -+ -+typedef struct elanio_clearperm_struct -+{ -+ E3_Addr eaddr; -+ size_t len; -+} ELAN3IO_CLEARPERM_STRUCT; -+#define ELAN3IO_CLEARPERM _IOW ('e', ELAN3IO_USER_BASE + 9, ELAN3IO_CLEARPERM_STRUCT) -+ -+typedef struct elanio_changeperm_struct -+{ -+ E3_Addr eaddr; -+ size_t len; -+ int perm; -+} 
ELAN3IO_CHANGEPERM_STRUCT;
-+#define ELAN3IO_CHANGEPERM _IOW ('e', ELAN3IO_USER_BASE + 10, ELAN3IO_CHANGEPERM_STRUCT)
-+
-+
-+#define ELAN3IO_HELPER_THREAD _IO ('e', ELAN3IO_USER_BASE + 11)
-+#define ELAN3IO_WAITCOMMAND _IO ('e', ELAN3IO_USER_BASE + 12)
-+#define ELAN3IO_BLOCK_INPUTTER _IOW ('e', ELAN3IO_USER_BASE + 13, int)
-+#define ELAN3IO_SET_FLAGS _IOW ('e', ELAN3IO_USER_BASE + 14, int)
-+
-+#define ELAN3IO_WAITEVENT _IOW ('e', ELAN3IO_USER_BASE + 15, E3_Event)
-+#define ELAN3IO_ALLOC_EVENTCOOKIE _IOW ('e', ELAN3IO_USER_BASE + 16, EVENT_COOKIE)
-+#define ELAN3IO_FREE_EVENTCOOKIE _IOW ('e', ELAN3IO_USER_BASE + 17, EVENT_COOKIE)
-+#define ELAN3IO_ARM_EVENTCOOKIE _IOW ('e', ELAN3IO_USER_BASE + 18, EVENT_COOKIE)
-+#define ELAN3IO_WAIT_EVENTCOOKIE _IOW ('e', ELAN3IO_USER_BASE + 19, EVENT_COOKIE)
-+
-+#define ELAN3IO_SWAPSPACE _IOW ('e', ELAN3IO_USER_BASE + 20, SYS_SWAP_SPACE)
-+#define ELAN3IO_EXCEPTION_SPACE _IOW ('e', ELAN3IO_USER_BASE + 21, SYS_EXCEPTION_SPACE)
-+#define ELAN3IO_GET_EXCEPTION _IOR ('e', ELAN3IO_USER_BASE + 22, SYS_EXCEPTION)
-+
-+typedef struct elanio_unload_struct
-+{
-+ void *addr;
-+ size_t len;
-+} ELAN3IO_UNLOAD_STRUCT;
-+#define ELAN3IO_UNLOAD _IOW ('e', ELAN3IO_USER_BASE + 23, ELAN3IO_UNLOAD_STRUCT)
-+
-+
-+
-+typedef struct elanio_getroute_struct
-+{
-+ u_int process;
-+ E3_uint16 flits[MAX_FLITS];
-+} ELAN3IO_GET_ROUTE_STRUCT;
-+#define ELAN3IO_GET_ROUTE _IOW ('e', ELAN3IO_USER_BASE + 24, ELAN3IO_GET_ROUTE_STRUCT)
-+
-+typedef struct elanio_resetroute_struct
-+{
-+ u_int process;
-+} ELAN3IO_RESET_ROUTE_STRUCT;
-+#define ELAN3IO_RESET_ROUTE _IOW ('e', ELAN3IO_USER_BASE + 25, ELAN3IO_RESET_ROUTE_STRUCT)
-+
-+typedef struct elanio_checkroute_struct
-+{
-+ u_int process;
-+ E3_uint32 routeError;
-+ E3_uint16 flits[MAX_FLITS];
-+} ELAN3IO_CHECK_ROUTE_STRUCT;
-+#define ELAN3IO_CHECK_ROUTE _IOW ('e', ELAN3IO_USER_BASE + 26, ELAN3IO_CHECK_ROUTE_STRUCT)
-+
-+typedef struct elanio_vp2nodeId_struct
-+{
-+ u_int process;
-+ unsigned short nodeId;
-+ ELAN_CAPABILITY cap;
-+} ELAN3IO_VP2NODEID_STRUCT;
-+#define ELAN3IO_VP2NODEID _IOWR('e', ELAN3IO_USER_BASE + 27, ELAN3IO_VP2NODEID_STRUCT)
-+
-+#define ELAN3IO_SET_SIGNAL _IOW ('e', ELAN3IO_USER_BASE + 28, int)
-+
-+typedef struct elanio_process_2_location_struct
-+{
-+ u_int process;
-+ ELAN_LOCATION loc;
-+} ELAN3IO_PROCESS_2_LOCATION_STRUCT;
-+#define ELAN3IO_PROCESS_2_LOCATION _IOW ('e', ELAN3IO_USER_BASE + 29, ELAN3IO_PROCESS_2_LOCATION_STRUCT)
-+
-+
-+
-+/* ioctls on all devices */
-+#define ELAN3IO_GENERIC_BASE 100
-+typedef struct elanio_get_devinfo_struct
-+{
-+ ELAN_DEVINFO *devinfo;
-+} ELAN3IO_GET_DEVINFO_STRUCT;
-+#define ELAN3IO_GET_DEVINFO _IOR ('e', ELAN3IO_GENERIC_BASE + 0, ELAN_DEVINFO)
-+
-+typedef struct elanio_get_position_struct
-+{
-+ ELAN_POSITION *position;
-+} ELAN3IO_GET_POSITION_STRUCT;
-+#define ELAN3IO_GET_POSITION _IOR ('e', ELAN3IO_GENERIC_BASE + 1, ELAN_POSITION)
-+
-+typedef struct elanio_stats_struct
-+{
-+ int which;
-+ void *ptr;
-+} ELAN3IO_STATS_STRUCT;
-+#define ELAN3IO_STATS _IOR ('e', ELAN3IO_GENERIC_BASE + 2, ELAN3IO_STATS_STRUCT)
-+# define ELAN3_SYS_STATS_DEVICE 0
-+# define ELAN3_SYS_STATS_MMU 1
-+
-+/* offsets on /dev/elan3/control */
-+
-+/* offsets on /dev/elan3/mem */
-+
-+/* page numbers on /dev/elan3/user */
-+#define ELAN3IO_OFF_COMMAND_PAGE 0
-+#define ELAN3IO_OFF_FLAG_PAGE 1
-+#define ELAN3IO_OFF_UREG_PAGE 2
-+
-+#endif /* LINUX */
-+
-+#endif /* __ELAN3_ELAN3IO_H */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "stroustrup"
-+ * End:
-+ */
-diff -urN clean/include/elan3/elanregs.h linux-2.6.9/include/elan3/elanregs.h
---- clean/include/elan3/elanregs.h 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/elan3/elanregs.h 2004-04-22 08:27:21.000000000 -0400
-@@ -0,0 +1,1063 @@
-+/*
-+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+/*
-+ * Header file for internal slave mapping of the ELAN3 registers
-+ */
-+
-+#ifndef _ELAN3_ELANREGS_H
-+#define _ELAN3_ELANREGS_H
-+
-+#ident "$Id: elanregs.h,v 1.87 2004/04/22 12:27:21 david Exp $"
-+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elanregs.h,v $*/
-+
-+#include
-+#include
-+#include
-+
-+#define MAX_ROOT_CONTEXT_MASK 0xfff
-+#define SYS_CONTEXT_BIT 0x1000
-+#define ALL_CONTEXT_BITS (MAX_ROOT_CONTEXT_MASK | SYS_CONTEXT_BIT)
-+#define ROOT_TAB_OFFSET(Cntxt) (((Cntxt) & MAX_ROOT_CONTEXT_MASK) << 4)
-+#define CLEAR_SYS_BIT(Cntxt) ((Cntxt) & ~SYS_CONTEXT_BIT)
-+
-+#define E3_CACHELINE_SIZE (32)
-+#define E3_CACHE_SIZE (8192)
-+
-+typedef volatile struct _E3_CacheSets
-+{
-+ E3_uint64 Set0[256]; /* 2k bytes per set */
-+ E3_uint64 Set1[256]; /* 2k bytes per set */
-+ E3_uint64 Set2[256]; /* 2k bytes per set */
-+ E3_uint64 Set3[256]; /* 2k bytes per set */
-+} E3_CacheSets;
-+
-+typedef union e3_cache_tag
-+{
-+ E3_uint64 Value;
-+ struct {
-+#if defined(__LITTLE_ENDIAN__)
-+ E3_uint32 pad2:8; /* Undefined value when read */
-+ E3_uint32 LineError:1; /* A line error has occurred */
-+ E3_uint32 Modified:1; /* Cache data is modified */
-+ E3_uint32 FillPending:1; /* Pipelined fill occurring */
-+ E3_uint32 AddrTag27to11:17; /* Tag address bits 27 to 11 */
-+ E3_uint32 pad1:4; /* Undefined value when read */
-+ E3_uint32 pad0; /* Undefined value when read */
-+#else
-+ E3_uint32 pad0; /* Undefined value when read */
-+ E3_uint32 pad1:4; /* Undefined value when read */
-+ E3_uint32 AddrTag27to11:17; /* Tag address bits 27 to 11 */
-+ E3_uint32 FillPending:1; /* Pipelined fill occurring */
-+ E3_uint32 Modified:1; /* Cache data is modified */
-+ E3_uint32 LineError:1; /* A line error has occurred */
-+ E3_uint32 pad2:8; /* Undefined value when read */
-+#endif
-+ } s;
-+} E3_CacheTag;
-+
-+#define E3_NumCacheLines 64
-+#define E3_NumCacheSets 4
-+
-+typedef volatile struct _E3_CacheTags
-+{
-+ E3_CacheTag Tags[E3_NumCacheLines][E3_NumCacheSets]; /* 2k bytes per set */
-+} E3_CacheTags;
-+
-+typedef union E3_IProcStatus_Reg
-+{
-+ E3_uint32 Status;
-+ struct
-+ {
-+#if defined(__LITTLE_ENDIAN__)
-+ E3_uint32 TrapType:8; /* iprocs trap ucode address */
-+ E3_uint32 SuspendAddr:8; /* iprocs suspend address */
-+ E3_uint32 EopType:2; /* Type of Eop Received */
-+ E3_uint32 QueueingPacket:1; /* receiving a queueing packet */
-+ E3_uint32 AckSent:1; /* a packet ack has been sent */
-+ E3_uint32 Reject:1; /* a packet nack has been sent */
-+ E3_uint32 CrcStatus:2; /* Crc Status value */
-+ E3_uint32 BadLength:1; /* Eop was received in a bad place */
-+ E3_uint32 Chan1:1; /* This packet received on v chan1 */
-+ E3_uint32 First:1; /* This is the first transaction in the packet */
-+ E3_uint32 Last:1; /* This is the last transaction in the packet */
-+ E3_uint32 Unused:2;
-+ E3_uint32 WakeupFunction:3; /* iprocs wakeup function */
-+#else
-+ E3_uint32 WakeupFunction:3; /* iprocs wakeup function */
-+ E3_uint32 Unused:2;
-+ E3_uint32 Last:1; /* This is the last transaction in the packet */
-+ E3_uint32 First:1; /* This is the first transaction in the packet */
-+ E3_uint32 Chan1:1; /* This packet received on v chan1 */
-+ E3_uint32 BadLength:1; /* Eop was received in a bad place */
-+ E3_uint32 CrcStatus:2; /* Crc Status value */
-+ E3_uint32 Reject:1; /* a packet nack has been sent */
-+ E3_uint32 AckSent:1; /* a packet ack has been sent */
-+ E3_uint32 QueueingPacket:1; /* receiving a queueing packet */
-+ E3_uint32 EopType:2; /* Type of Eop Received */
-+ E3_uint32 SuspendAddr:8; /* iprocs suspend address */
-+ E3_uint32 TrapType:8; /* iprocs trap ucode address */
-+#endif
-+ } s;
-+} E3_IProcStatus_Reg;
-+
-+#define CRC_STATUS_GOOD (0 << 21)
-+#define CRC_STATUS_DISCARD (1 << 21)
-+#define CRC_STATUS_ERROR (2 << 21)
-+#define CRC_STATUS_BAD (3 << 21)
-+
-+#define CRC_MASK (3 << 21)
-+
-+#define EOP_GOOD (1 << 16)
-+#define EOP_BADACK (2 << 16)
-+#define EOP_ERROR_RESET (3 << 16)
-+
-+#define E3_IPS_LastTrans (1 << 26)
-+#define E3_IPS_FirstTrans (1 << 25)
-+#define E3_IPS_VChan1 (1 << 24)
-+#define E3_IPS_BadLength (1 << 23)
-+#define E3_IPS_CrcMask (3 << 21)
-+#define E3_IPS_Rejected (1 << 20)
-+#define E3_IPS_AckSent (1 << 19)
-+#define E3_IPS_QueueingPacket (1 << 18)
-+#define E3_IPS_EopType (3 << 16)
-+
-+typedef union E3_Status_Reg
-+{
-+ E3_uint32 Status;
-+ struct
-+ {
-+#if defined(__LITTLE_ENDIAN__)
-+ E3_uint32 TrapType:8; /* procs trap ucode address */
-+ E3_uint32 SuspendAddr:8; /* procs suspend address */
-+ E3_uint32 Context:13; /* procs current context */
-+ E3_uint32 WakeupFunction:3; /* procs wakeup function */
-+#else
-+ E3_uint32 WakeupFunction:3; /* procs wakeup function */
-+ E3_uint32 Context:13; /* procs current context */
-+ E3_uint32 SuspendAddr:8; /* procs suspend address */
-+ E3_uint32 TrapType:8; /* procs trap ucode address */
-+#endif
-+ } s;
-+} E3_Status_Reg;
-+
-+/* values for WakeupFunction */
-+#define SleepOneTick 0
-+#define WakeupToSendTransOrEop 1
-+#define SleepOneTickThenRunnable 2
-+#define WakeupNever 4
-+/* extra dma wakeup functions */
-+#define WakupeToSendTransOrEop 1
-+#define WakeupForPacketAck 3
-+#define WakeupToSendTrans 5
-+/* extra thread wakeup function */
-+#define WakeupStopped 3
-+/* extra cproc wakeup function */
-+#define WakeupSetEvent 3
-+
-+#define GET_STATUS_CONTEXT(Ptr) ((Ptr.Status >> 16) & 0x1fff)
-+#define GET_STATUS_SUSPEND_ADDR(Ptr) ((Ptr.Status >> 8) & 0xff)
-+#define GET_STATUS_TRAPTYPE(Ptr) ((E3_uint32)(Ptr.Status & 0xff))
-+
-+/*
-+ * Interrupt register bits
-+ */
-+#define INT_PciMemErr (1<<15) /* Pci memory access error */
-+#define INT_SDRamInt (1<<14) /* SDRam ECC interrupt */
-+#define INT_EventInterrupt (1<<13) /* Event Interrupt */
-+#define INT_LinkError (1<<12) /* Link Error */
-+#define INT_ComQueue (1<<11) /* a comm queue half full */
-+#define INT_TProcHalted (1<<10) /* Tproc Halted */
-+#define INT_DProcHalted (1<<9) /* Dmas Halted */
-+#define INT_DiscardingNonSysCntx (1<<8) /* Inputters Discarding Non-SysCntx */
-+#define INT_DiscardingSysCntx (1<<7) /* Inputters Discarding SysCntx */
-+#define INT_TProc (1<<6) /* tproc interrupt */
-+#define INT_CProc (1<<5) /* cproc interrupt */
-+#define INT_DProc (1<<4) /* dproc interrupt */
-+#define INT_IProcCh1NonSysCntx (1<<3) /* iproc non-SysCntx interrupt */
-+#define INT_IProcCh1SysCntx (1<<2) /* iproc SysCntx interrupt */
-+#define INT_IProcCh0NonSysCntx (1<<1) /* iproc non-SysCntx interrupt */
-+#define INT_IProcCh0SysCntx (1<<0) /* iproc SysCntx interrupt */
-+
-+#define INT_Inputters (INT_IProcCh0SysCntx | INT_IProcCh0NonSysCntx | INT_IProcCh1SysCntx | INT_IProcCh1NonSysCntx)
-+#define INT_Discarding (INT_DiscardingSysCntx | INT_DiscardingNonSysCntx)
-+#define INT_Halted (INT_DProcHalted | INT_TProcHalted)
-+#define INT_ErrorInterrupts (INT_PciMemErr | INT_SDRamInt | INT_LinkError)
-+
-+/*
-+ * Link state bits.
-+ */
-+#define LS_LinkNotReady (1 << 0) /* Link is in reset or recovering from an error */
-+#define LS_Locked (1 << 1) /* Linkinput PLL is locked */
-+#define LS_LockError (1 << 2) /* Linkinput PLL was unable to lock onto the input clock. */
-+#define LS_DeskewError (1 << 3) /* Linkinput was unable to Deskew all the inputs. (Broken wire?) */
-+#define LS_PhaseError (1 << 4) /* Linkinput Phase alignment error. */
-+#define LS_DataError (1 << 5) /* Received value was neither good data nor a token. */
-+#define LS_FifoOvFlow0 (1 << 6) /* Channel 0 input fifo overflowed. */
-+#define LS_FifoOvFlow1 (1 << 7) /* Channel 1 input fifo overflowed. */
-+
-+/*
-+ * Link State Constant defines, used for writing to LinkSetValue
-+ */
-+
-+#define LRS_DataDel0 0x0
-+#define LRS_DataDel1 0x1
-+#define LRS_DataDel2 0x2
-+#define LRS_DataDel3 0x3
-+#define LRS_DataDel4 0x4
-+#define LRS_DataDel5 0x5
-+#define LRS_DataDel6 0x6
-+#define LRS_DataDel7 0x7
-+#define LRS_DataDel8 0x8
-+#define LRS_PllDelValue 0x9
-+#define LRS_ClockEven 0xA
-+#define LRS_ClockOdd 0xB
-+#define LRS_ErrorLSW 0xC
-+#define LRS_ErrorMSW 0xD
-+#define LRS_FinCoarseDeskew 0xE
-+#define LRS_LinkInValue 0xF
-+#define LRS_NumLinkDels 0x10
-+
-+#define LRS_Pllfast 0x40
-+
-+union Sched_Status
-+{
-+ E3_uint32 Status;
-+ struct
-+ {
-+#if defined(__LITTLE_ENDIAN__)
-+ E3_uint32 StopNonSysCntxs:1;
-+ E3_uint32 FlushCommandQueues:1;
-+ E3_uint32 HaltDmas:1;
-+ E3_uint32 HaltDmaDequeue:1;
-+ E3_uint32 HaltThread:1;
-+ E3_uint32 CProcStop:1;
-+ E3_uint32 DiscardSysCntxIn:1;
-+ E3_uint32 DiscardNonSysCntxIn:1;
-+ E3_uint32 RestartCh0SysCntx:1;
-+ E3_uint32 RestartCh0NonSysCntx:1;
-+ E3_uint32 RestartCh1SysCntx:1;
-+ E3_uint32 RestartCh1NonSysCntx:1;
-+ E3_uint32 RestartDProc:1;
-+ E3_uint32 RestartTProc:1;
-+ E3_uint32 RestartCProc:1;
-+ E3_uint32 ClearLinkErrorInt:1;
-+ E3_uint32 :3;
-+ E3_uint32 LinkSetValue:10;
-+ E3_uint32 FixLinkDelays:1;
-+ E3_uint32 LinkBoundaryScan:1;
-+#else
-+ E3_uint32 LinkBoundaryScan:1;
-+ E3_uint32 FixLinkDelays:1;
-+ E3_uint32 LinkSetValue:10;
-+ E3_uint32 :3;
-+ E3_uint32 ClearLinkErrorInt:1;
-+ E3_uint32 RestartCProc:1;
-+ E3_uint32 RestartTProc:1;
-+ E3_uint32 RestartDProc:1;
-+ E3_uint32 RestartCh1NonSysCntx:1;
-+ E3_uint32 RestartCh1SysCntx:1;
-+ E3_uint32 RestartCh0NonSysCntx:1;
-+ E3_uint32 RestartCh0SysCntx:1;
-+ E3_uint32 DiscardNonSysCntxIn:1;
-+ E3_uint32 DiscardSysCntxIn:1;
-+ E3_uint32 CProcStop:1;
-+ E3_uint32 HaltThread:1;
-+ E3_uint32 HaltDmaDequeue:1;
-+ E3_uint32 HaltDmas:1;
-+ E3_uint32 FlushCommandQueues:1;
-+ E3_uint32 StopNonSysCntxs:1;
-+#endif
-+ } s;
-+};
-+
-+#define LinkBoundaryScan ((E3_uint32) 1<<31) /* Clears the link error interrupt */
-+#define FixLinkDelays ((E3_uint32) 1<<30) /* Clears the link error interrupt */
-+#define LinkSetValue(Val, OldVal) ((E3_uint32) (((Val) & 0x3ff) << 20) | ((OldVal) & ((~0x3ff) << 20)))
-+
-+#define ClearLinkErrorInt ((E3_uint32) 1<<16) /* Clears the link error interrupt */
-+#define RestartCProc ((E3_uint32) 1<<15) /* Clears command proc interrupt */
-+#define RestartTProc ((E3_uint32) 1<<14) /* Clears thread interrupt */
-+#define RestartDProc ((E3_uint32) 1<<13) /* Clears dma0 interrupt */
-+#define RestartCh1NonSysCntx ((E3_uint32) 1<<12) /* Clears interrupt */
-+#define RestartCh1SysCntx ((E3_uint32) 1<<11) /* Clears interrupt */
-+#define
RestartCh0NonSysCntx ((E3_uint32) 1<<10) /* Clears interrupt */ -+#define RestartCh0SysCntx ((E3_uint32) 1<<9) /* Clears interrupt */ -+#define CProcStopped ((E3_uint32) 1<<9) /* Read value only */ -+ -+#define TraceSetEvents ((E3_uint32) 1<<8) -+#define DiscardNonSysCntxIn ((E3_uint32) 1<<7) -+#define DiscardSysCntxIn ((E3_uint32) 1<<6) -+#define CProcStop ((E3_uint32) 1<<5) /* Will empty all the command port queues. */ -+#define HaltThread ((E3_uint32) 1<<4) /* Will stop the thread proc and clear the tproc command queue */ -+#define HaltDmaDequeue ((E3_uint32) 1<<3) /* Will stop the dmaers starting new dma's. */ -+#define HaltDmas ((E3_uint32) 1<<2) /* Will stop the dmaers and clear the dma command queues */ -+#define FlushCommandQueues ((E3_uint32) 1<<1) /* Causes the command ports to be flushed. */ -+#define StopNonSysCntxs ((E3_uint32) 1<<0) /* Prevents a non-SysCntx from starting. */ -+ -+/* Initial value of schedule status register */ -+#define LinkResetToken 0x00F -+ -+#define Sched_Initial_Value (LinkBoundaryScan | (LinkResetToken << 20) | \ -+ DiscardSysCntxIn | DiscardNonSysCntxIn | HaltThread | HaltDmas) -+ -+#define StopDmaQueues (HaltDmaDequeue | HaltDmas | \ -+ DiscardNonSysCntxIn | DiscardSysCntxIn) -+#define CheckDmaQueueStopped (INT_DiscardingNonSysCntx | INT_DiscardingSysCntx | INT_DProcHalted) -+ -+#define HaltStopAndExtTestMask 0xfff001ff -+#define HaltAndStopMask 0x000001ff -+ -+ -+#define DmaComQueueNotEmpty (1<<0) -+#define ThreadComQueueNotEmpty (1<<1) -+#define EventComQueueNotEmpty (1<<2) -+#define DmaComQueueHalfFull (1<<3) -+#define ThreadComQueueHalfFull (1<<4) -+#define EventComQueueHalfFull (1<<5) -+#define DmaComQueueError (1<<6) -+#define ThreadComQueueError (1<<7) -+#define EventComQueueError (1<<8) -+ -+#define ComQueueNotEmpty (DmaComQueueNotEmpty | ThreadComQueueNotEmpty | EventComQueueNotEmpty) -+#define ComQueueError (DmaComQueueError | ThreadComQueueError | EventComQueueError) -+ -+typedef union _E3_DmaInfo -+{ -+ E3_uint32 Value; -+ struct -+ { -+#if defined(__LITTLE_ENDIAN__) -+ E3_uint32 DmaOutputOpen:1; /* The packet is currently open */ -+ E3_uint32 :7; -+ E3_uint32 TimeSliceCount:2; /* Time left to timeslice */ -+ E3_uint32 UseRemotePriv:1; /* Set for remote read dmas */ -+ E3_uint32 DmaLastPacket:1; /* Set for the last packet of a dma */ -+ E3_uint32 PacketAckValue:2; /* Packet ack type. Valid if AckBufferValid set. */ -+ E3_uint32 PacketTimeout:1; /* Packet timeout. Sent an EopError. Valid if AckBufferValid set. */ -+ E3_uint32 AckBufferValid:1; /* Packet ack is valid. */ -+ E3_uint32 :16; /* read as Zero */ -+#else -+ E3_uint32 :16; /* read as Zero */ -+ E3_uint32 AckBufferValid:1; /* Packet ack is valid. */ -+ E3_uint32 PacketTimeout:1; /* Packet timeout. Sent an EopError. Valid if AckBufferValid set. */ -+ E3_uint32 PacketAckValue:2; /* Packet ack type. Valid if AckBufferValid set. 
*/
-+ E3_uint32 DmaLastPacket:1; /* Set for the last packet of a dma */
-+ E3_uint32 UseRemotePriv:1; /* Set for remote read dmas */
-+ E3_uint32 TimeSliceCount:2; /* Time left to timeslice */
-+ E3_uint32 :7;
-+ E3_uint32 DmaOutputOpen:1; /* The packet is currently open */
-+#endif
-+ } s;
-+} E3_DmaInfo;
-+
-+typedef volatile struct _E3_DmaRds
-+{
-+ E3_uint32 DMA_Source4to0AndTwoReads;
-+ E3_uint32 pad13;
-+ E3_uint32 DMA_BytesToRead;
-+ E3_uint32 pad14;
-+ E3_uint32 DMA_MinusPacketSize;
-+ E3_uint32 pad15;
-+ E3_uint32 DMA_MaxMinusPacketSize;
-+ E3_uint32 pad16;
-+ E3_uint32 DMA_DmaOutputOpen;
-+ E3_uint32 pad16a;
-+ E3_DmaInfo DMA_PacketInfo;
-+ E3_uint32 pad17[7];
-+ E3_uint32 IProcTrapBase;
-+ E3_uint32 pad18;
-+ E3_uint32 IProcBlockTrapBase;
-+ E3_uint32 pad19[11];
-+} E3_DmaRds;
-+
-+typedef volatile struct _E3_DmaWrs
-+{
-+ E3_uint64 pad0;
-+ E3_uint64 LdAlignment;
-+ E3_uint64 ResetAckNLdBytesToWr;
-+ E3_uint64 SetAckNLdBytesToWr;
-+ E3_uint64 LdBytesToRd;
-+ E3_uint64 LdDmaType;
-+ E3_uint64 SendRoutes;
-+ E3_uint64 SendEop;
-+ E3_uint64 pad1[8];
-+} E3_DmaWrs;
-+
-+typedef volatile struct _E3_Exts
-+{
-+ E3_uint32 CurrContext; /* 0x12a00 */
-+ E3_uint32 pad0;
-+ E3_Status_Reg DProcStatus; /* 0x12a08 */
-+ E3_uint32 pad1;
-+ E3_Status_Reg CProcStatus; /* 0x12a10 */
-+ E3_uint32 pad2;
-+ E3_Status_Reg TProcStatus; /* 0x12a18 */
-+ E3_uint32 pad3;
-+ E3_IProcStatus_Reg IProcStatus; /* 0x12a20 */
-+ E3_uint32 pad4[3];
-+
-+ E3_uint32 IProcTypeContext; /* 0x12a30 */
-+ E3_uint32 pad5;
-+ E3_uint32 IProcTransAddr; /* 0x12a38 */
-+ E3_uint32 pad6;
-+ E3_uint32 IProcCurrTransData0; /* 0x12a40 */
-+ E3_uint32 pad7;
-+ E3_uint32 IProcCurrTransData1; /* 0x12a48 */
-+ E3_uint32 pad8;
-+
-+ E3_uint32 SchCntReg; /* 0x12a50 */
-+ E3_uint32 pad9;
-+ E3_uint32 InterruptReg; /* 0x12a58 */
-+ E3_uint32 pad10;
-+ E3_uint32 InterruptMask; /* 0x12a60 */
-+ E3_uint32 pad11;
-+ E3_uint32 LinkErrorTypes; /* 0x12a68 */
-+ E3_uint32 pad12[3];
-+ E3_uint32 LinkState; /* a read here returns the DataDel value for the */
-+ /* link that has just been defined by a write to */
-+ /* Regs.Exts.SchCntReg.LinkSetValue */
-+ E3_uint32 pad13;
-+
-+ union /* 0x12a80 */
-+ {
-+ E3_DmaWrs DmaWrs;
-+ E3_DmaRds DmaRds;
-+ } Dmas;
-+} E3_Exts;
-+
-+typedef union com_port_entry
-+{
-+ E3_uint64 type;
-+ struct
-+ {
-+ E3_uint32 Address; /* Command VAddr */
-+#if defined(__LITTLE_ENDIAN__)
-+ E3_uint32 Context0Issue:1; /* Issue was for context 0 */
-+ E3_uint32 EventNotCommand:1; /* Issue address bit 3 */
-+ E3_uint32 RemoteDesc:1; /* Issue address bit 5 */
-+ E3_uint32 :13; /* read as Zero */
-+ E3_uint32 Context:12; /* Command Context */
-+ E3_uint32 :4; /* read as Zero */
-+#else
-+ E3_uint32 :4; /* read as Zero */
-+ E3_uint32 Context:12; /* Command Context */
-+ E3_uint32 :13; /* read as Zero */
-+ E3_uint32 RemoteDesc:1; /* Issue address bit 5 */
-+ E3_uint32 EventNotCommand:1; /* Issue address bit 3 */
-+ E3_uint32 Context0Issue:1; /* Issue was for context 0 */
-+#endif
-+ } s;
-+} E3_ComPortEntry;
-+
-+/* control reg bits */
-+#define CONT_MMU_ENABLE (1 << 0) /* bit 0 enables mmu */
-+#define CONT_ENABLE_8K_PAGES (1 << 1) /* When set smallest page is 8k instead of 4k. */
-+#define CONT_EN_ALL_SETS (1 << 2) /* enable cache */
-+#define CONT_CACHE_LEVEL0 (1 << 3) /* cache context table */
-+#define CONT_CACHE_LEVEL1 (1 << 4) /* cache up level 1 PTD/PTE */
-+#define CONT_CACHE_LEVEL2 (1 << 5) /* cache up level 2 PTD/PTE */
-+#define CONT_CACHE_LEVEL3 (1 << 6) /* cache up level 3 PTD/PTE */
-+#define CONT_CACHE_TRAPS (1 << 7) /* cache up traps */
-+#define CONT_CACHE_LEV0_ROUTES (1 << 8) /* cache up small routes */
-+#define CONT_CACHE_LEV1_ROUTES (1 << 9) /* cache up large routes */
-+#define CONT_CACHE_ALL (CONT_CACHE_LEVEL0 | CONT_CACHE_LEVEL1 | CONT_CACHE_LEVEL2 | \
-+ CONT_CACHE_LEVEL3 | CONT_CACHE_TRAPS | \
-+ CONT_CACHE_LEV0_ROUTES | CONT_CACHE_LEV1_ROUTES)
-+
-+#define CONT_SYNCHRONOUS (1 << 10) /* PCI running sync */
-+#define CONT_SER (1 << 11) /* Single bit output (Elan1 SER bit) */
-+#define CONT_SIR (1 << 12) /* Writing 1 resets elan. */
-+
-+#define CONT_PSYCHO_MODE (1 << 13) /* Enables all the perversion required by psycho */
-+#define CONT_ENABLE_ECC (1 << 14) /* Enables error detection on the ECC */
-+#define CONT_SDRAM_TESTING (1 << 15) /* Switches to test mode for checking ECC data bits */
-+
-+/* defines SDRam CasLatency. Once set will not change again unless reset is reasserted. */
-+/* 1 = Cas Latency is 3, 0 = Cas Latency is 2 */
-+#define CAS_LATENCY_2 (0 << 16)
-+#define CAS_LATENCY_3 (1 << 16)
-+#define REFRESH_RATE_2US (0 << 17) /* defines 2us SDRam Refresh rate. */
-+#define REFRESH_RATE_4US (1 << 17) /* defines 4us SDRam Refresh rate. */
-+#define REFRESH_RATE_8US (2 << 17) /* defines 8us SDRam Refresh rate. */
-+#define REFRESH_RATE_16US (3 << 17) /* defines 16us SDRam Refresh rate. */
-+
-+#define CONT_PCI_ERR (1 << 19) /* Read 1 if PCI Error */
-+#define CONT_CLEAR_PCI_ERROR (1 << 19) /* Clears a PCI error. */
-+
-+/* Will cause the PCI error bit to become set. This is used to force the threads proc
-+ and the uProc to start to stall. */
-+#define CONT_SET_PCI_ERROR (1 << 20)
-+
-+/* Writes SDram control reg when set. Also starts SDram memory system refreshing. */
-+#define SETUP_SDRAM (1 << 21)
-+
-+/* Flushes the tlb */
-+#define MMU_FLUSH (1 << 22)
-+/* and read back when it's finished */
-+#define MMU_FLUSHED (1 << 0)
-+
-+/* Clears any ECC error detected by SDRam interface */
-+#define CLEAR_SDRAM_ERROR (1 << 23)
-+
-+#define ECC_ADDR_MASK 0x0ffffff8
-+#define ECC_UE_MASK 0x1
-+#define ECC_CE_MASK 0x2
-+#define ECC_ME_MASK 0x4
-+#define ECC_SYN_MASK 0xff
-+
-+/* define page table entry bit fields */
-+#define TLB_PageSizeBits (3 << 0)
-+#define TLB_ACCBits (7 << 2)
-+#define TLB_LocalBit (1 << 5)
-+#define TLB_PCI64BitTargetBit (1 << 6)
-+#define TLB_PCIBigEndianBit (1 << 7)
-+
-+#define TLB_ModifiedBit (1 << 55)
-+#define TLB_ReferencedBit (1 << 63)
-+
-+/* Used to read values from the tlb. */
-+#define TLB_TlbReadCntBitsSh 56
-+#define TLB_UseSelAddrSh (1ULL << 60)
-+#define TLB_WriteTlbLine (1ULL << 61)
-+
-+#define TLB_SEL_LINE(LineNo) (TLB_UseSelAddrSh | \
-+ ((E3_uint64)((LineNo) & 0xf) << TLB_TlbReadCntBitsSh))
-+
-+typedef union _E3_CacheContReg
-+{
-+ E3_uint32 ContReg;
-+ struct
-+ {
-+#if defined(__LITTLE_ENDIAN__)
-+ E3_uint32 MMU_Enable:1; /* wr 1 to enable the MMU */
-+ E3_uint32 Set8kPages:1; /* wr 1 smallest page is 8k. */
-+typedef union _E3_CacheContReg
-+{
-+ E3_uint32 ContReg;
-+ struct
-+ {
-+#if defined(__LITTLE_ENDIAN__)
-+ E3_uint32 MMU_Enable:1; /* wr 1 to enable the MMU */
-+ E3_uint32 Set8kPages:1; /* wr 1 smallest page is 8k. */
-+ E3_uint32 EnableAllSets:1; /* wr 1 All the cache sets are enabled */
-+ E3_uint32 Cache_Level0:1; /* wr 1 lev0 page tabs will be cached */
-+ E3_uint32 Cache_Level1:1; /* wr 1 lev1 page tabs will be cached */
-+ E3_uint32 Cache_Level2:1; /* wr 1 lev2 page tabs will be cached */
-+ E3_uint32 Cache_Level3:1; /* wr 1 lev3 page tabs will be cached */
-+ E3_uint32 Cache_Traps:1; /* wr 1 trap info will be cached */
-+ E3_uint32 Cache_Lev0_Routes:1; /* wr 1 small routes will be cached */
-+ E3_uint32 Cache_Lev1_Routes:1; /* wr 1 big routes will be cached */
-+ E3_uint32 PCI_Synchronous:1; /* Pci and sys clocks are running synchronously*/
-+ E3_uint32 SER:1; /* 1 bit output port */
-+ E3_uint32 SIR:1; /* write 1 will reset elan */
-+ E3_uint32 PsychoMode:1; /* Enables psycho perversion mode. */
-+ E3_uint32 CasLatency:1; /* 1=cas latency=3, 0=cas latency=2 */
-+ E3_uint32 RefreshRate:2; /* 0=2us, 1=4us, 2=8us, 3=16us */
-+ E3_uint32 Pci_Err:1; /* pci error. Write 1 clears err */
-+ E3_uint32 Set_Pci_Error:1; /* Will simulate a Pci error */
-+ E3_uint32 StartSDRam:1; /* Starts the sdram subsystem */
-+ E3_uint32 FlushTlb:1; /* Flush the contents of the tlb */
-+ E3_uint32 :11;
-+#else
-+ E3_uint32 :11;
-+ E3_uint32 FlushTlb:1; /* Flush the contents of the tlb */
-+ E3_uint32 StartSDRam:1; /* Starts the sdram subsystem */
-+ E3_uint32 Set_Pci_Error:1; /* Will simulate a Pci error */
-+ E3_uint32 Pci_Err:1; /* pci error. Write 1 clears err */
-+ E3_uint32 RefreshRate:2; /* 0=2us, 1=4us, 2=8us, 3=16us */
-+ E3_uint32 CasLatency:1; /* 1=cas latency=3, 0=cas latency=2 */
-+ E3_uint32 PsychoMode:1; /* Enables psycho perversion mode. */
-+ E3_uint32 SIR:1; /* write 1 will reset elan */
-+ E3_uint32 SER:1; /* 1 bit output port */
-+ E3_uint32 PCI_Synchronous:1; /* Pci and sys clocks are running synchronously*/
-+ E3_uint32 Cache_Lev1_Routes:1; /* wr 1 big routes will be cached */
-+ E3_uint32 Cache_Lev0_Routes:1; /* wr 1 small routes will be cached */
-+ E3_uint32 Cache_Traps:1; /* wr 1 trap info will be cached */
-+ E3_uint32 Cache_Level3:1; /* wr 1 lev3 page tabs will be cached */
-+ E3_uint32 Cache_Level2:1; /* wr 1 lev2 page tabs will be cached */
-+ E3_uint32 Cache_Level1:1; /* wr 1 lev1 page tabs will be cached */
-+ E3_uint32 Cache_Level0:1; /* wr 1 lev0 page tabs will be cached */
-+ E3_uint32 EnableAllSets:1; /* wr 1 All the cache sets are enabled */
-+ E3_uint32 Set8kPages:1; /* wr 1 smallest page is 8k. */
-+ E3_uint32 MMU_Enable:1; /* wr 1 to enable the MMU */
-+#endif
-+ } s;
-+} E3_CacheContReg;
-+
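A hedged sketch of using the bitfield view just defined; the register itself appears further down in E3_DataBusMap as Cache_Control_Reg, and the values here are invented for illustration.

E3_CacheContReg cc;
cc.ContReg = 0;                 /* start from a clear register image */
cc.s.MMU_Enable = 1;            /* wr 1 to enable the MMU */
cc.s.EnableAllSets = 1;         /* enable all the cache sets */
cc.s.RefreshRate = 1;           /* 1 = 4us SDRam refresh */
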
-+typedef union _E3_TrapBits
-+{
-+ volatile E3_uint32 Bits;
-+ struct
-+ {
-+#if defined(__LITTLE_ENDIAN__)
-+ E3_uint32 ForcedTProcTrap:1; /* The threads proc has been halted */
-+ E3_uint32 InstAccessException:1; /* An instruction access exception */
-+ E3_uint32 Unimplemented:1; /* Unimplemented instruction executed */
-+ E3_uint32 DataAccessException:1; /* A data access exception */
-+
-+ E3_uint32 ThreadTimeout:1; /* The threads outputer has timed out */
-+ E3_uint32 OpenException:1; /* Invalid sequence of open, sendtr or close */
-+ E3_uint32 OpenRouteFetch:1; /* Fault while fetching routes for previous open*/
-+ E3_uint32 TrapForTooManyInsts:1; /* Thread has been executing for too long */
-+
-+ E3_uint32 PacketAckValue:2; /* Packet ack type. Valid if AckBufferValid set. */
-+ E3_uint32 PacketTimeout:1; /* Packet timeout. Sent an EopError. Valid if AckBufferValid set. */
-+
-+ E3_uint32 AckBufferValid:1; /* The PacketAckValue bits are valid */
-+ E3_uint32 OutputWasOpen:1; /* The output was open when tproc trapped */
-+ E3_uint32 TProcDeschedule:2; /* The reason the tproc stopped running. */
-+ E3_uint32 :17;
-+#else
-+ E3_uint32 :17;
-+ E3_uint32 TProcDeschedule:2; /* The reason the tproc stopped running. */
-+ E3_uint32 OutputWasOpen:1; /* The output was open when tproc trapped */
-+ E3_uint32 AckBufferValid:1; /* The PacketAckValue bits are valid */
-+
-+ E3_uint32 PacketTimeout:1; /* Packet timeout. Sent an EopError. Valid if AckBufferValid set. */
-+ E3_uint32 PacketAckValue:2; /* Packet ack type. Valid if AckBufferValid set. */
-+
-+ E3_uint32 TrapForTooManyInsts:1; /* Thread has been executing for too long */
-+ E3_uint32 OpenRouteFetch:1; /* Fault while fetching routes for previous open*/
-+ E3_uint32 OpenException:1; /* Invalid sequence of open, sendtr or close */
-+ E3_uint32 ThreadTimeout:1; /* The threads outputer has timed out */
-+
-+ E3_uint32 DataAccessException:1; /* A data access exception */
-+ E3_uint32 Unimplemented:1; /* Unimplemented instruction executed */
-+ E3_uint32 InstAccessException:1; /* An instruction access exception */
-+ E3_uint32 ForcedTProcTrap:1; /* The threads proc has been halted */
-+#endif
-+ } s;
-+} E3_TrapBits;
-+
-+typedef union _E3_DirtyBits
-+{
-+ volatile E3_uint32 Bits;
-+ struct
-+ {
-+#if defined(__LITTLE_ENDIAN__)
-+ E3_uint32 GlobalsDirty:8;
-+ E3_uint32 OutsDirty:8; /* will always read as dirty. */
-+ E3_uint32 LocalsDirty:8;
-+ E3_uint32 InsDirty:8;
-+#else
-+ E3_uint32 InsDirty:8;
-+ E3_uint32 LocalsDirty:8;
-+ E3_uint32 OutsDirty:8; /* will always read as dirty. */
-+ E3_uint32 GlobalsDirty:8;
-+#endif
-+ } s;
-+} E3_DirtyBits;
-+
-+#define E3_TProcDescheduleMask 0x6000
-+#define E3_TProcDescheduleWait 0x2000
-+#define E3_TProcDescheduleSuspend 0x4000
-+#define E3_TProcDescheduleBreak 0x6000
-+
-+#define E3_TrapBitsMask 0x7fff
-+
-+#define ThreadRestartFromTrapBit 1
-+#define ThreadReloadAllRegs 2
-+
-+#define E3_PAckOk 0
-+#define E3_PAckTestFail 1
-+#define E3_PAckDiscard 2
-+#define E3_PAckError 3
-+
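A hedged sketch of how the E3_PAck* codes combine with the E3_TrapBits fields above; the helper is hypothetical, and the TrapBits value would come from the TrapBits register declared in E3_DataBusMap below.

/* Sketch only: 1 if the trapped packet was positively acked, 0 if it
 * received some other ack, -1 if the ack buffer holds nothing valid. */
static int trapped_packet_acked(E3_TrapBits tb)
{
    if (!tb.s.AckBufferValid)
        return -1;                          /* PacketAckValue not valid */
    return tb.s.PacketAckValue == E3_PAckOk;
}
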
-+typedef volatile struct _E3_DataBusMap
-+{
-+ E3_uint64 Dma_Alignment_Port[8]; /* 0x00002800 */
-+ E3_uint32 pad0[0x30]; /* 0x00002840 */
-+
-+ E3_uint32 Input_Trans0_Data[0x10]; /* 0x00002900 */
-+ E3_uint32 Input_Trans1_Data[0x10];
-+ E3_uint32 Input_Trans2_Data[0x10];
-+ E3_uint32 Input_Trans3_Data[0x10];
-+
-+/* this is the start of the exts directly addressable from the ucode. */
-+ E3_Exts Exts; /* 0x00002a00 */
-+
-+/* this is the start of the registers directly addressable from the ucode. */
-+ E3_DMA Dma_Desc; /* 0x00002b00 */
-+
-+ E3_uint32 Dma_Last_Packet_Size; /* 0x00002b20 */
-+ E3_uint32 Dma_This_Packet_Size; /* 0x00002b24 */
-+ E3_uint32 Dma_Tmp_Source; /* 0x00002b28 */
-+ E3_uint32 Dma_Tmp_Dest; /* 0x00002b2c */
-+
-+ E3_Addr Thread_SP_Save_Ptr; /* points to the thread desched save word. */
-+ E3_uint32 Dma_Desc_Size_InProg; /* 0x00002b34 */
-+
-+ E3_uint32 Thread_Desc_SP; /* 0x00002b38 */
-+ E3_uint32 Thread_Desc_Context; /* 0x00002b3c */
-+
-+ E3_uint32 uCode_TMP[0x10]; /* 0x00002b40 */
-+
-+ E3_uint32 TProc_NonSysCntx_FPtr; /* 0x00002b80 */
-+ E3_uint32 TProc_NonSysCntx_BPtr; /* 0x00002b84 */
-+ E3_uint32 TProc_SysCntx_FPtr; /* 0x00002b88 */
-+ E3_uint32 TProc_SysCntx_BPtr; /* 0x00002b8c */
-+ E3_uint32 DProc_NonSysCntx_FPtr; /* 0x00002b90 */
-+ E3_uint32 DProc_NonSysCntx_BPtr; /* 0x00002b94 */
-+ E3_uint32 DProc_SysCntx_FPtr; /* 0x00002b98 */
-+ E3_uint32 DProc_SysCntx_BPtr; /* 0x00002b9c */
-+
-+ E3_uint32 Input_Trap_Base; /* 0x00002ba0 */
-+ E3_uint32 Input_Queue_Offset; /* 0x00002ba4 */
-+ E3_uint32 CProc_TrapSave_Addr; /* 0x00002ba8 */
-+ E3_uint32 Input_Queue_Addr; /* 0x00002bac */
-+ E3_uint32 uCode_TMP10; /* 0x00002bb0 */
-+ E3_uint32 uCode_TMP11; /* 0x00002bb4 */
-+ E3_uint32 Event_Trace_Ptr; /* 0x00002bb8 */
-+ E3_uint32 Event_Trace_Mask; /* 0x00002bbc */
-+
-+ E3_ComPortEntry DmaComQueue[3]; /* 0x00002bc0 */
-+
-+ E3_uint32 Event_Int_Queue_FPtr; /* 0x00002bd8 */
-+ E3_uint32 Event_Int_Queue_BPtr; /* 0x00002bdc */
-+
-+ E3_ComPortEntry ThreadComQueue[2]; /* 0x00002be0 */
-+ E3_ComPortEntry SetEventComQueue[2]; /* 0x00002bf0 */
-+
-+ E3_uint32 pad1[96]; /* 0x00002c00 */
-+ E3_uint32 ComQueueStatus; /* 0x00002d80 */
-+ E3_uint32 pad2[31]; /* 0x00002d84 */
-+
-+/* These are the internal registers of the threads proc. */
-+ E3_uint32 Globals[8]; /* 0x00002e00 */
-+ E3_uint32 Outs[8];
-+ E3_uint32 Locals[8];
-+ E3_uint32 Ins[8];
-+
-+ E3_uint32 pad3[16];
-+
-+ E3_uint32 IBufferReg[4];
-+
-+ E3_uint32 ExecuteNPC;
-+ E3_uint32 ExecutePC;
-+
-+ E3_uint32 StartPC;
-+ E3_uint32 pad4;
-+
-+ E3_uint32 StartnPC;
-+ E3_uint32 pad5;
-+
-+ E3_TrapBits TrapBits;
-+ E3_DirtyBits DirtyBits;
-+ E3_uint64 LoadDataReg;
-+ E3_uint64 StoreDataReg;
-+
-+ E3_uint32 ECC_STATUS0;
-+ E3_uint32 ECC_STATUS1;
-+ E3_uint32 pad6[0xe];
-+
-+/* Pci slave port regs */
-+ E3_uint32 PciSlaveReadCache[0x10];
-+
-+ E3_uint32 Fault_Base_Ptr;
-+ E3_uint32 pad7;
-+ E3_uint32 Context_Ptr;
-+ E3_uint32 pad8;
-+ E3_uint32 Input_Context_Filter; /* write only, No data */
-+ E3_uint32 Input_Context_Fil_Flush; /* write only, No data */
-+ E3_CacheContReg Cache_Control_Reg;
-+ E3_uint32 pad9;
-+
-+ E3_uint64 Tlb_Line_Value;
-+
-+ E3_uint32 Walk_Datareg1;
-+ E3_uint32 Walk_VAddr_Tab_Base;
-+ E3_uint32 Walk_Datareg;
-+ E3_uint32 Walk_ContextReg;
-+ E3_uint32 Walk_FaultAddr;
-+ E3_uint32 Walk_EventAddr;
-+
-+/* outputers output context registers. */
-+ E3_uint64 Dma_Route_012345_Context;
-+ E3_uint64 pad10;
-+ E3_uint64 Dma_Route_01234567;
-+ E3_uint64 Dma_Route_89ABCDEF;
-+
-+ E3_uint64 Thread_Route_012345_Context;
-+ E3_uint64 pad11;
-+ E3_uint64 Thread_Route_01234567;
-+ E3_uint64 Thread_Route_89ABCDEF;
-+} E3_DataBusMap;
-+
-+typedef volatile struct _E3_Regs
-+{
-+ E3_CacheSets Sets; /* 0x00000000 */
-+ E3_CacheTags Tags; /* 0x00002000 */
-+ E3_DataBusMap Regs; /* 0x00002800 */
-+ E3_uint32 pad1[0x400];
-+ E3_User_Regs URegs;
-+} E3_Regs;
-+
-+#define MAX_TRAPPED_TRANS 16
-+#define TRANS_DATA_WORDS 16
-+#define TRANS_DATA_BYTES 64
-+
-+/*
-+ * Event interrupt
-+ */
-+typedef volatile union _E3_EventInt
-+{
-+ E3_uint64 ForceAlign;
-+ struct {
-+ E3_uint32 IntCookie;
-+ E3_uint32 EventContext; /* Bits 16 to 28 */
-+ } s;
-+} E3_EventInt;
-+
-+#define GET_EVENT_CONTEXT(Ptr) ((Ptr->s.EventContext >> 16) & MAX_ROOT_CONTEXT_MASK)
-+
-+typedef volatile union _E3_ThreadQueue
-+{
-+ E3_uint64 ForceAlign;
-+ struct
-+ {
-+ E3_Addr Thread;
-+#if defined(__LITTLE_ENDIAN__)
-+ E3_uint32 :16; /* Bits 0 to 15 */
-+ E3_uint32 Context:13; /* Bits 16 to 28 */
-+ E3_uint32 :3; /* Bits 29 to 31 */
-+#else
-+ E3_uint32 :3; /* Bits 29 to 31 */
-+ E3_uint32 Context:13; /* Bits 16 to 28 */
-+ E3_uint32 :16; /* Bits 0 to 15 */
-+#endif
-+ } s;
-+} E3_ThreadQueue;
-+
-+typedef volatile union _E3_FaultStatusReg
-+{
-+ E3_uint32 Status;
-+ struct
-+ {
-+#if defined(__LITTLE_ENDIAN__)
-+ E3_uint32 AccTypePerm:3; /* Access permission. See below. Bits 0 to 2 */
-+ E3_uint32 AccSize:4; /* Access size. See below for different types. Bits 3 to 6 */
-+ E3_uint32 WrAcc:1; /* Access was a write. Bit 7 */
-+ E3_uint32 NonAllocAcc:1; /* Access was a cache non allocate type. Bit 8 */
-+ E3_uint32 BlkDataType:2; /* Data size used for endian flips. Bits 9 to 10 */
-+ E3_uint32 RdLine:1; /* Access was a dma read line. Bit 11 */
-+ E3_uint32 RdMult:1; /* Access was a dma read multiple. Bit 12 */
-+ E3_uint32 Walking:1; /* The fault occurred when walking. Bit 13 */
-+ E3_uint32 Level:2; /* Page table level when the fault occurred. Bits 14 to 15 */
-+ E3_uint32 ProtFault:1; /* A protection fault occurred. Bit 16 */
-+ E3_uint32 FaultPte:2; /* Page table type when the fault occurred. Bits 17 to 18 */
-+ E3_uint32 AlignmentErr:1; /* Address alignment did not match the access size. Bit 19 */
-+ E3_uint32 VProcSizeErr:1; /* VProc number is out of range. Bit 20 */
-+ E3_uint32 WalkBadData:1; /* Memory CRC error during a walk. Bit 21 */
-+ E3_uint32 :10; /* Bits 22 to 31 */
-+#else
-+ E3_uint32 :10; /* Bits 22 to 31 */
-+ E3_uint32 WalkBadData:1; /* Memory CRC error during a walk. Bit 21 */
-+ E3_uint32 VProcSizeErr:1; /* VProc number is out of range. Bit 20 */
-+ E3_uint32 AlignmentErr:1; /* Address alignment did not match the access size. Bit 19 */
-+ E3_uint32 FaultPte:2; /* Page table type when the fault occurred. Bits 17 to 18 */
-+ E3_uint32 ProtFault:1; /* A protection fault occurred. Bit 16 */
-+ E3_uint32 Level:2; /* Page table level when the fault occurred. Bits 14 to 15 */
-+ E3_uint32 Walking:1; /* The fault occurred when walking. Bit 13 */
-+ E3_uint32 RdMult:1; /* Access was a dma read multiple. Bit 12 */
-+ E3_uint32 RdLine:1; /* Access was a dma read line. Bit 11 */
-+ E3_uint32 BlkDataType:2; /* Data size used for endian flips. Bits 9 to 10 */
-+ E3_uint32 NonAllocAcc:1; /* Access was a cache non allocate type. Bit 8 */
-+ E3_uint32 WrAcc:1; /* Access was a write. Bit 7 */
-+ E3_uint32 AccSize:4; /* Access size. See below for different types.
Bits 3 to 6 */ -+ E3_uint32 AccTypePerm:3; /* Access permission. See below. Bits 0 to 2 */ -+#endif -+ } s; -+} E3_FaultStatusReg; -+ -+typedef union _E3_FaultSave -+{ -+ E3_uint64 ForceAlign; -+ struct { -+ E3_FaultStatusReg FSR; -+ volatile E3_uint32 FaultContext; -+ volatile E3_uint32 FaultAddress; -+ volatile E3_uint32 EventAddress; -+ } s; -+} E3_FaultSave; -+ -+/* MMU fault status reg bit positions. */ -+#define FSR_WritePermBit 0 /* 1=Write access perm, 0=Read access perm */ -+#define FSR_RemotePermBit 1 /* 1=Remote access perm, 0=local access perm */ -+#define FSR_EventPermBit 2 /* 1=Event access perm, 0=data access perm */ -+#define FSR_Size0Bit 3 -+#define FSR_Size1Bit 4 -+#define FSR_Size2Bit 5 -+#define FSR_Size3Bit 6 -+#define FSR_WriteAccBit 7 /* 1=Write access, 0=Read access. */ -+#define FSR_NonAllocBit 8 /* 1=Do not fill cache with this data */ -+#define FSR_BlkDataTy0Bit 9 -+#define FSR_BlkDataTy1Bit 10 -+#define FSR_ReadLineBit 11 -+#define FSR_ReadMultipleBit 12 -+ -+#define FSR_PermMask (0xf << FSR_WritePermBit) -+#define FSR_SizeMask (0xf << FSR_Size0Bit) -+#define FSR_AccTypeMask (3 << FSR_WriteAccBit) -+#define FSR_BlkDataTyMask (3 << FSR_BlkDataTy0Bit) -+#define FSR_PciAccTyMask (3 << FSR_ReadLineBit) -+#define FSR_Walking (0x1 << 13) -+#define FSR_Level_Mask (0x3 << 14) -+#define FSR_ProtFault (0x1 << 16) -+#define FSR_FaultPTEType (0x2 << 17) -+#define FSR_AddrSizeError (0x1 << 19) -+#define FSR_VProcSizeError (0x1 << 20) -+#define FSR_WalkBadData (0x1 << 21) -+ -+#define FSR_PermRead 0 -+#define FSR_PermWrite 1 -+#define FSR_PermRemoteRead 2 -+#define FSR_PermRemoteWrite 3 -+#define FSR_PermEventRd 4 -+#define FSR_PermEventWr 5 -+#define FSR_PermRemoteEventRd 6 -+#define FSR_PermRemoteEventWr 7 -+ -+/* AT size values for each access type */ -+#define FSR_Word (0x0 << FSR_Size0Bit) -+#define FSR_DWord (0x1 << FSR_Size0Bit) -+#define FSR_QWord (0x2 << FSR_Size0Bit) -+#define FSR_Block32 (0x3 << FSR_Size0Bit) -+#define FSR_ReservedBlock (0x6 << FSR_Size0Bit) -+#define FSR_Block64 (0x7 << FSR_Size0Bit) -+#define FSR_GetCntxFilter (0x8 << FSR_Size0Bit) -+#define FSR_QueueDWord (0x9 << FSR_Size0Bit) -+#define FSR_RouteFetch (0xa << FSR_Size0Bit) -+#define FSR_QueueBlock (0xb << FSR_Size0Bit) -+#define FSR_Block32PartWrite (0xe << FSR_Size0Bit) -+#define FSR_Block64PartWrite (0xf << FSR_Size0Bit) -+ -+#define FSR_AllocRead (0 << FSR_WriteAccBit) -+#define FSR_AllocWrite (1 << FSR_WriteAccBit) -+#define FSR_NonAllocRd (2 << FSR_WriteAccBit) -+#define FSR_NonAllocWr (3 << FSR_WriteAccBit) -+ -+#define FSR_TypeByte (0 << FSR_BlkDataTy0Bit) -+#define FSR_TypeHWord (1 << FSR_BlkDataTy0Bit) -+#define FSR_TypeWord (2 << FSR_BlkDataTy0Bit) -+#define FSR_TypeDWord (3 << FSR_BlkDataTy0Bit) -+ -+typedef union E3_TrTypeCntx -+{ -+ E3_uint32 TypeContext; -+ struct -+ { -+#if defined(__LITTLE_ENDIAN__) -+ E3_uint32 Type:16; /* Transaction type field */ -+ E3_uint32 Context:13; /* Transaction context */ -+ E3_uint32 TypeCntxInvalid:1; /* Bit 29 */ -+ E3_uint32 StatusRegValid:1; /* Bit 30 */ -+ E3_uint32 LastTrappedTrans:1; /* Bit 31 */ -+#else -+ E3_uint32 LastTrappedTrans:1; /* Bit 31 */ -+ E3_uint32 StatusRegValid:1; /* Bit 30 */ -+ E3_uint32 TypeCntxInvalid:1; /* Bit 29 */ -+ E3_uint32 Context:13; /* Transaction context */ -+ E3_uint32 Type:16; /* Transaction type field */ -+#endif -+ } s; -+} E3_TrTypeCntx; -+ -+#define GET_TRAP_TYPE(Ptr) (Ptr.TypeContext & 0xfff) -+#define GET_TRAP_CONTEXT(Ptr) ((Ptr.TypeContext >> 16) & 0x1fff) -+ -+/* Words have been swapped for big endian 
access when fetched with dword access from elan.*/ -+typedef union _E3_IprocTrapHeader -+{ -+ E3_uint64 forceAlign; -+ -+ struct -+ { -+ E3_TrTypeCntx TrTypeCntx; -+ E3_uint32 TrAddr; -+ E3_uint32 TrData0; -+ union -+ { -+ E3_IProcStatus_Reg u_IProcStatus; -+ E3_uint32 u_TrData1; -+ } ipsotd; -+ } s; -+} E3_IprocTrapHeader; -+ -+#define IProcTrapStatus ipsotd.u_IProcStatus -+#define TrData1 ipsotd.u_TrData1 -+ -+typedef struct E3_IprocTrapData -+{ -+ E3_uint32 TrData[TRANS_DATA_WORDS]; -+} E3_IprocTrapData; -+ -+/* -+ * 64 kbytes of elan local memory. Must be aligned on a 64k boundary -+ */ -+#define E3_NonSysCntxQueueSize 0x400 -+#define E3_SysCntxQueueSize 0x100 -+ -+typedef struct _E3_TrapAndQueue -+{ -+ E3_DMA NonSysCntxDmaQueue[E3_NonSysCntxQueueSize]; /* 0x000000 */ -+ E3_DMA SysCntxDmaQueue[E3_SysCntxQueueSize]; /* 0x008000 */ -+ E3_EventInt EventIntQueue[E3_NonSysCntxQueueSize]; /* 0x00A000 */ -+ E3_ThreadQueue NonSysCntxThreadQueue[E3_NonSysCntxQueueSize]; /* 0x00C000 */ -+ E3_ThreadQueue SysCntxThreadQueue[E3_SysCntxQueueSize]; /* 0x00E000 */ -+ E3_FaultSave IProcSysCntx; /* 0x00E800 */ -+ E3_Addr Thread_SP_Save; /* 0x00E810 */ -+ E3_uint32 dummy0[3]; /* 0x00E814 */ -+ E3_FaultSave ThreadProcData; /* 0x00E820 */ -+ E3_FaultSave ThreadProcInst; /* 0x00E830 */ -+ E3_FaultSave dummy1[2]; /* 0x00E840 */ -+ E3_FaultSave ThreadProcOpen; /* 0x00E860 */ -+ E3_FaultSave dummy2; /* 0x00E870 */ -+ E3_FaultSave IProcNonSysCntx; /* 0x00E880 */ -+ E3_FaultSave DProc; /* 0x00E890 */ -+ E3_FaultSave CProc; /* 0x00E8A0 */ -+ E3_FaultSave TProc; /* 0x00E8B0 */ -+ E3_FaultSave DProcData0; /* 0x00E8C0 */ -+ E3_FaultSave DProcData1; /* 0x00E8D0 */ -+ E3_FaultSave DProcData2; /* 0x00E8E0 */ -+ E3_FaultSave DProcData3; /* 0x00E8F0 */ -+ E3_uint32 dummy3[0xc0]; /* 0x00E900 */ -+ E3_IprocTrapHeader VCh0_C0_TrHead[MAX_TRAPPED_TRANS]; -+ E3_IprocTrapHeader VCh0_NonC0_TrHead[MAX_TRAPPED_TRANS]; -+ E3_IprocTrapHeader VCh1_C0_TrHead[MAX_TRAPPED_TRANS]; -+ E3_IprocTrapHeader VCh1_NonC0_TrHead[MAX_TRAPPED_TRANS]; -+ E3_IprocTrapData VCh0_C0_TrData[MAX_TRAPPED_TRANS]; -+ E3_IprocTrapData VCh0_NonC0_TrData[MAX_TRAPPED_TRANS]; -+ E3_IprocTrapData VCh1_C0_TrData[MAX_TRAPPED_TRANS]; -+ E3_IprocTrapData VCh1_NonC0_TrData[MAX_TRAPPED_TRANS]; -+ E3_uint64 DmaOverflowQueueSpace[0x1000]; -+ E3_uint64 ThreadOverflowQueueSpace[0x800]; -+ E3_uint64 EventOverflowQueueSpace[0x800]; -+} E3_TrapAndQueue; -+ -+ -+typedef struct _E3_ContextControlBlock -+{ -+ E3_uint32 rootPTP; -+ E3_uint32 filter; -+ E3_uint32 VPT_ptr; -+ E3_uint32 VPT_mask; -+} E3_ContextControlBlock; -+ -+#define E3_CCB_CNTX0 (0x20000000) -+#define E3_CCB_DISCARD_ALL (0x40000000) -+#define E3_CCB_ACKOK_ALL (0x80000000) -+#define E3_CCB_MASK (0xc0000000) -+ -+#define E3_NUM_CONTEXT_0 (0x20) -+ -+/* Macros to manipulate event queue pointers */ -+/* generate index in EventIntQueue */ -+#define E3_EVENT_INTQ_INDEX(fptr) (((fptr) & 0x1fff) >> 3) -+/* generate next fptr */ -+#define E3_EVENT_INTQ_NEXT(fptr) ((((fptr) + 8) & ~0x4000) | 0x2000) -+ -+ -+#endif /* notdef _ELAN3_ELANREGS_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan3/elansyscall.h linux-2.6.9/include/elan3/elansyscall.h ---- clean/include/elan3/elansyscall.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan3/elansyscall.h 2004-06-07 09:50:06.000000000 -0400 -@@ -0,0 +1,124 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN3_ELANSYSCALL_H -+#define __ELAN3_ELANSYSCALL_H -+ -+#ident "$Id: elansyscall.h,v 1.34 2004/06/07 13:50:06 mike Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elansyscall.h,v $*/ -+ -+#ifdef __cplusplus -+extern "C" { -+#endif -+ -+#ifndef _ASM -+ -+typedef struct sys_word_item -+{ -+ struct sys_word_item *Next; -+ E3_uint32 Value; -+} SYS_WORD_ITEM; -+ -+typedef struct sys_block_item -+{ -+ struct sys_block_item *Next; -+ E3_uint32 *Pointer; -+} SYS_BLOCK_ITEM; -+ -+typedef struct sys_swap_space -+{ -+ int Magic; -+ void *ItemListsHead[MAX_LISTS]; -+ void **ItemListsTailp[MAX_LISTS]; -+} SYS_SWAP_SPACE; -+ -+typedef struct sys_exception -+{ -+ int Type; -+ int Proc; -+ u_long Res; -+ u_long Value; -+ E3_FaultSave_BE FaultArea; -+ -+ union -+ { -+ DMA_TRAP Dma; -+ THREAD_TRAP Thread; -+ COMMAND_TRAP Command; -+ INPUT_TRAP Input; -+ } Union; -+} SYS_EXCEPTION; -+ -+typedef struct sys_exception_space -+{ -+ struct sys_exception_space *Next; -+ int Magic; -+ int Front; -+ int Back; -+ int Count; -+ int Overflow; -+ SYS_EXCEPTION Exceptions[1]; -+} SYS_EXCEPTION_SPACE; -+ -+#ifdef __KERNEL__ -+ -+typedef struct sys_ctxt -+{ -+ SYS_SWAP_SPACE *Swap; -+ SYS_EXCEPTION_SPACE *Exceptions; -+ kmutex_t Lock; -+ -+ spinlock_t WaitLock; -+ kcondvar_t NetworkErrorWait; -+ -+ int Armed; -+ int Backoff; -+ long Time; -+ -+ u_long Flags; -+ int signal; -+ -+ EVENT_COOKIE_TABLE *Table; -+} SYS_CTXT; -+ -+extern SYS_CTXT *sys_init (ELAN3_CTXT *ctxt); -+extern int sys_waitevent (ELAN3_CTXT *ctxt, E3_Event *event); -+extern void sys_addException (SYS_CTXT *sctx, int type, int proc, caddr_t ptr, int size, -+ E3_FaultSave_BE *, u_long res, u_long value); -+extern int sys_getException (SYS_CTXT *sctx, SYS_EXCEPTION *ex); -+ -+/* returns -ve error or ELAN_CAP_OK or ELAN_CAP_RMS */ -+/* use = ELAN_USER_ATTACH, ELAN_USER_P2P, ELAN_USER_BROADCAST */ -+extern int elan3_validate_cap (ELAN3_DEV *dev, ELAN_CAPABILITY *cap ,int use); -+ -+#endif /* __KERNEL__ */ -+ -+#endif /* _ASM */ -+ -+/* values for "Flags" */ -+#define ELAN3_SYS_FLAG_DMA_BADVP 1 -+#define ELAN3_SYS_FLAG_THREAD_BADVP 2 -+#define ELAN3_SYS_FLAG_DMAFAIL 4 -+#define ELAN3_SYS_FLAG_NETERR 8 -+ -+#define SYS_SWAP_MAGIC 0xB23C52DF -+#define SYS_EXCEPTION_MAGIC 0xC34D63E0 -+ -+#define EXCEPTION_GLOBAL_STRING "elan3_exceptions" -+#define EXCEPTION_ABORT_STRING "elan3_abortstring" -+ -+#ifdef __cplusplus -+} -+#endif -+ -+#endif /* __ELAN3_ELANSYSCALL_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan3/elanuregs.h linux-2.6.9/include/elan3/elanuregs.h ---- clean/include/elan3/elanuregs.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan3/elanuregs.h 2003-09-24 09:57:24.000000000 -0400 -@@ -0,0 +1,295 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN3_ELANUREGS_H -+#define __ELAN3_ELANUREGS_H -+ -+#ident "$Id: elanuregs.h,v 1.10 2003/09/24 13:57:24 david Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elanuregs.h,v $*/ -+ -+#ifdef __cplusplus -+extern "C" { -+#endif -+ -+/* -+ * Statistic control reg values -+ * Each 4-bit nibble of the control word specifies what statistic -+ * is to be recorded in each of the 8 statistic counters -+ */ -+ -+/* Count reg 0 */ -+#define STC_INPUT_TRANSACTIONS 0 -+#define STP_DMA_EOP_WAIT_ACK 1 -+#define STP_THREAD_RUNNING 2 -+#define STP_UCODE_WAIT_MEM 3 -+#define STC_CACHE_WRITE_BACKS 4 -+#define STC_PCI_SLAVE_READS 5 -+#define STC_REG0_UNUSED6 6 -+#define STP_REG0_UNUSED7 7 -+ -+#define STATS_REG0_NAMES { \ -+ "STC_INPUT_TRANSACTIONS", \ -+ "STP_DMA_EOP_WAIT_ACK", \ -+ "STP_THREAD_RUNNING", \ -+ "STP_UCODE_WAIT_MEM", \ -+ "STC_CACHE_WRITE_BACKS", \ -+ "STC_PCI_SLAVE_READS", \ -+ "STC_REG0_UNUSED6", \ -+ "STP_REG0_UNUSED7" \ -+} -+ -+/* Count reg 1 */ -+#define STC_INPUT_WRITE_BLOCKS (0 << 4) -+#define STP_DMA_DATA_TRANSMITTING (1 << 4) -+#define STP_THEAD_WAITING_INST (2 << 4) -+#define STC_REG1_UNUSED3 (3 << 4) -+#define STP_FETCHING_ROUTES (4 << 4) -+#define STC_REG1_UNUSED5 (5 << 4) -+#define STC_PCI_SLAVE_WRITES (6 << 4) -+#define STP_PCI_SLAVE_READ_WAITING (7 << 4) -+ -+#define STATS_REG1_NAMES { \ -+ "STC_INPUT_WRITE_BLOCKS", \ -+ "STP_DMA_DATA_TRANSMITTING", \ -+ "STP_THEAD_WAITING_INST", \ -+ "STC_REG1_UNUSED3", \ -+ "STP_FETCHING_ROUTES", \ -+ "STC_REG1_UNUSED5", \ -+ "STC_PCI_SLAVE_WRITES", \ -+ "STP_PCI_SLAVE_READ_WAITING" \ -+} -+ -+/* Count reg 2 */ -+#define STC_INPUT_PKTS (0 << 8) -+#define STP_DMA_WAITING_MEM (1 << 8) -+#define STP_THREAD_WAIT_OPEN_PKT (2 << 8) -+#define STC_REG2_UNUSED3 (3 << 8) -+#define STC_ROUTE_FETCHES (4 << 8) -+#define STC_CACHE_NON_ALLOC_MISSES (5 << 8) -+#define STC_REG2_UNUSED6 (6 << 8) -+#define STP_PCI_SLAVE_WRITE_WAITING (7 << 8) -+ -+#define STATS_REG2_NAMES { \ -+ "STC_INPUT_PKTS", \ -+ "STP_DMA_WAITING_MEM", \ -+ "STP_THREAD_WAIT_OPEN_PKT", \ -+ "STC_REG2_UNUSED3", \ -+ "STC_ROUTE_FETCHES", \ -+ "STC_CACHE_NON_ALLOC_MISSES", \ -+ "STC_REG2_UNUSED6", \ -+ "STP_PCI_SLAVE_WRITE_WAITING" \ -+} -+ -+/* Count reg 3 */ -+#define STC_INPUT_PKTS_REJECTED (0 << 12) -+#define STP_DMA_WAIT_NETWORK_BUSY (1 << 12) -+#define STP_THREAD_WAIT_PACK (2 << 12) -+#define STP_UCODE_BLOCKED_UCODE (3 << 12) -+#define STC_TLB_HITS (4 << 12) -+#define STC_REG3_UNUSED5 (5 << 12) -+#define STC_PCI_MASTER_READS (6 << 12) -+#define STP_PCI_MASTER_WRITE_WAITING (7 << 12) -+ -+#define STATS_REG3_NAMES { \ -+ "STC_INPUT_PKTS_REJECTED", \ -+ "STP_DMA_WAIT_NETWORK_BUSY", \ -+ "STP_THREAD_WAIT_PACK", \ -+ "STP_UCODE_BLOCKED_UCODE", \ -+ "STC_TLB_HITS", \ -+ "STC_REG3_UNUSED5", \ -+ "STC_PCI_MASTER_READS", \ -+ "STP_PCI_MASTER_WRITE_WAITING"\ -+} -+ -+/* Count reg 4 */ -+#define STP_INPUT_DATA_TRANSMITTING (0 << 16) -+#define STC_DMA_NON_CTX0_PKTS (1 << 16) -+#define STP_THREAD_EOP_WAIT_ACK (2 << 16) -+#define STP_UCODE_DPROC_RUNNING (3 << 16) -+#define STC_TLB_MEM_WALKS (4 << 16) -+#define STC_REG4_UNUSED5 (5 << 16) -+#define STC_PCI_MASTER_WRITES (6 << 16) -+#define STP_PCI_MASTER_READ_WAITING (7 << 16) -+ -+#define STATS_REG4_NAMES { \ -+ "STP_INPUT_DATA_TRANSMITTING", \ -+ "STC_DMA_NON_CTX0_PKTS", \ -+ "STP_THREAD_EOP_WAIT_ACK", \ -+ "STP_UCODE_DPROC_RUNNING", \ -+ "STC_TLB_MEM_WALKS", \ -+ "STC_REG4_UNUSED5", \ -+ "STC_PCI_MASTER_WRITES", \ -+ 
"STP_PCI_MASTER_READ_WAITING" \ -+} -+ -+/* Count reg 5 */ -+#define STP_INPUT_WAITING_NETWORK_DATA (0 << 20) -+#define STC_DMA_NON_CTX0_PKTS_REJECTED (1 << 20) -+#define STP_THREAD_WAITING_DATA (2 << 20) -+#define STP_UCODE_CPROC_RUNNING (3 << 20) -+#define STP_THREAD_TRANSMITTING_DATA (4 << 20) -+#define STP_PCI_WAITING_MAIN (5 << 20) -+#define STC_REG5_UNUSED6 (6 << 20) -+#define STC_REG5_UNUSED7 (7 << 20) -+ -+#define STATS_REG5_NAMES { \ -+ "STP_INPUT_WAITING_NETWORK_DATA", \ -+ "STC_DMA_NON_CTX0_PKTS_REJECTED", \ -+ "STP_THREAD_WAITING_DATA", \ -+ "STP_UCODE_CPROC_RUNNING", \ -+ "STP_THREAD_TRANSMITTING_DATA", \ -+ "STP_PCI_WAITING_MAIN", \ -+ "STC_REG5_UNUSED6", \ -+ "STC_REG5_UNUSED7" \ -+} -+ -+/* Count reg 6 */ -+#define STP_INPUT_WAITING_MEMORY (0 << 24) -+#define STC_DMA_CTX0_PKTS (1 << 24) -+#define STP_THREAD_WAITING_MEMORY (2 << 24) -+#define STP_UCODE_TPROC_RUNNING (3 << 24) -+#define STC_CACHE_HITS (4 << 24) -+#define STP_PCI_WAITING_ELAN (5 << 24) -+#define STC_REG6_UNUSED4 (6 << 24) -+#define STC_REG6_UNUSED7 (7 << 24) -+ -+#define STATS_REG6_NAMES { \ -+ "STP_INPUT_WAITING_MEMORY", \ -+ "STC_DMA_CTX0_PKTS", \ -+ "STP_THREAD_WAITING_MEMORY", \ -+ "STP_UCODE_TPROC_RUNNING", \ -+ "STC_CACHE_HITS", \ -+ "STP_PCI_WAITING_ELAN", \ -+ "STC_REG6_UNUSED4", \ -+ "STC_REG6_UNUSED7" \ -+} -+ -+/* Count reg 7 */ -+#define STC_INPUT_CTX_FILTER_FILL (0 << 28) -+#define STC_DMA_CTX0_PKTS_REJECTED (1 << 28) -+#define STP_THREAD_WAIT_NETWORK_BUSY (2 << 28) -+#define STP_UCODE_IPROC_RUNNING (3 << 28) -+#define STP_TLB_MEM_WALKING (4 << 28) -+#define STC_CACHE_ALLOC_MISSES (5 << 28) -+#define STP_PCI_DATA_TRANSFER (6 << 28) -+#define STC_REG7_UNUSED7 (7 << 28) -+ -+#define STATS_REG7_NAMES { \ -+ "STC_INPUT_CTX_FILTER_FILL", \ -+ "STC_DMA_CTX0_PKTS_REJECTED", \ -+ "STP_THREAD_WAIT_NETWORK_BUSY",\ -+ "STP_UCODE_IPROC_RUNNING", \ -+ "STP_TLB_MEM_WALKING", \ -+ "STC_CACHE_ALLOC_MISSES", \ -+ "STP_PCI_DATA_TRANSFER", \ -+ "STC_REG7_UNUSED7" \ -+} -+ -+#define STATS_REG_NAMES { \ -+ STATS_REG0_NAMES, \ -+ STATS_REG1_NAMES, \ -+ STATS_REG2_NAMES, \ -+ STATS_REG3_NAMES, \ -+ STATS_REG4_NAMES, \ -+ STATS_REG5_NAMES, \ -+ STATS_REG6_NAMES, \ -+ STATS_REG7_NAMES, \ -+} -+ -+extern const char *elan3_stats_names[8][8]; -+ -+#define ELAN3_STATS_NAME(COUNT, CONTROL) (elan3_stats_names[(COUNT)][(CONTROL) & 7]) -+ -+typedef volatile union e3_StatsControl -+{ -+ E3_uint32 StatsControl; -+ struct -+ { -+#if defined(__LITTLE_ENDIAN__) -+ E3_uint32 StatCont0:4; -+ E3_uint32 StatCont1:4; -+ E3_uint32 StatCont2:4; -+ E3_uint32 StatCont3:4; -+ E3_uint32 StatCont4:4; -+ E3_uint32 StatCont5:4; -+ E3_uint32 StatCont6:4; -+ E3_uint32 StatCont7:4; -+#else -+ E3_uint32 StatCont7:4; -+ E3_uint32 StatCont6:4; -+ E3_uint32 StatCont5:4; -+ E3_uint32 StatCont4:4; -+ E3_uint32 StatCont3:4; -+ E3_uint32 StatCont2:4; -+ E3_uint32 StatCont1:4; -+ E3_uint32 StatCont0:4; -+#endif -+ } s; -+} E3_StatsControl; -+ -+typedef volatile union e3_StatsCount -+{ -+ E3_uint64 ClockStat; -+ struct -+ { -+ E3_uint32 ClockLSW; /* read only */ -+ E3_uint32 StatsCount; -+ } s; -+} E3_StatsCount; -+ -+typedef volatile union e3_clock -+{ -+ E3_uint64 NanoSecClock; -+ struct -+ { -+ E3_uint32 ClockLSW; -+ E3_uint32 ClockMSW; -+ } s; -+} E3_Clock; -+#define E3_TIME( X ) ((X).NanoSecClock) -+ -+typedef volatile struct _E3_User_Regs -+{ -+ E3_StatsCount StatCounts[8]; -+ E3_StatsCount InstCount; -+ E3_uint32 pad0; -+ E3_StatsControl StatCont; -+ E3_Clock Clock; -+ E3_uint32 pad1[0x7ea]; -+} E3_User_Regs; -+ -+typedef volatile struct _E3_CommandPort 
-+{
-+ E3_Addr PutDma; /* 0x000 */
-+ E3_uint32 Pad1;
-+ E3_Addr GetDma; /* 0x008 */
-+ E3_uint32 Pad2;
-+ E3_Addr RunThread; /* 0x010 */
-+ E3_uint32 Pad3[3];
-+ E3_Addr WaitEvent0; /* 0x020 */
-+ E3_uint32 Pad4;
-+ E3_Addr WaitEvent1; /* 0x028 */
-+ E3_uint32 Pad5;
-+ E3_Addr SetEvent; /* 0x030 */
-+ E3_uint32 Pad6[3];
-+ E3_uint32 Pad7[0x7f0]; /* Fill out to an 8K page */
-+} E3_CommandPort;
-+/* Should have the new structures for the top four pages of the elan3 space */
-+
-+#define E3_COMMANDPORT_SIZE (sizeof (E3_CommandPort))
-+
-+#ifdef __cplusplus
-+}
-+#endif
-+
-+#endif /* __ELAN3_ELANUREGS_H */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "stroustrup"
-+ * End:
-+ */
-diff -urN clean/include/elan3/elanvp.h linux-2.6.9/include/elan3/elanvp.h
---- clean/include/elan3/elanvp.h 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/elan3/elanvp.h 2004-06-18 05:28:06.000000000 -0400
-@@ -0,0 +1,165 @@
-+/*
-+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ifndef _ELAN3_ELANVP_H
-+#define _ELAN3_ELANVP_H
-+
-+#ident "$Id: elanvp.h,v 1.45 2004/06/18 09:28:06 mike Exp $"
-+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elanvp.h,v $ */
-+
-+#include 
-+#include 
-+#include 
-+
-+#ifdef __cplusplus
-+extern "C" {
-+#endif
-+
-+/*
-+ * Context number allocation.
-+ * [0-31] system contexts
-+ * [32-63] hardware test
-+ * [64-1023] available
-+ * [1024-2047] RMS allocatable
-+ * [2048-4095] kernel comms data contexts
-+ */
-+#define ELAN3_KCOMM_CONTEXT_NUM 0x001 /* old kernel comms context (system) */
-+#define ELAN3_CM_CONTEXT_NUM 0x002 /* new cluster membership comms context (system) */
-+#define ELAN3_MRF_CONTEXT_NUM 0x003 /* multi-rail kernel comms context */
-+#define ELAN3_DMARING_BASE_CONTEXT_NUM 0x010 /* 16 contexts for dma ring issue (system) */
-+#define ELAN3_DMARING_TOP_CONTEXT_NUM 0x01f
-+
-+#define ELAN3_HWTEST_BASE_CONTEXT_NUM 0x020 /* reserved for hardware test */
-+#define ELAN3_HWTEST_TOP_CONTEXT_NUM 0x03f
-+
-+#define ELAN3_KCOMM_BASE_CONTEXT_NUM 0x800 /* kernel comms data transfer contexts */
-+#define ELAN3_KCOMM_TOP_CONTEXT_NUM 0xfff
-+
-+#define ELAN3_HWTEST_CONTEXT(ctx) ((ctx) >= ELAN3_HWTEST_BASE_CONTEXT_NUM && \
-+ (ctx) <= ELAN3_HWTEST_TOP_CONTEXT_NUM)
-+
-+#define ELAN3_SYSTEM_CONTEXT(ctx) (((ctx) & SYS_CONTEXT_BIT) != 0 || \
-+ (ctx) < E3_NUM_CONTEXT_0 || \
-+ (ctx) >= ELAN3_KCOMM_BASE_CONTEXT_NUM)
-+
-+/* Maximum number of virtual processes */
-+#define ELAN3_MAX_VPS (16384)
-+
-+#define ELAN3_INVALID_PROCESS (0x7fffffff) /* A GUARANTEED invalid process # */
-+#define ELAN3_INVALID_NODE (0xFFFF)
-+#define ELAN3_INVALID_CONTEXT (0xFFFF)
-+
-+
-+
-+#if defined(__KERNEL__) && !defined(__ELAN3__)
-+
-+/*
-+ * Contexts are accessible via Elan capabilities;
-+ * for each context that can be "attached" to there
-+ * is an ELAN3_CTXT_INFO structure created by its
-+ * "owner". This also "remembers" all remote
-+ * segments that have "blazed" a trail to it.
-+ *
-+ * If the "owner" goes away the soft info is
-+ * destroyed when it is no longer "attached" or
-+ * "referenced" by a remote segment.
-+ *
-+ * If the owner changes the capability, then
-+ * the soft info must not be "referenced" or
-+ * "attached" before a new process can "attach"
-+ * to it.
-+ */
-+
-+_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::InfoLock,
-+ elan3_info::Next elan3_info::Prev elan3_info::Device elan3_info::Owner
-+ elan3_info::Capability elan3_info::AttachedCapability elan3_info::Context))
-+_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::IntrLock,
-+ elan3_info::Nacking elan3_info::Disabled))
-+_NOTE(DATA_READABLE_WITHOUT_LOCK(elan3_info::Context elan3_info::Device elan3_info::Capability))
-+
-+#endif /* __KERNEL__ */
-+
-+#define LOW_ROUTE_PRIORITY 0
-+#define HIGH_ROUTE_PRIORITY 1
-+
-+#define DEFAULT_ROUTE_TIMEOUT 3
-+#define DEFAULT_ROUTE_PRIORITY LOW_ROUTE_PRIORITY
-+
-+
-+/* a small route is 4 flits (8 bytes), a big route */
-+/* is 8 flits (16 bytes) - each packed route is 4 bits */
-+/* so giving us a maximum of 28 as flit0 does not contain */
-+/* packed routes */
-+#define MAX_FLITS 8
-+#define MAX_PACKED 28
-+
-+/* bit definitions for 64 bit route pointer */
-+#define ROUTE_VALID (1ULL << 63)
-+#define ROUTE_PTR (1ULL << 62)
-+#define ROUTE_CTXT_SHIFT 48
-+#define ROUTE_PTR_MASK ((1ull << ROUTE_CTXT_SHIFT)-1)
-+#define ROUTE_GET_CTXT ((VAL >> ROUTE_CTXT_SHIFT) & 0x3fff )
-+
-+#define SMALL_ROUTE(flits, context) (((E3_uint64) (flits)[0] << 0) | ((E3_uint64) (flits)[1] << 16) | \
-+ ((E3_uint64) (flits)[2] << 32) | ((E3_uint64) (context) << ROUTE_CTXT_SHIFT) | \
-+ ROUTE_VALID)
-+
-+#define BIG_ROUTE_PTR(paddr, context) ((E3_uint64) (paddr) | ((E3_uint64) context << ROUTE_CTXT_SHIFT) | ROUTE_VALID | ROUTE_PTR)
-+
-+#define BIG_ROUTE0(flits) (((E3_uint64) (flits)[0] << 0) | ((E3_uint64) (flits)[1] << 16) | \
-+ ((E3_uint64) (flits)[2] << 32) | ((E3_uint64) (flits)[3] << 48))
-+#define BIG_ROUTE1(flits) (((E3_uint64) (flits)[4] << 0) | ((E3_uint64) (flits)[5] << 16) | \
-+ ((E3_uint64) (flits)[6] << 32) | ((E3_uint64) (flits)[7] << 48))
-+
-+
-+/* defines for first flit of a route */
-+#define FIRST_HIGH_PRI (1 << 15)
-+#define FIRST_AGE(Val) ((Val) << 11)
-+#define FIRST_TIMEOUT(Val) ((Val) << 9)
-+#define FIRST_PACKED(X) ((X) << 7)
-+#define FIRST_ROUTE(Val) (Val)
-+#define FIRST_ADAPTIVE (0x30)
-+#define FIRST_BCAST_TREE (0x20)
-+#define FIRST_MYLINK (0x10)
-+#define FIRST_BCAST(Top, Bot) (0x40 | ((Top) << 3) | (Bot))
-+
-+/* defines for 3 bit packed entries for subsequent flits */
-+#define PACKED_ROUTE(Val) (8 | (Val))
-+#define PACKED_ADAPTIVE (3)
-+#define PACKED_BCAST_TREE (2)
-+#define PACKED_MYLINK (1)
-+#define PACKED_BCAST0(Top,Bot) (4 | (Bot & 3))
-+#define PACKED_BCAST1(Top,Bot) ((Top << 1) | (Bot >> 2))
-+
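To make the flit packing above concrete, a hedged sketch of building a small (three-flit) route; the link numbers, timeout and context are invented, and a real route must also pass the checks reported through the ELAN3_ROUTE_* codes below.

/* Sketch: route out of link 2, then two packed hops (links 5 and 1). */
E3_uint32 flits[MAX_FLITS] = { 0 };
E3_uint64 route;

flits[0] = FIRST_TIMEOUT(DEFAULT_ROUTE_TIMEOUT) | FIRST_PACKED(2) | FIRST_ROUTE(2);
flits[1] = PACKED_ROUTE(5) | (PACKED_ROUTE(1) << 4); /* 4 bits per packed entry */
route    = SMALL_ROUTE(flits, ELAN3_KCOMM_BASE_CONTEXT_NUM); /* context: invented */
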
-+/* ----------------------------------------------------------
-+ * elan3_route functions
-+ * return ELAN3_ROUTE_xxx codes
-+ * ---------------------------------------------------------- */
-+
-+#define ELAN3_ROUTE_SUCCESS (0x00)
-+#define ELAN3_ROUTE_SYSCALL_FAILED (0x01)
-+#define ELAN3_ROUTE_INVALID (0x02)
-+#define ELAN3_ROUTE_TOO_LONG (0x04)
-+#define ELAN3_ROUTE_LOAD_FAILED (0x08)
-+#define ELAN3_ROUTE_PROC_RANGE (0x0f)
-+#define ELAN3_ROUTE_INVALID_LEVEL (0x10)
-+#define ELAN3_ROUTE_OCILATES (0x20)
-+#define ELAN3_ROUTE_WRONG_DEST (0x40)
-+#define ELAN3_ROUTE_TURN_LEVEL (0x80)
-+#define ELAN3_ROUTE_NODEID_UNKNOWN (0xf0)
-+
-+#ifdef __cplusplus
-+}
-+#endif
-+
-+#endif /* _ELAN3_ELANVP_H */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "stroustrup"
-+ * End:
-+ */
-diff -urN clean/include/elan3/events.h linux-2.6.9/include/elan3/events.h
---- clean/include/elan3/events.h 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/elan3/events.h 2003-09-24 09:57:24.000000000 -0400
-@@ -0,0 +1,183 @@
-+/*
-+ * Copyright (c) 1996-2002 by
Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef _ELAN3_EVENTS_H -+#define _ELAN3_EVENTS_H -+ -+#ident "$Id: events.h,v 1.45 2003/09/24 13:57:24 david Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/events.h,v $*/ -+ -+/* -+ * Alignments for events, event queues and blockcopy blocks. -+ */ -+#define E3_EVENT_ALIGN (8) -+#define E3_QUEUE_ALIGN (32) -+#define E3_BLK_ALIGN (64) -+#define E3_BLK_SIZE (64) -+#define E3_BLK_PATTERN (0xfeedface) -+ -+#define E3_EVENT_FREE ((0 << 4) | EV_WCOPY) -+#define E3_EVENT_PENDING ((1 << 4) | EV_WCOPY) -+#define E3_EVENT_ACTIVE ((2 << 4) | EV_WCOPY) -+#define E3_EVENT_FIRED ((3 << 4) | EV_WCOPY) -+#define E3_EVENT_FAILED ((4 << 4) | EV_WCOPY) -+#define E3_EVENT_DONE ((5 << 4) | EV_WCOPY) -+#define E3_EVENT_PRIVATE ((6 << 4) | EV_WCOPY) -+ -+/* -+ * Event values and masks -+ * -+ * Block Copy event xxxxxxxxxxxxxxxx1 -+ * Chained event 30 bit ptr ....0x -+ * Event interrupt 29 bit cookie 01x -+ * Dma event 28 bit ptr 011x -+ * thread event 28 bit ptr 111x -+ */ -+#define EV_CLEAR (0x00000000) -+#define EV_TYPE_BCOPY (0x00000001) -+#define EV_TYPE_CHAIN (0x00000000) -+#define EV_TYPE_EVIRQ (0x00000002) -+#define EV_TYPE_DMA (0x00000006) -+#define EV_TYPE_THREAD (0x0000000e) -+ -+#define EV_TYPE_BCOPY_BYTE (0) -+#define EV_TYPE_BCOPY_HWORD (1) -+#define EV_TYPE_BCOPY_WORD (2) -+#define EV_TYPE_BCOPY_DWORD (3) -+ -+/* -+ * Data type is in the lowest two bits of the Dest pointer. -+ */ -+#define EV_BCOPY_DTYPE_MASK (3) -+#define EV_WCOPY (1) /* [DestWord] = Source */ -+#define EV_BCOPY (0) /* [DestBlock] = [SourceBlock] */ -+ -+#define EV_TYPE_MASK (0x0000000e) -+#define EV_TYPE_MASK_BCOPY (0x00000001) -+#define EV_TYPE_MASK_CHAIN (0x00000002) -+#define EV_TYPE_MASK_EVIRQ (0x00000006) -+#define EV_TYPE_MASK_DMA (0x0000000e) -+#define EV_TYPE_MASK_THREAD (0x0000000e) -+#define EV_TYPE_MASK2 (0x0000000f) -+ -+/* -+ * Min/Max size for Elan queue entries -+ */ -+#define E3_QUEUE_MIN E3_BLK_SIZE -+#define E3_QUEUE_MAX (E3_BLK_SIZE * 5) -+ -+/* -+ * Elan queue state bits -+ */ -+#define E3_QUEUE_FULL (1<<0) -+#define E3_QUEUE_LOCKED (1<<8) -+ -+#ifndef _ASM -+ -+typedef union _E3_Event -+{ -+ E3_uint64 ev_Int64; -+ struct { -+ volatile E3_int32 u_Count; -+ E3_uint32 u_Type; -+ } ev_u; -+} E3_Event; -+ -+typedef union _E3_BlockCopyEvent -+{ -+ E3_uint64 ev_ForceAlign; -+ struct E3_BlockCopyEvent_u { -+ volatile E3_int32 u_Count; -+ E3_uint32 u_Type; -+ E3_Addr u_Source; -+ E3_Addr u_Dest; /* lowest bits are the data type for endian conversion */ -+ } ev_u; -+} E3_BlockCopyEvent; -+ -+#define ev_Type ev_u.u_Type -+#define ev_Count ev_u.u_Count -+#define ev_Source ev_u.u_Source -+#define ev_Dest ev_u.u_Dest -+ -+typedef union _E3_WaitEvent0 -+{ -+ E3_uint64 we_ForceAlign; -+ struct { -+ E3_Addr u_EventLoc; -+ E3_int32 u_WaitCount; -+ } we_u; -+} E3_WaitEvent0; -+#define we_EventLoc we_u.u_EventLoc -+#define we_WaitCount we_u.u_WaitCount -+ -+typedef union _E3_Event_Blk -+{ -+ E3_uint8 eb_Bytes[E3_BLK_SIZE]; -+ E3_uint32 eb_Int32[E3_BLK_SIZE/sizeof (E3_uint32)]; -+ E3_uint64 eb_Int64[E3_BLK_SIZE/sizeof (E3_uint64)]; -+} E3_Event_Blk; -+ -+/* We make eb_done the last word of the blk -+ * so that we can guarantee the rest of the blk is -+ * correct when this value is set. -+ * However, when the TPORT code copies the envelope -+ * info into the blk, it uses a dword endian type. 
-+ * Thus we must correct for this when initialising -+ * the pattern in the Elan SDRAM blk (eeb_done) -+ */ -+#define eb_done eb_Int32[15] -+#define eeb_done eb_Int32[15^WordEndianFlip] -+ -+#define EVENT_WORD_READY(WORD) (*((volatile E3_uint32 *) WORD) != 0) -+#define EVENT_BLK_READY(BLK) (((volatile E3_Event_Blk *) (BLK))->eb_done != 0) -+#define EVENT_READY(EVENT) (((volatile E3_Event *) (EVENT))->ev_Count <= 0) -+ -+#define ELAN3_WAIT_EVENT (0) -+#define ELAN3_POLL_EVENT (-1) -+ -+#define SETUP_EVENT_TYPE(ptr,typeval) (((unsigned long)(ptr)) | (typeval)) -+ -+#define E3_RESET_BCOPY_BLOCK(BLK) \ -+ do { \ -+ (BLK)->eb_done = 0; \ -+ } while (0) -+ -+typedef struct e3_queue -+{ -+ volatile E3_uint32 q_state; /* queue is full=bit0, queue is locked=bit8 */ -+ volatile E3_Addr q_bptr; /* block aligned ptr to current back item */ -+ E3_uint32 q_size; /* size of queue item; 0x1 <= size <= (0x40 * 5) */ -+ E3_Addr q_top; /* block aligned ptr to last queue item */ -+ E3_Addr q_base; /* block aligned ptr to first queue item */ -+ volatile E3_Addr q_fptr; /* block aligned ptr to current front item */ -+ E3_Event q_event; /* queue event */ -+} E3_Queue; -+ -+typedef struct e3_blockcopy_queue -+{ -+ volatile E3_uint32 q_state; /* queue is full=bit0, queue is locked=bit8 */ -+ volatile E3_Addr q_bptr; /* block aligned ptr to current back item */ -+ E3_uint32 q_size; /* size of queue item; 0x1 <= size <= (0x40 * 5) */ -+ E3_Addr q_top; /* block aligned ptr to last queue item */ -+ E3_Addr q_base; /* block aligned ptr to first queue item */ -+ volatile E3_Addr q_fptr; /* block aligned ptr to current front item */ -+ E3_BlockCopyEvent q_event; /* queue event */ -+ E3_uint32 q_pad[6]; -+} E3_BlockCopyQueue; -+ -+#define E3_QUEUE_EVENT_OFFSET 24 -+#define QUEUE_FULL(Q) ((Q)->q_state & E3_QUEUE_FULL) -+ -+#endif /* ! _ASM */ -+ -+#endif /* _ELAN3_EVENTS_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan3/intrinsics.h linux-2.6.9/include/elan3/intrinsics.h ---- clean/include/elan3/intrinsics.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan3/intrinsics.h 2003-09-24 09:57:24.000000000 -0400 -@@ -0,0 +1,320 @@ -+/* -+ * Copyright (c) 2003 by Quadrics Limited. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef _ELAN3_INTRINSICS_H -+#define _ELAN3_INTRINSICS_H -+ -+#ident "$Id: intrinsics.h,v 1.35 2003/09/24 13:57:24 david Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/intrinsics.h,v $ */ -+ -+#include -+#include -+ -+/* -+ * This file contains definitions of the macros for accessing the QSW -+ * specific instructions, as if they were functions. -+ * The results from the function -+ */ -+ -+#define C_ACK_OK 0 /* return from c_close() */ -+#define C_ACK_TESTFAIL 1 /* return from c_close() */ -+#define C_ACK_DISCARD 2 /* return from c_close() */ -+#define C_ACK_ERROR 3 /* return from c_close() */ -+ -+/* -+ * Elan asi's for tproc block accesses -+ */ -+#define EASI_BYTE 0 -+#define EASI_HALF 1 -+#define EASI_WORD 2 -+#define EASI_DOUBLE 3 -+ -+#if defined(__ELAN3__) && !defined (_ASM) -+ -+extern inline void c_abort(void) -+{ -+ asm volatile (".word 0x0000 ! die you thread you " : : ); -+} -+ -+extern inline void c_suspend(void) -+{ -+ asm volatile ( -+ "set 1f, %%i7 ! RevB bug fix. get address of the wakeup inst\n" -+ "andcc %%i7,0x4,%%g0 ! RevB bug fix. check alignment\n" -+ "bne 1f ! RevB bug fix. jump to other alignment\n" -+ "nop ! RevB bug fix. 
delay slot\n" -+ "ldd [%%i7],%%i6 ! RevB bug fix. data fetch of instructions\n" -+ "suspend ! do the real suspend\n" -+ "1: add %%i7,5*4,%%i7 ! RevB bug fix. Point i7 to first ldblock\n" -+ "ldd [%%i7],%%i6 ! RevB bug fix. data fetch of instructions\n" -+ "suspend ! do the real suspend\n" : : ); -+} -+ -+extern inline int c_close(void) -+{ -+ register int rc asm("o0"); -+ -+ asm volatile ("close %0" : "=r" (rc) : ); -+ -+ return (rc); -+} -+ -+extern inline int c_close_cookie(volatile E3_uint32 *cookiep, E3_uint32 next) -+{ -+ register int rc asm("o0"); -+ -+ asm volatile ("close %0 ! close the packet\n" -+ "bz,a 1f ! ack received\n" -+ "st %1, [%2] ! update cookie on ack\n" -+ "1: ! label for not-ack\n" -+ : "=r" (rc) : "r" (next), "r" (cookiep)); -+ -+ return (rc); -+} -+ -+extern inline void c_break_busywait(void) -+{ -+ asm volatile ( -+ "breaktest ! test to see if break necessary\n" -+ "bpos 1f ! no other thread ready\n" -+ "nop ! delay slot\n" -+ "sub %%sp,3*8*4,%%sp ! Space to save the registers\n" -+ "stblock %%g0,[%%sp+0] ! save the globals\n" -+ "stblock %%i0,[%%sp+8*4] ! save the ins\n" -+ "stblock %%l0,[%%sp+16*4] ! save the locals\n" -+ "set 2f, %%i7 ! RevB bug fix. get address of the wakeup inst\n" -+ "andcc %%i7,0x4,%%g0 ! RevB bug fix. check alignment\n" -+ "bne 3f ! RevB bug fix. jump to other alignment\n" -+ "nop ! RevB bug fix. delay slot\n" -+ "ldd [%%i7],%%i6 ! RevB bug fix. data fetch of instructions\n" -+ "break ! do the real break\n" -+ "2: b 4f ! RevB bug fix. Branch over other alignment case\n" -+ " ldblock [%%sp+16*4],%%l0 ! RevB bug fix. restore locals in delay slot\n" -+ "3: add %%i7,5*4,%%i7 ! RevB bug fix. Point i7 to first ldblock\n" -+ "ldd [%%i7],%%i6 ! RevB bug fix. data fetch of instructions\n" -+ "break ! do the real break\n" -+ "ldblock [%%sp+16*4],%%l0 ! restore locals\n" -+ "4: ldblock [%%sp+8*4], %%i0 ! restore ins\n" -+ "ldblock [%%sp+0],%%g0 ! restore globals\n" -+ "add %%sp,3*8*4,%%sp ! restore stack pointer\n" -+ "1: " : : ); -+} -+ -+extern inline void c_break(void) -+{ -+ asm volatile ( -+ "breaktest ! test to see if break necessary\n" -+ "bne 1f ! haven't exceeded our inst count yet\n" -+ "nop ! delay slot\n" -+ "sub %%sp,3*8*4,%%sp ! Space to save the registers\n" -+ "stblock %%g0,[%%sp+0] ! save the globals\n" -+ "stblock %%i0,[%%sp+8*4] ! save the ins\n" -+ "stblock %%l0,[%%sp+16*4] ! save the locals\n" -+ "set 2f, %%i7 ! RevB bug fix. get address of the wakeup inst\n" -+ "andcc %%i7,0x4,%%g0 ! RevB bug fix. check alignment\n" -+ "bne 3f ! RevB bug fix. jump to other alignment\n" -+ "nop ! RevB bug fix. delay slot\n" -+ "ldd [%%i7],%%i6 ! RevB bug fix. data fetch of instructions\n" -+ "break ! do the real break\n" -+ "2: b 4f ! RevB bug fix. Branch over other alignment case\n" -+ " ldblock [%%sp+16*4],%%l0 ! RevB bug fix. restore locals in delay slot\n" -+ "3: add %%i7,5*4,%%i7 ! RevB bug fix. Point i7 to first ldblock\n" -+ "ldd [%%i7],%%i6 ! RevB bug fix. data fetch of instructions\n" -+ "break ! do the real break\n" -+ "ldblock [%%sp+16*4],%%l0 ! restore locals\n" -+ "4: ldblock [%%sp+8*4], %%i0 ! restore ins\n" -+ "ldblock [%%sp+0],%%g0 ! restore globals\n" -+ "add %%sp,3*8*4,%%sp ! 
restore stack pointer\n"
-+ "1: " : : );
-+}
-+
-+extern inline void c_open( const int arg )
-+{
-+ asm volatile ("open %0" : : "r" (arg) );
-+ asm volatile ("nop; nop; nop; nop");
-+ asm volatile ("nop; nop; nop; nop");
-+ asm volatile ("nop; nop; nop; nop");
-+ asm volatile ("nop; nop; nop; nop");
-+ asm volatile ("nop; nop; nop; nop");
-+ asm volatile ("nop; nop; nop; nop");
-+}
-+
-+extern inline void c_waitevent( volatile E3_Event *const ptr,
-+ const int count)
-+{
-+ register volatile E3_Event *a_unlikely asm("o0") = ptr;
-+ register int a_very_unlikely asm("o1") = count;
-+
-+ asm volatile (
-+ "sub %%sp,1*8*4,%%sp ! Space to save the registers\n"
-+ "stblock %%i0,[%%sp+0] ! save the ins\n"
-+ "set 2f, %%i7 ! RevB bug fix. get address of the wakeup inst\n"
-+ "andcc %%i7,0x4,%%g0 ! RevB bug fix. check alignment\n"
-+ "bne 3f ! RevB bug fix. jump to other alignment\n"
-+ "nop ! RevB bug fix. delay slot\n"
-+ "ldd [%%i7],%%i4 ! RevB bug fix. data fetch of instructions\n"
-+ "waitevent ! do the business\n"
-+ "2: b 4f ! RevB bug fix. Branch over other alignment case\n"
-+ " ldblock [%%sp+0],%%i0 ! RevB bug fix. restore ins in delay slot\n"
-+ "3: add %%i7,5*4,%%i7 ! RevB bug fix. Point i7 to first ldblock\n"
-+ "ldd [%%i7],%%i4 ! RevB bug fix. data fetch of instructions\n"
-+ "waitevent ! do the business\n"
-+ "ldblock [%%sp+0],%%i0 ! restore ins\n"
-+ "4: add %%sp,1*8*4,%%sp ! restore stack pointer\n"
-+ : /* no outputs */
-+ : /* inputs */ "r" (a_unlikely), "r" (a_very_unlikely)
-+ : /* clobbered */ "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
-+ "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7" );
-+
-+}
-+
-+#define c_sendtrans0(type,dest) \
-+ asm volatile ("sendtrans %0, %%g0, %1" : : "i" (type), "r" (dest))
-+
-+#define c_sendtrans1(type,dest,arg) \
-+ asm volatile ("sendtrans %0, %2, %1" : : "i" (type), "r" (dest), "r" (arg))
-+
-+#define c_sendtrans2(type,dest,arg1,arg2) \
-+ do { \
-+ register const unsigned long a_unlikely_1 asm("o4") = arg1; \
-+ register const unsigned long a_unlikely_2 asm("o5") = arg2; \
-+ asm volatile ("sendtrans %0, %2, %1" \
-+ : : "i" (type), "r" (dest), "r" (a_unlikely_1), "r" (a_unlikely_2)); \
-+ } while(0)
-+
-+#define c_sendmem(type,dest,ptr) \
-+ asm volatile ("sendtrans %0, [%2], %1" : : "i" (type), "r" (dest), "r" (ptr))
-+
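A hedged sketch of the open/transmit/close idiom these intrinsics suggest: open a packet to a virtual process, issue transactions, close, and retry while the C_ACK_* status says the packet was discarded. The function and its retry policy are assumptions, not code from this patch.

/* Sketch only: send one packet to virtual process vp. */
extern inline int send_one_packet(int vp)
{
    int rc;
    do {
        c_open(vp);                  /* open a packet to virtual process vp */
        /* a real sender would issue c_sendtrans0/c_sendmem transactions here */
        rc = c_close();              /* returns one of the C_ACK_* codes */
    } while (rc == C_ACK_DISCARD);   /* discarded packets are safe to retry */
    return rc;
}
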
-+/* Copy a single 64-byte block (src blk is read using a BYTE endian type) */
-+extern inline void elan3_copy64b(void *src, void *dst)
-+{
-+ /* Copy 64 bytes using ldblock/stblock
-+ * We save and restore the locals/ins because if we don't gcc
-+ * really makes a bad job of optimising the rest of the thread code!
-+ *
-+ * We force the parameters in g5, g6 so that they aren't
-+ * trashed by the loadblk32 into the locals/ins
-+ */
-+ register void *tmp1 asm("g5") = src;
-+ register void *tmp2 asm("g6") = dst;
-+
-+ asm volatile (
-+ "and %%sp,63,%%g7 ! Calculate stack alignment\n"
-+ "sub %%sp,2*8*4,%%sp ! Space to save the registers\n"
-+ "sub %%sp,%%g7,%%sp ! align stack\n"
-+ "stblock64 %%l0,[%%sp] ! save the locals and ins\n"
-+ "ldblock64a [%0]%2,%%l0 ! load 64-byte block into locals/ins\n"
-+ "stblock64a %%l0,[%1]%2 ! store 64-byte block from local/ins\n"
-+ "ldblock64 [%%sp],%%l0 ! restore locals and ins\n"
-+ "add %%sp,%%g7, %%sp ! undo alignment\n"
-+ "add %%sp,2*8*4,%%sp ! restore stack pointer\n"
-+ : /* outputs */
-+ : /* inputs */ "r" (tmp1), "r" (tmp2), "n" (EASI_BYTE)
-+ : /* clobbered */ "g5", "g6", "g7" );
-+}
-+
-+/* Copy a single 64-byte block (src blk is read using a WORD endian type) */
-+extern inline void elan3_copy64w(void *src, void *dst)
-+{
-+ /* Copy 64 bytes using ldblock/stblock
-+ * We save and restore the locals/ins because if we don't gcc
-+ * really makes a bad job of optimising the rest of the thread code!
-+ *
-+ * We force the parameters in g5, g6 so that they aren't
-+ * trashed by the loadblk32 into the locals/ins
-+ */
-+ register void *tmp1 asm("g5") = src;
-+ register void *tmp2 asm("g6") = dst;
-+
-+ asm volatile (
-+ "and %%sp,63,%%g7 ! Calculate stack alignment\n"
-+ "sub %%sp,2*8*4,%%sp ! Space to save the registers\n"
-+ "sub %%sp,%%g7,%%sp ! align stack\n"
-+ "stblock64 %%l0,[%%sp] ! save the locals and ins\n"
-+ "ldblock64a [%0]%2,%%l0 ! load 64-byte block into locals/ins\n"
-+ "stblock64a %%l0,[%1]%2 ! store 64-byte block from local/ins\n"
-+ "ldblock64 [%%sp],%%l0 ! restore locals and ins\n"
-+ "add %%sp,%%g7, %%sp ! undo alignment\n"
-+ "add %%sp,2*8*4,%%sp ! restore stack pointer\n"
-+ : /* outputs */
-+ : /* inputs */ "r" (tmp1), "r" (tmp2), "n" (EASI_WORD)
-+ : /* clobbered */ "g5", "g6", "g7" );
-+}
-+
-+/* Read a 64-bit value with a WORD (32-bit) endian type */
-+extern inline E3_uint64 elan3_read64w( volatile E3_uint64 *const ptr )
-+{
-+ E3_uint64 result;
-+
-+ asm volatile (
-+ "ldblock8a [%1]%2, %0\n"
-+ : /* outputs */ "=r" (result)
-+ : /* inputs */ "r" (ptr), "n" (EASI_WORD) );
-+
-+ return( result );
-+}
-+
-+/* Read a 64-bit value with a DOUBLEWORD (64-bit) endian type */
-+extern inline E3_uint64 elan3_read64dw( volatile E3_uint64 *const ptr )
-+{
-+ E3_uint64 result;
-+
-+ asm volatile (
-+ "ldblock8a [%1]%2, %0\n"
-+ : /* outputs */ "=r" (result)
-+ : /* inputs */ "r" (ptr), "n" (EASI_DOUBLE) );
-+
-+ return( result );
-+}
-+
-+/* Write a 64-bit value with a WORD (32-bit) endian type */
-+extern inline void elan3_write64w( volatile E3_uint64 *const ptr, E3_uint64 value )
-+{
-+ asm volatile (
-+ "stblock8a %1, [%0]%2\n"
-+ : /* no outputs */
-+ : /* inputs */ "r" (ptr), "r" (value), "n" (EASI_WORD) );
-+}
-+
-+/* Write a 64-bit value with a DOUBLEWORD (64-bit) endian type */
-+extern inline void elan3_write64dw( volatile E3_uint64 *const ptr, E3_uint64 value )
-+{
-+ asm volatile (
-+ "stblock8a %1, [%0]%2\n"
-+ : /* no outputs */
-+ : /* inputs */ "r" (ptr), "r" (value), "n" (EASI_DOUBLE) );
-+}
-+
-+extern inline E3_uint32 c_swap(volatile E3_uint32 *source, E3_uint32 result)
-+{
-+ asm volatile("swap [%1],%0\n"
-+ : "=r" (result)
-+ : "r" (source) ,"0" (result)
-+ : "memory");
-+ return result;
-+}
-+
-+extern inline E3_uint32 c_swap_save(volatile E3_uint32 *source, const E3_uint32 result)
-+{
-+ register E3_uint32 a_unlikely;
-+ asm volatile("" : "=r" (a_unlikely) : );
-+
-+ asm volatile("mov %2,%0; swap [%1],%0\n"
-+ : "=r" (a_unlikely)
-+ : "r" (source) ,"r" (result), "0" (a_unlikely)
-+ : "memory");
-+ return a_unlikely;
-+}
-+#endif /* (__ELAN3__) && !(_ASM) */
-+
-+#endif /* _ELAN3_INTRINSICS_H */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "stroustrup"
-+ * End:
-+ */
-diff -urN clean/include/elan3/minames.h linux-2.6.9/include/elan3/minames.h
---- clean/include/elan3/minames.h 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/elan3/minames.h 2005-09-07 10:39:37.000000000 -0400
-@@ -0,0 +1,256 @@
-+{MI_WaitForRemoteDescRead, "MI_WaitForRemoteDescRead"},
-+{MI_WaitForRemoteDescRead2, "MI_WaitForRemoteDescRead2"}, -+{MI_WaitForRemoteDescRead2_seq1, "MI_WaitForRemoteDescRead2_seq1"}, -+{MI_SendRemoteDmaRoutes, "MI_SendRemoteDmaRoutes"}, -+{MI_IProcTrapped, "MI_IProcTrapped"}, -+{MI_DProcTrapped, "MI_DProcTrapped"}, -+{MI_CProcTrapped, "MI_CProcTrapped"}, -+{MI_TProcTrapped, "MI_TProcTrapped"}, -+{MI_TestWhichDmaQueue, "MI_TestWhichDmaQueue"}, -+{MI_TestWhichDmaQueue_seq1, "MI_TestWhichDmaQueue_seq1"}, -+{MI_InputRemoteDmaUpdateBPtr, "MI_InputRemoteDmaUpdateBPtr"}, -+{MI_FixupQueueContextAndRemoteBit, "MI_FixupQueueContextAndRemoteBit"}, -+{MI_FixupQueueContextAndRemoteBit_seq1, "MI_FixupQueueContextAndRemoteBit_seq1"}, -+{MI_FixupQueueContextAndRemoteBit_seq2, "MI_FixupQueueContextAndRemoteBit_seq2"}, -+{MI_FixupQueueContextAndRemoteBit_seq3, "MI_FixupQueueContextAndRemoteBit_seq3"}, -+{MI_FixupQueueContextAndRemoteBit_seq4, "MI_FixupQueueContextAndRemoteBit_seq4"}, -+{MI_RunDmaCommand, "MI_RunDmaCommand"}, -+{MI_DoSendRemoteDmaDesc, "MI_DoSendRemoteDmaDesc"}, -+{MI_DequeueNonSysCntxDma, "MI_DequeueNonSysCntxDma"}, -+{MI_WaitForRemoteDescRead1, "MI_WaitForRemoteDescRead1"}, -+{MI_RemoteDmaCommand, "MI_RemoteDmaCommand"}, -+{MI_WaitForRemoteRoutes, "MI_WaitForRemoteRoutes"}, -+{MI_DequeueSysCntxDma, "MI_DequeueSysCntxDma"}, -+{MI_ExecuteDmaDescriptorForQueue, "MI_ExecuteDmaDescriptorForQueue"}, -+{MI_ExecuteDmaDescriptor1, "MI_ExecuteDmaDescriptor1"}, -+{MI_ExecuteDmaDescriptor1_seq1, "MI_ExecuteDmaDescriptor1_seq1"}, -+{MI_ExecuteDmaDescriptor1_seq2, "MI_ExecuteDmaDescriptor1_seq2"}, -+{MI_ExecuteDmaDescriptor1_seq3, "MI_ExecuteDmaDescriptor1_seq3"}, -+{MI_GetNewSizeInProg, "MI_GetNewSizeInProg"}, -+{MI_GetNewSizeInProg_seq1, "MI_GetNewSizeInProg_seq1"}, -+{MI_FirstBlockRead, "MI_FirstBlockRead"}, -+{MI_ExtraFirstBlockRead, "MI_ExtraFirstBlockRead"}, -+{MI_UnimplementedError, "MI_UnimplementedError"}, -+{MI_UpdateDescriptor, "MI_UpdateDescriptor"}, -+{MI_UpdateDescriptor_seq1, "MI_UpdateDescriptor_seq1"}, -+{MI_UpdateDescriptor_seq2, "MI_UpdateDescriptor_seq2"}, -+{MI_UpdateDescriptor_seq3, "MI_UpdateDescriptor_seq3"}, -+{MI_UpdateDescriptor_seq4, "MI_UpdateDescriptor_seq4"}, -+{MI_UpdateDescriptor_seq5, "MI_UpdateDescriptor_seq5"}, -+{MI_GetNextSizeInProg, "MI_GetNextSizeInProg"}, -+{MI_DoStopThisDma, "MI_DoStopThisDma"}, -+{MI_DoStopThisDma_seq1, "MI_DoStopThisDma_seq1"}, -+{MI_GenNewBytesToRead, "MI_GenNewBytesToRead"}, -+{MI_WaitForEventReadTy1, "MI_WaitForEventReadTy1"}, -+{MI_WaitUpdateEvent, "MI_WaitUpdateEvent"}, -+{MI_WaitUpdateEvent_seq1, "MI_WaitUpdateEvent_seq1"}, -+{MI_DoSleepOneTickThenRunable, "MI_DoSleepOneTickThenRunable"}, -+{MI_RunEvent, "MI_RunEvent"}, -+{MI_EnqueueThread, "MI_EnqueueThread"}, -+{MI_CheckContext0, "MI_CheckContext0"}, -+{MI_EnqueueDma, "MI_EnqueueDma"}, -+{MI_CprocTrapping, "MI_CprocTrapping"}, -+{MI_CprocTrapping_seq1, "MI_CprocTrapping_seq1"}, -+{MI_WaitForRemoteRoutes1, "MI_WaitForRemoteRoutes1"}, -+{MI_SetEventCommand, "MI_SetEventCommand"}, -+{MI_DoSetEvent, "MI_DoSetEvent"}, -+{MI_DoRemoteSetEventNowOrTrapQueueingDma, "MI_DoRemoteSetEventNowOrTrapQueueingDma"}, -+{MI_DoRemoteSetEventNowOrTrapQueueingDma_seq1, "MI_DoRemoteSetEventNowOrTrapQueueingDma_seq1"}, -+{MI_SendRemoteDmaRoutes2, "MI_SendRemoteDmaRoutes2"}, -+{MI_WaitForRemoteRoutes2, "MI_WaitForRemoteRoutes2"}, -+{MI_WaitEventCommandTy0, "MI_WaitEventCommandTy0"}, -+{MI_DequeueNonSysCntxDma2, "MI_DequeueNonSysCntxDma2"}, -+{MI_WaitEventCommandTy1, "MI_WaitEventCommandTy1"}, -+{MI_WaitEventCommandTy1_seq1, "MI_WaitEventCommandTy1_seq1"}, 
-+{MI_DequeueNonSysCntxThread, "MI_DequeueNonSysCntxThread"}, -+{MI_DequeueSysCntxDma1, "MI_DequeueSysCntxDma1"}, -+{MI_DequeueSysCntxThread, "MI_DequeueSysCntxThread"}, -+{MI_TestNonSysCntxDmaQueueEmpty, "MI_TestNonSysCntxDmaQueueEmpty"}, -+{MI_TestNonSysCntxDmaQueueEmpty_seq1, "MI_TestNonSysCntxDmaQueueEmpty_seq1"}, -+{MI_TestNonSysCntxDmaQueueEmpty_seq2, "MI_TestNonSysCntxDmaQueueEmpty_seq2"}, -+{MI_RunThreadCommand, "MI_RunThreadCommand"}, -+{MI_SetEventWaitForLastAcess, "MI_SetEventWaitForLastAcess"}, -+{MI_SetEventReadWait, "MI_SetEventReadWait"}, -+{MI_SetEventReadWait_seq1, "MI_SetEventReadWait_seq1"}, -+{MI_TestEventType, "MI_TestEventType"}, -+{MI_TestEventType_seq1, "MI_TestEventType_seq1"}, -+{MI_TestEventBit2, "MI_TestEventBit2"}, -+{MI_DmaDescOrBlockCopyOrChainedEvent, "MI_DmaDescOrBlockCopyOrChainedEvent"}, -+{MI_RunThread, "MI_RunThread"}, -+{MI_RunThread1, "MI_RunThread1"}, -+{MI_RunThread1_seq1, "MI_RunThread1_seq1"}, -+{MI_IncDmaSysCntxBPtr, "MI_IncDmaSysCntxBPtr"}, -+{MI_IncDmaSysCntxBPtr_seq1, "MI_IncDmaSysCntxBPtr_seq1"}, -+{MI_IncDmaSysCntxBPtr_seq2, "MI_IncDmaSysCntxBPtr_seq2"}, -+{MI_WaitForCntxDmaDescRead, "MI_WaitForCntxDmaDescRead"}, -+{MI_FillInContext, "MI_FillInContext"}, -+{MI_FillInContext_seq1, "MI_FillInContext_seq1"}, -+{MI_WriteNewDescToQueue, "MI_WriteNewDescToQueue"}, -+{MI_WriteNewDescToQueue_seq1, "MI_WriteNewDescToQueue_seq1"}, -+{MI_TestForQueueWrap, "MI_TestForQueueWrap"}, -+{MI_TestForQueueWrap_seq1, "MI_TestForQueueWrap_seq1"}, -+{MI_TestQueueIsFull, "MI_TestQueueIsFull"}, -+{MI_TestQueueIsFull_seq1, "MI_TestQueueIsFull_seq1"}, -+{MI_TestQueueIsFull_seq2, "MI_TestQueueIsFull_seq2"}, -+{MI_CheckPsychoShitFixup, "MI_CheckPsychoShitFixup"}, -+{MI_PsychoShitFixupForcedRead, "MI_PsychoShitFixupForcedRead"}, -+{MI_PrepareDMATimeSlice, "MI_PrepareDMATimeSlice"}, -+{MI_PrepareDMATimeSlice_seq1, "MI_PrepareDMATimeSlice_seq1"}, -+{MI_TProcRestartFromTrapOrTestEventBit2, "MI_TProcRestartFromTrapOrTestEventBit2"}, -+{MI_TProcRestartFromTrapOrTestEventBit2_seq1, "MI_TProcRestartFromTrapOrTestEventBit2_seq1"}, -+{MI_WaitForGlobalsRead, "MI_WaitForGlobalsRead"}, -+{MI_WaitForNPCRead, "MI_WaitForNPCRead"}, -+{MI_EventInterrupt, "MI_EventInterrupt"}, -+{MI_EventInterrupt_seq1, "MI_EventInterrupt_seq1"}, -+{MI_EventInterrupt_seq2, "MI_EventInterrupt_seq2"}, -+{MI_EventInterrupt_seq3, "MI_EventInterrupt_seq3"}, -+{MI_TestSysCntxDmaQueueEmpty, "MI_TestSysCntxDmaQueueEmpty"}, -+{MI_TestSysCntxDmaQueueEmpty_seq1, "MI_TestSysCntxDmaQueueEmpty_seq1"}, -+{MI_TestIfRemoteDesc, "MI_TestIfRemoteDesc"}, -+{MI_DoDmaLocalSetEvent, "MI_DoDmaLocalSetEvent"}, -+{MI_DoDmaLocalSetEvent_seq1, "MI_DoDmaLocalSetEvent_seq1"}, -+{MI_DoDmaLocalSetEvent_seq2, "MI_DoDmaLocalSetEvent_seq2"}, -+{MI_DmaLoop1, "MI_DmaLoop1"}, -+{MI_ExitDmaLoop, "MI_ExitDmaLoop"}, -+{MI_ExitDmaLoop_seq1, "MI_ExitDmaLoop_seq1"}, -+{MI_RemoteDmaTestPAckType, "MI_RemoteDmaTestPAckType"}, -+{MI_PacketDiscardOrTestFailRecIfCCis0, "MI_PacketDiscardOrTestFailRecIfCCis0"}, -+{MI_PacketDiscardOrTestFailRecIfCCis0_seq1, "MI_PacketDiscardOrTestFailRecIfCCis0_seq1"}, -+{MI_TestNackFailIsZero2, "MI_TestNackFailIsZero2"}, -+{MI_TestNackFailIsZero3, "MI_TestNackFailIsZero3"}, -+{MI_DmaFailCountError, "MI_DmaFailCountError"}, -+{MI_TestDmaForSysCntx, "MI_TestDmaForSysCntx"}, -+{MI_TestDmaForSysCntx_seq1, "MI_TestDmaForSysCntx_seq1"}, -+{MI_TestDmaForSysCntx_seq2, "MI_TestDmaForSysCntx_seq2"}, -+{MI_TestAeqB2, "MI_TestAeqB2"}, -+{MI_TestAeqB2_seq1, "MI_TestAeqB2_seq1"}, -+{MI_GetNextDmaDescriptor, "MI_GetNextDmaDescriptor"}, 
-+{MI_DequeueSysCntxDma2, "MI_DequeueSysCntxDma2"}, -+{MI_InputSetEvent, "MI_InputSetEvent"}, -+{MI_PutBackSysCntxDma, "MI_PutBackSysCntxDma"}, -+{MI_PutBackSysCntxDma_seq1, "MI_PutBackSysCntxDma_seq1"}, -+{MI_PutBackSysCntxDma_seq2, "MI_PutBackSysCntxDma_seq2"}, -+{MI_InputRemoteDma, "MI_InputRemoteDma"}, -+{MI_InputRemoteDma_seq1, "MI_InputRemoteDma_seq1"}, -+{MI_WaitOneTickForWakeup1, "MI_WaitOneTickForWakeup1"}, -+{MI_SendRemoteDmaDesc, "MI_SendRemoteDmaDesc"}, -+{MI_InputLockQueue, "MI_InputLockQueue"}, -+{MI_CloseTheTrappedPacketIfCCis1, "MI_CloseTheTrappedPacketIfCCis1"}, -+{MI_CloseTheTrappedPacketIfCCis1_seq1, "MI_CloseTheTrappedPacketIfCCis1_seq1"}, -+{MI_PostDmaInterrupt, "MI_PostDmaInterrupt"}, -+{MI_InputUnLockQueue, "MI_InputUnLockQueue"}, -+{MI_WaitForUnLockDescRead, "MI_WaitForUnLockDescRead"}, -+{MI_SendEOPforRemoteDma, "MI_SendEOPforRemoteDma"}, -+{MI_LookAtRemoteAck, "MI_LookAtRemoteAck"}, -+{MI_InputWriteBlockQueue, "MI_InputWriteBlockQueue"}, -+{MI_WaitForSpStore, "MI_WaitForSpStore"}, -+{MI_TProcNext, "MI_TProcNext"}, -+{MI_TProcStoppedRunning, "MI_TProcStoppedRunning"}, -+{MI_InputWriteBlock, "MI_InputWriteBlock"}, -+{MI_RunDmaOrDeqNonSysCntxDma, "MI_RunDmaOrDeqNonSysCntxDma"}, -+{MI_ExecuteDmaDescriptorForRun, "MI_ExecuteDmaDescriptorForRun"}, -+{MI_ConfirmQueueLock, "MI_ConfirmQueueLock"}, -+{MI_DmaInputIdentify, "MI_DmaInputIdentify"}, -+{MI_TProcStoppedRunning2, "MI_TProcStoppedRunning2"}, -+{MI_TProcStoppedRunning2_seq1, "MI_TProcStoppedRunning2_seq1"}, -+{MI_TProcStoppedRunning2_seq2, "MI_TProcStoppedRunning2_seq2"}, -+{MI_ThreadInputIdentify, "MI_ThreadInputIdentify"}, -+{MI_InputIdWriteAddrAndType3, "MI_InputIdWriteAddrAndType3"}, -+{MI_IProcTrappedWriteStatus, "MI_IProcTrappedWriteStatus"}, -+{MI_FinishTrappingEop, "MI_FinishTrappingEop"}, -+{MI_InputTestTrans, "MI_InputTestTrans"}, -+{MI_TestAeqB3, "MI_TestAeqB3"}, -+{MI_ThreadUpdateNonSysCntxBack, "MI_ThreadUpdateNonSysCntxBack"}, -+{MI_ThreadQueueOverflow, "MI_ThreadQueueOverflow"}, -+{MI_RunContext0Thread, "MI_RunContext0Thread"}, -+{MI_RunContext0Thread_seq1, "MI_RunContext0Thread_seq1"}, -+{MI_RunContext0Thread_seq2, "MI_RunContext0Thread_seq2"}, -+{MI_RunDmaDesc, "MI_RunDmaDesc"}, -+{MI_RunDmaDesc_seq1, "MI_RunDmaDesc_seq1"}, -+{MI_RunDmaDesc_seq2, "MI_RunDmaDesc_seq2"}, -+{MI_TestAeqB, "MI_TestAeqB"}, -+{MI_WaitForNonCntxDmaDescRead, "MI_WaitForNonCntxDmaDescRead"}, -+{MI_DmaQueueOverflow, "MI_DmaQueueOverflow"}, -+{MI_BlockCopyEvent, "MI_BlockCopyEvent"}, -+{MI_BlockCopyEventReadBlock, "MI_BlockCopyEventReadBlock"}, -+{MI_BlockCopyWaitForReadData, "MI_BlockCopyWaitForReadData"}, -+{MI_InputWriteWord, "MI_InputWriteWord"}, -+{MI_TraceSetEvents, "MI_TraceSetEvents"}, -+{MI_TraceSetEvents_seq1, "MI_TraceSetEvents_seq1"}, -+{MI_TraceSetEvents_seq2, "MI_TraceSetEvents_seq2"}, -+{MI_InputWriteDoubleWd, "MI_InputWriteDoubleWd"}, -+{MI_SendLockTransIfCCis1, "MI_SendLockTransIfCCis1"}, -+{MI_WaitForDmaRoutes1, "MI_WaitForDmaRoutes1"}, -+{MI_LoadDmaContext, "MI_LoadDmaContext"}, -+{MI_InputTestAndSetWord, "MI_InputTestAndSetWord"}, -+{MI_InputTestAndSetWord_seq1, "MI_InputTestAndSetWord_seq1"}, -+{MI_GetDestEventValue, "MI_GetDestEventValue"}, -+{MI_SendDmaIdentify, "MI_SendDmaIdentify"}, -+{MI_InputAtomicAddWord, "MI_InputAtomicAddWord"}, -+{MI_LoadBFromTransD0, "MI_LoadBFromTransD0"}, -+{MI_ConditionalWriteBackCCTrue, "MI_ConditionalWriteBackCCTrue"}, -+{MI_WaitOneTickForWakeup, "MI_WaitOneTickForWakeup"}, -+{MI_SendFinalUnlockTrans, "MI_SendFinalUnlockTrans"}, -+{MI_SendDmaEOP, "MI_SendDmaEOP"}, 
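Editorial aside (hedged; the table continues below and ends at MI_SwapForRunable): these {MI_xxx, "MI_xxx"} pairs are bare initializer entries rather than a complete declaration, so minames.h is evidently meant to be textually #included inside an array initializer by a consumer that supplies the surrounding type. A sketch of such a consumer; the MI_NAME type and the mi_names[] table name are assumptions, not names from the patch:

#include <elan3/urom_addrs.h>	/* defines the MI_* microcode addresses */

/* Hypothetical consumer: wrap the raw initializer entries of minames.h
 * in an array, pairing each microcode address with a printable name. */
typedef struct mi_name {
	int         mi;		/* MI_* microcode address */
	const char *name;	/* printable form for trap/debug output */
} MI_NAME;

static const MI_NAME mi_names[] = {
#include <elan3/minames.h>
};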
-+{MI_GenLastAddrForPsycho, "MI_GenLastAddrForPsycho"}, -+{MI_FailedAckIfCCis0, "MI_FailedAckIfCCis0"}, -+{MI_FailedAckIfCCis0_seq1, "MI_FailedAckIfCCis0_seq1"}, -+{MI_WriteDmaSysCntxDesc, "MI_WriteDmaSysCntxDesc"}, -+{MI_TimesliceDmaQueueOverflow, "MI_TimesliceDmaQueueOverflow"}, -+{MI_DequeueNonSysCntxThread1, "MI_DequeueNonSysCntxThread1"}, -+{MI_DequeueNonSysCntxThread1_seq1, "MI_DequeueNonSysCntxThread1_seq1"}, -+{MI_TestThreadQueueEmpty, "MI_TestThreadQueueEmpty"}, -+{MI_ClearThreadQueueIfCC, "MI_ClearThreadQueueIfCC"}, -+{MI_DequeueSysCntxThread1, "MI_DequeueSysCntxThread1"}, -+{MI_DequeueSysCntxThread1_seq1, "MI_DequeueSysCntxThread1_seq1"}, -+{MI_TProcStartUpGeneric, "MI_TProcStartUpGeneric"}, -+{MI_WaitForPCload2, "MI_WaitForPCload2"}, -+{MI_WaitForNPCWrite, "MI_WaitForNPCWrite"}, -+{MI_WaitForEventWaitAddr, "MI_WaitForEventWaitAddr"}, -+{MI_WaitForWaitEventAccess, "MI_WaitForWaitEventAccess"}, -+{MI_WaitForWaitEventAccess_seq1, "MI_WaitForWaitEventAccess_seq1"}, -+{MI_WaitForWaitEventDesc, "MI_WaitForWaitEventDesc"}, -+{MI_WaitForEventReadTy0, "MI_WaitForEventReadTy0"}, -+{MI_SendCondTestFail, "MI_SendCondTestFail"}, -+{MI_InputMoveToNextTrans, "MI_InputMoveToNextTrans"}, -+{MI_ThreadUpdateSysCntxBack, "MI_ThreadUpdateSysCntxBack"}, -+{MI_FinishedSetEvent, "MI_FinishedSetEvent"}, -+{MI_EventIntUpdateBPtr, "MI_EventIntUpdateBPtr"}, -+{MI_EventQueueOverflow, "MI_EventQueueOverflow"}, -+{MI_MaskLowerSource, "MI_MaskLowerSource"}, -+{MI_DmaLoop, "MI_DmaLoop"}, -+{MI_SendNullSetEvent, "MI_SendNullSetEvent"}, -+{MI_SendFinalSetEvent, "MI_SendFinalSetEvent"}, -+{MI_TestNackFailIsZero1, "MI_TestNackFailIsZero1"}, -+{MI_DmaPacketTimedOutOrPacketError, "MI_DmaPacketTimedOutOrPacketError"}, -+{MI_NextPacketIsLast, "MI_NextPacketIsLast"}, -+{MI_TestForZeroLengthDma, "MI_TestForZeroLengthDma"}, -+{MI_WaitForPCload, "MI_WaitForPCload"}, -+{MI_ReadInIns, "MI_ReadInIns"}, -+{MI_WaitForInsRead, "MI_WaitForInsRead"}, -+{MI_WaitForLocals, "MI_WaitForLocals"}, -+{MI_WaitForOutsWrite, "MI_WaitForOutsWrite"}, -+{MI_WaitForWaitEvWrBack, "MI_WaitForWaitEvWrBack"}, -+{MI_WaitForLockRead, "MI_WaitForLockRead"}, -+{MI_TestQueueLock, "MI_TestQueueLock"}, -+{MI_InputIdWriteAddrAndType, "MI_InputIdWriteAddrAndType"}, -+{MI_InputIdWriteAddrAndType2, "MI_InputIdWriteAddrAndType2"}, -+{MI_ThreadInputIdentify2, "MI_ThreadInputIdentify2"}, -+{MI_WriteIntoTrapArea0, "MI_WriteIntoTrapArea0"}, -+{MI_GenQueueBlockWrAddr, "MI_GenQueueBlockWrAddr"}, -+{MI_InputDiscardFreeLock, "MI_InputDiscardFreeLock"}, -+{MI_WriteIntoTrapArea1, "MI_WriteIntoTrapArea1"}, -+{MI_WriteIntoTrapArea2, "MI_WriteIntoTrapArea2"}, -+{MI_ResetBPtrToBase, "MI_ResetBPtrToBase"}, -+{MI_InputDoTrap, "MI_InputDoTrap"}, -+{MI_RemoteDmaCntxt0Update, "MI_RemoteDmaCntxt0Update"}, -+{MI_ClearQueueLock, "MI_ClearQueueLock"}, -+{MI_IProcTrappedBlockWriteData, "MI_IProcTrappedBlockWriteData"}, -+{MI_FillContextFilter, "MI_FillContextFilter"}, -+{MI_IProcTrapped4, "MI_IProcTrapped4"}, -+{MI_RunSysCntxDma, "MI_RunSysCntxDma"}, -+{MI_ChainedEventError, "MI_ChainedEventError"}, -+{MI_InputTrappingEOP, "MI_InputTrappingEOP"}, -+{MI_CheckForRunIfZero, "MI_CheckForRunIfZero"}, -+{MI_TestForBreakOrSuspend, "MI_TestForBreakOrSuspend"}, -+{MI_SwapForRunable, "MI_SwapForRunable"}, -diff -urN clean/include/elan3/neterr_rpc.h linux-2.6.9/include/elan3/neterr_rpc.h ---- clean/include/elan3/neterr_rpc.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan3/neterr_rpc.h 2003-06-26 12:05:22.000000000 -0400 -@@ -0,0 +1,68 @@ -+/* -+ * Copyright (c) 1996-2002 by 
Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN3_NETERR_RPC_H -+#define __ELAN3_NETERR_RPC_H -+ -+#ident "$Id: neterr_rpc.h,v 1.20 2003/06/26 16:05:22 fabien Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/neterr_rpc.h,v $*/ -+ -+#define NETERR_SERVICE "neterr-srv" -+#define NETERR_PROGRAM ((u_long) 170002) -+#define NETERR_VERSION ((u_long) 1) -+ -+#define NETERR_NULL_RPC 0 -+#define NETERR_FIXUP_RPC 1 -+ -+/* network error rpc timeout */ -+#define NETERR_RPC_TIMEOUT 5 -+ -+/* -+ * XDR functions for Tru64 and Linux in userspace. -+ * NB the Linux kernelspace xdr routines are in network_error -+ * and *must* be kept consistent. -+ */ -+#if defined(DIGITAL_UNIX) || !defined(__KERNEL__) -+bool_t -+xdr_capability (XDR *xdrs, void *arg) -+{ -+ ELAN_CAPABILITY *cap = (ELAN_CAPABILITY *) arg; -+ -+ return (xdr_opaque (xdrs, (caddr_t) &cap->cap_userkey, sizeof (cap->cap_userkey)) && -+ xdr_int (xdrs, &cap->cap_version) && -+ xdr_u_short (xdrs, &cap->cap_type) && -+ xdr_int (xdrs, &cap->cap_lowcontext) && -+ xdr_int (xdrs, &cap->cap_highcontext) && -+ xdr_int (xdrs, &cap->cap_mycontext) && -+ xdr_int (xdrs, &cap->cap_lownode) && -+ xdr_int (xdrs, &cap->cap_highnode) && -+ xdr_u_int (xdrs, &cap->cap_railmask) && -+ xdr_opaque (xdrs, (caddr_t) &cap->cap_bitmap[0], sizeof (cap->cap_bitmap))); -+} -+ -+bool_t -+xdr_neterr_msg (XDR *xdrs, void *req) -+{ -+ NETERR_MSG *msg = (NETERR_MSG *) req; -+ -+ return (xdr_u_int (xdrs, &msg->Rail) && -+ xdr_capability (xdrs, &msg->SrcCapability) && -+ xdr_capability (xdrs, &msg->DstCapability) && -+ xdr_u_int (xdrs, &msg->DstProcess) && -+ xdr_u_int (xdrs, &msg->CookieAddr) && -+ xdr_u_int (xdrs, &msg->CookieVProc) && -+ xdr_u_int (xdrs, &msg->NextCookie) && -+ xdr_u_int (xdrs, &msg->WaitForEop)); -+} -+#endif /* DIGITAL_UNIX || !__KERNEL__ */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -+#endif /* __ELAN3_NETERR_RPC_H */ -diff -urN clean/include/elan3/perm.h linux-2.6.9/include/elan3/perm.h ---- clean/include/elan3/perm.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan3/perm.h 2003-09-24 09:57:24.000000000 -0400 -@@ -0,0 +1,29 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN3_PERM_H -+#define __ELAN3_PERM_H -+ -+#ident "$Id: perm.h,v 1.7 2003/09/24 13:57:24 david Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/perm.h,v $*/ -+ -+#define ELAN3_PERM_NULL 0x00 -+#define ELAN3_PERM_LOCAL_READ 0x04 -+#define ELAN3_PERM_READ 0x08 -+#define ELAN3_PERM_NOREMOTE 0x0c -+#define ELAN3_PERM_REMOTEREAD 0x10 -+#define ELAN3_PERM_REMOTEWRITE 0x14 -+#define ELAN3_PERM_REMOTEEVENT 0x18 -+#define ELAN3_PERM_REMOTEALL 0x1c -+ -+#endif /* __ELAN3_PERM_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan3/pte.h linux-2.6.9/include/elan3/pte.h ---- clean/include/elan3/pte.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan3/pte.h 2003-09-24 09:57:24.000000000 -0400 -@@ -0,0 +1,139 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN3_PTE_H -+#define __ELAN3_PTE_H -+ -+#ident "$Id: pte.h,v 1.26 2003/09/24 13:57:24 david Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/pte.h,v $*/ -+ -+#ifdef __cplusplus -+extern "C" -+{ -+#endif -+ -+#include -+#include -+ -+typedef E3_uint64 ELAN3_PTE; -+typedef E3_uint32 ELAN3_PTP; -+ -+#define ELAN3_PTE_SIZE (8) -+#define ELAN3_PTP_SIZE (4) -+ -+#define ELAN3_PTE_REF ((E3_uint64) 1 << 63) /* 63 - referenced bit */ -+#define ELAN3_PTE_MOD ((E3_uint64) 1 << 55) /* 55 - modified bit */ -+#define ELAN3_RM_MASK (ELAN3_PTE_REF | ELAN3_PTE_MOD) -+ -+#define ELAN3_PTE_PFN_MASK 0x0000fffffffff000ull /* [12:47] - Physical address */ -+ -+#define ELAN3_PTE_BIG_ENDIAN 0x80 /* 7 - big endian */ -+#define ELAN3_PTE_64_BIT 0x40 /* 6 - 64 bit pci address */ -+#define ELAN3_PTE_LOCAL 0x20 /* 5 - local sdram */ -+ -+#define ELAN3_PTE_PERM_MASK 0x1c /* [2:4] - Permissions */ -+#define ELAN3_PTE_PERM_SHIFT 2 -+ -+#define ELAN3_ET_MASK 0x3 -+#define ELAN3_ET_INVALID 0x0 /* [0:1] */ -+#define ELAN3_ET_PTP 0x1 -+#define ELAN3_ET_PTE 0x2 -+ -+#define ELAN3_INVALID_PTP ((ELAN3_PTP) 0) -+#define ELAN3_INVALID_PTE ((ELAN3_PTE) 0) -+ -+#define ELAN3_PTP_TYPE(ptp) ((ptp) & ELAN3_ET_MASK) -+#define ELAN3_PTE_TYPE(pte) ((pte) & ELAN3_ET_MASK) -+#define ELAN3_PTE_PERM(pte) ((pte) & ELAN3_PTE_PERM_MASK) -+#define ELAN3_PTE_VALID(pte) (((pte) & ELAN3_ET_MASK) == ELAN3_ET_PTE) -+#define ELAN3_PTE_ISREF(pte) ((pte) & ELAN3_PTE_REF) -+#define ELAN3_PTE_ISMOD(pte) ((pte) & ELAN3_PTE_MOD) -+#define ELAN3_PTE_WRITEABLE(pte) (ELAN3_PERM_WRITEABLE(ELAN3_PTE_PERM(pte))) -+ -+#define ELAN3_PERM_WRITEABLE(perm) ((perm) == ELAN3_PERM_NOREMOTE || (perm) > ELAN3_PERM_REMOTEREAD) -+#define ELAN3_PERM_REMOTE(perm) ((perm) > ELAN3_PERM_NOREMOTE) -+ -+#define ELAN3_PERM_READONLY(perm) ((perm) == ELAN3_PERM_NOREMOTE ? ELAN3_PERM_LOCAL_READ : \ -+ (perm) > ELAN3_PERM_REMOTEREAD ? 
ELAN3_PERM_READ : (perm)) -+#if PAGE_SHIFT == 12 -+# define ELAN3_PAGE_SHIFT 12 -+#else -+# define ELAN3_PAGE_SHIFT 13 -+#endif -+ -+#define ELAN3_PAGE_SIZE (1 << ELAN3_PAGE_SHIFT) -+#define ELAN3_PAGE_OFFSET (ELAN3_PAGE_SIZE-1) -+#define ELAN3_PAGE_MASK (~ELAN3_PAGE_OFFSET) -+ -+#if ELAN3_PAGE_SHIFT == 13 -+# define ELAN3_L3_SHIFT 5 -+#else -+# define ELAN3_L3_SHIFT 6 -+#endif -+#define ELAN3_L2_SHIFT 6 -+#define ELAN3_L1_SHIFT 8 -+ -+/* Number of entries in a given level ptbl */ -+#define ELAN3_L3_ENTRIES (1 << ELAN3_L3_SHIFT) -+#define ELAN3_L2_ENTRIES (1 << ELAN3_L2_SHIFT) -+#define ELAN3_L1_ENTRIES (1 << ELAN3_L1_SHIFT) -+ -+/* Virtual address spanned by each entry */ -+#define ELAN3_L3_SIZE (1 << (ELAN3_PAGE_SHIFT)) -+#define ELAN3_L2_SIZE (1 << (ELAN3_L3_SHIFT+ELAN3_PAGE_SHIFT)) -+#define ELAN3_L1_SIZE (1 << (ELAN3_L3_SHIFT+ELAN3_L2_SHIFT+ELAN3_PAGE_SHIFT)) -+ -+/* Virtual address size of page table */ -+#define ELAN3_L1_PTSIZE (ELAN3_L1_ENTRIES * ELAN3_L1_SIZE) -+#define ELAN3_L3_PTSIZE (ELAN3_L3_ENTRIES * ELAN3_L3_SIZE) -+#define ELAN3_L2_PTSIZE (ELAN3_L2_ENTRIES * ELAN3_L2_SIZE) -+ -+/* Mask for offset into page table */ -+#define ELAN3_L1_PTOFFSET ((ELAN3_L1_SIZE*ELAN3_L1_ENTRIES)-1) -+#define ELAN3_L3_PTOFFSET ((ELAN3_L3_SIZE*ELAN3_L3_ENTRIES)-1) -+#define ELAN3_L2_PTOFFSET ((ELAN3_L2_SIZE*ELAN3_L2_ENTRIES)-1) -+ -+#define ELAN3_L1_INDEX(addr) (((E3_Addr) (addr) & 0xFF000000) >> (ELAN3_L2_SHIFT+ELAN3_L3_SHIFT+ELAN3_PAGE_SHIFT)) -+#define ELAN3_L2_INDEX(addr) (((E3_Addr) (addr) & 0x00FC0000) >> (ELAN3_L3_SHIFT+ELAN3_PAGE_SHIFT)) -+#define ELAN3_L3_INDEX(addr) (((E3_Addr) (addr) & 0x0003F000) >> ELAN3_PAGE_SHIFT) -+ -+#define ELAN3_L1_BASE(addr) (((E3_Addr)(addr)) & 0x00000000) -+#define ELAN3_L2_BASE(addr) (((E3_Addr)(addr)) & 0xFF000000) -+#define ELAN3_L3_BASE(addr) (((E3_Addr)(addr)) & 0xFFFC0000) -+ -+/* Convert a page table pointer entry to the PT */ -+#define PTP_TO_PT_PADDR(ptp) ((E3_Addr)(ptp & 0xFFFFFFFC)) -+ -+#ifdef __KERNEL__ -+/* -+ * macro to test for incompatible access against a permission. -+ */ -+extern u_char elan3mmu_permissionTable[8]; -+#define ELAN3_INCOMPAT_ACCESS(perm,access) (! (elan3mmu_permissionTable[(perm)>>ELAN3_PTE_PERM_SHIFT] & (1 << (access)))) -+ -+#define elan3_readptp(dev, ptp) (elan3_sdram_readl (dev, ptp)) -+#define elan3_writeptp(dev, ptp, value) (elan3_sdram_writel (dev, ptp, value)) -+#define elan3_readpte(dev, pte) (elan3_sdram_readq (dev, pte)) -+#define elan3_writepte(dev,pte, value) (elan3_sdram_writeq (dev, pte, value)) -+ -+#define elan3_invalidatepte(dev, pte) (elan3_sdram_writel (dev, pte, 0)) -+#define elan3_modifypte(dev,pte,new) (elan3_sdram_writel (dev, pte, (int) (new))) -+#define elan3_clrref(dev,pte) (elan3_sdram_writeb (dev, pte + 7, 0)) -+ -+#endif /* __KERNEL__ */ -+ -+#ifdef __cplusplus -+} -+#endif -+ -+#endif /* __ELAN3_PTE_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan3/spinlock.h linux-2.6.9/include/elan3/spinlock.h ---- clean/include/elan3/spinlock.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan3/spinlock.h 2003-09-24 09:57:24.000000000 -0400 -@@ -0,0 +1,195 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef _ELAN3_SPINLOCK_ -+#define _ELAN3_SPINLOCK_ -+ -+#ident "$Id: spinlock.h,v 1.31 2003/09/24 13:57:24 david Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/spinlock.h,v $*/ -+ -+/* -+ * This spinlock is designed for main/elan processor interactions. -+ * The lock is split over Elan/Main memory in such a way that -+ * we don't end up busy-polling over the PCI. -+ * In the Elan memory we have two words; one is a sequence number -+ * and the other is a lock word for main. -+ * In main memory we have a copy of the sequence number, which main polls when it is -+ * waiting for the Elan to drop the lock; it polls this word until it becomes -+ * equal to the sequence number it sampled. -+ * The Elan drops the lock by writing the current sequence number to main memory. -+ * It is coded to always give priority to the Elan thread, and so when both go for the -+ * lock, main will back off first. -+ * -+ * 18/3/98 -+ * This has been extended to avoid a starvation case where both the main and thread claim the -+ * lock and so both back off (thread does a break). So now, main attempts to claim the -+ * lock by writing 'mainLock', then samples 'sl_seq', and if it has the lock -+ * it sets 'mainGotLock'. The thread will now see 'sl_mainLock' set, but will only -+ * back off with a c_break_busywait() if 'mainGotLock' is set too. -+ */ -+typedef struct elan3_spinlock_elan { -+ union { -+ volatile E3_uint64 mainLocks; /* main writes this dble word */ -+ struct { -+ volatile E3_uint32 mainLock; /* main wants a lock */ -+ volatile E3_uint32 mainGotLock; /* main has the lock */ -+ } s; -+ } sl_u; -+ volatile E3_uint32 sl_seq; /* thread owns this word */ -+ volatile E3_uint32 sl_mainWait; /* performance counter */ -+ volatile E3_uint32 sl_elanWait; /* performance counter */ -+ volatile E3_uint32 sl_elanBusyWait; /* performance counter */ -+ /* NOTE: The lock/seq words must be within the same 32-byte Elan cache-line */ -+ E3_uint64 sl_pad[5]; /* pad to 64-bytes */ -+} ELAN3_SPINLOCK_ELAN; -+ -+#define sl_mainLocks sl_u.mainLocks -+#define sl_mainLock sl_u.s.mainLock -+#define sl_mainGotLock sl_u.s.mainGotLock -+ -+#define SL_MAIN_RECESSIVE 1 -+#define SL_MAIN_DOMINANT 2 -+ -+/* Declare this as a main memory cache block for efficiency */ -+typedef union elan3_spinlock_main { -+ volatile E3_uint32 sl_seq; /* copy of seq number updated by Elan */ -+ volatile E3_uint32 sl_Int32[E3_BLK_SIZE/sizeof (E3_uint32)]; -+} ELAN3_SPINLOCK_MAIN; -+ -+/* Main/Main or Elan/Elan lock word */ -+typedef volatile int ELAN3_SPINLOCK; -+ -+#ifdef __ELAN3__ -+ -+/* Main/Elan interlock */ -+ -+#define ELAN3_ME_SPINENTER(SLE,SL) do {\ -+ asm volatile ("! elan3_spinlock store barrier");\ -+ (SLE)->sl_seq++; \ -+ if ((SLE)->sl_mainLock) \ -+ elan3_me_spinblock(SLE, SL);\ -+ asm volatile ("! elan3_spinlock store barrier");\ -+ } while (0) -+#define ELAN3_ME_SPINEXIT(SLE,SL) do {\ -+ asm volatile ("! elan3_spinlock store barrier");\ -+ (SL)->sl_seq = (SLE)->sl_seq;\ -+ asm volatile ("! elan3_spinlock store barrier");\ -+ } while (0) -+ -+ -+/* Elan/Elan interlock */ -+#define ELAN3_SPINENTER(L) do {\ -+ asm volatile ("! store barrier");\ -+ if (c_swap ((L), 1)) elan3_spinenter(L);\ -+ asm volatile ("! store barrier");\ -+ } while (0) -+#define ELAN3_SPINEXIT(L) do {\ -+ asm volatile ("! store barrier");\ -+ c_swap((L), 0);\ -+ asm volatile ("! 
store barrier");\ -+ } while (0) -+ -+extern void elan3_me_spinblock (ELAN3_SPINLOCK_ELAN *sle, ELAN3_SPINLOCK_MAIN *sl); -+extern void elan3_spinenter (ELAN3_SPINLOCK *l); -+ -+#else -+ -+/* Main/Elan interlock */ -+#ifdef DEBUG -+#define ELAN3_ME_SPINENTER(SDRAM,SLE,SL) do {\ -+ register E3_int32 maxLoops = 0x7fffffff; \ -+ register E3_uint32 seq;\ -+ elan3_write32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_RECESSIVE); \ -+ MEMBAR_STORELOAD(); \ -+ seq = elan3_read32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \ -+ while (seq != (SL)->sl_seq) {\ -+ elan3_write32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), 0); \ -+ while ((SL)->sl_seq == (seq-1) && maxLoops--) ; \ -+ if (maxLoops < 0) { \ -+ printf("Failed to get ME lock %lx/%lx seq %d sle_seq %d sl_seq %d\n", \ -+ SL, SLE, seq, \ -+ elan3_read32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)), \ -+ (SL)->sl_seq); \ -+ } \ -+ elan3_write32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_RECESSIVE); \ -+ MEMBAR_STORELOAD(); \ -+ seq = elan3_read32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \ -+ }\ -+ elan3_write32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainGotLock), 1); \ -+ MEMBAR_LOADLOAD();\ -+ } while (0) -+#else -+#define ELAN3_ME_SPINENTER(SDRAM,SLE,SL) do {\ -+ register E3_uint32 seq;\ -+ elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_RECESSIVE); \ -+ MEMBAR_STORELOAD(); \ -+ seq = elan3_read32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \ -+ while (seq != (SL)->sl_seq) {\ -+ elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), 0); \ -+ while ((SL)->sl_seq == (seq-1)) ; \ -+ elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_RECESSIVE); \ -+ MEMBAR_STORELOAD(); \ -+ seq = elan3_read32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \ -+ }\ -+ elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainGotLock), 1); \ -+ MEMBAR_LOADLOAD();\ -+ } while (0) -+#endif -+#define ELAN3_ME_FORCEENTER(SDRAM,SLE,SL) do { \ -+ register E3_uint32 seq; \ -+ MEMBAR_STORELOAD(); \ -+ elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_DOMINANT); \ -+ MEMBAR_STORELOAD(); \ -+ seq = elan3_read32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \ -+ while (seq != (SL)->sl_seq) \ -+ { \ -+ /* NOTE: we MUST call elan3_usecspin here for kernel comms */\ -+ while ((SL)->sl_seq == (seq)-1) \ -+ elan3_usecspin (1); \ -+ seq = elan3_read32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \ -+ } \ -+ elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainGotLock), 1); \ -+ MEMBAR_LOADLOAD(); \ -+} while (0) -+ -+#define ELAN3_ME_TRYENTER(SDRAM,SLE,SL,SEQ) do { \ -+ elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_RECESSIVE); \ -+ MEMBAR_STORELOAD(); \ -+ SEQ = elan3_read32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \ -+} while (0) -+ -+#define ELAN3_ME_CHECKENTER(SDRAM,SLE,SL,SEQ) do { \ -+ if ((SEQ) == ((SL)->sl_seq)) { \ -+ elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainGotLock), 1); \ -+ MEMBAR_LOADLOAD();\ -+ } \ -+ else ELAN3_ME_SPINENTER(SDRAM,SLE,SL); \ -+} while (0) -+ -+#define ELAN3_ME_SPINEXIT(SDRAM,SLE,SL) do {\ -+ MEMBAR_STORESTORE(); \ -+ elan3_write64_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLocks), 0); \ -+ MEMBAR_STORESTORE(); 
\ -+ } while (0) -+ -+ -+/* Main/Main */ -+#define ELAN3_SPINENTER(L) do {\ -+ while (c_swap ((L), 1)) ; \ -+ } while (0) -+#define ELAN3_SPINEXIT(L) do {\ -+ c_swap((L), 0);\ -+ } while (0) -+#endif /* __ELAN3__ */ -+ -+#endif /* _ELAN3_SPINLOCK_ */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan3/thread.h linux-2.6.9/include/elan3/thread.h ---- clean/include/elan3/thread.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan3/thread.h 2002-08-09 07:23:34.000000000 -0400 -@@ -0,0 +1,137 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef _ELAN3_THREAD_H -+#define _ELAN3_THREAD_H -+ -+#ident "$Id: thread.h,v 1.17 2002/08/09 11:23:34 addy Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/thread.h,v $*/ -+ -+/* Alignment for a stack frame */ -+#define E3_STACK_ALIGN (64) -+ -+typedef struct _E3_Frame { -+ E3_uint32 fr_local[8]; /* saved locals (not used) */ -+ E3_uint32 fr_arg[6]; /* saved arguments o0 -> o5 */ -+ E3_Addr fr_savefp; /* saved frame pointer o6 */ -+ E3_Addr fr_savepc; /* saved program counter o7 */ -+ E3_Addr fr_stret; /* struct return addr */ -+ E3_uint32 fr_argd[6]; /* arg dump area */ -+ E3_uint32 fr_argx[1]; /* array of args past the sixth */ -+} E3_Frame; -+ -+typedef struct _E3_Stack { -+ E3_uint32 Locals[8]; -+ E3_uint32 Ins[8]; -+ E3_uint32 Globals[8]; -+ E3_uint32 Outs[8]; -+} E3_Stack; -+ -+typedef struct _E3_OutsRegs { -+ E3_uint32 o[8]; /* o6 == pc, o7 == fptr */ -+} E3_OutsRegs; -+ -+/* -+ * "Magic" value for stack pointer to be ignored. -+ */ -+#define VanishingStackPointer 0x42 -+ -+ -+/* -+ * When the Elan traps, the N & Z CC bits are held in the NPC -+ * and the V & C bits are in the PC -+ */ -+#define PSR_C_BIT (1) -+#define PSR_V_BIT (2) -+#define PSR_Z_BIT (1) -+#define PSR_N_BIT (2) -+#define CC_MASK (3) -+#define PC_MASK (~3) -+#define SP_MASK (~3) -+ -+/* -+ * Thread processor opcodes. 
-+ */ -+#define OPCODE_MASK (0xC1F80000) -+#define OPCODE_IMM (1 << 13) -+ -+#define OPCODE_CLASS(instr) ((instr) & 0xC0000000) -+#define OPCODE_CLASS_0 0x00000000 -+#define OPCODE_CLASS_1 0x40000000 -+#define OPCODE_CLASS_2 0x80000000 -+#define OPCODE_CLASS_3 0xC0000000 -+ -+#define OPCODE_CPOP 0x81B00000 -+#define OPCODE_Ticc 0x81D00000 -+ -+#define OPCODE_FCODE_SHIFT 19 -+#define OPCODE_FCODE_MASK 0x1f -+#define OPCODE_NOT_ALUOP 0x01000000 -+ -+#define OPCODE_SLL 0x81280000 -+#define OPCODE_SRL 0x81300000 -+#define OPCODE_SRA 0x81380000 -+ -+#define OPCODE_OPEN 0x81600000 -+#define OPCODE_CLOSE 0x81680000 -+#define OPCODE_BREAKTEST 0x81700000 -+ -+#define OPCODE_BREAK 0x81a00000 -+#define OPCODE_SUSPEND 0x81a80000 -+#define OPCODE_WAIT 0x81b00000 -+ -+#define OPCODE_JMPL 0x81c00000 -+ -+#define OPCODE_LD 0xC0000000 -+#define OPCODE_LDD 0xC0180000 -+ -+#define OPCODE_LDBLOCK16 0xC0900000 -+#define OPCODE_LDBLOCK32 0xC0800000 -+#define OPCODE_LDBLOCK64 0xC0980000 -+ -+#define OPCODE_ST 0xC0200000 -+#define OPCODE_STD 0xC0380000 -+ -+#define OPCODE_SWAP 0xC0780000 -+ -+#define OPCODE_STBLOCK16 0xC0b00000 -+#define OPCODE_STBLOCK32 0xC0a00000 -+#define OPCODE_STBLOCK64 0xC0b80000 -+ -+#define OPCODE_CLASS0_MASK 0xC1C00000 -+#define OPCODE_SETHI 0x01000000 -+#define OPCODE_BICC 0x00800000 -+#define OPCODE_SENDREG 0x01800000 -+#define OPCODE_SENDMEM 0x01c00000 -+ -+#define OPCODE_BICC_BN 0x00000000 -+#define OPCODE_BICC_BE 0x02000000 -+#define OPCODE_BICC_BLE 0x04000000 -+#define OPCODE_BICC_BL 0x06000000 -+#define OPCODE_BICC_BLEU 0x08000000 -+#define OPCODE_BICC_BCS 0x0A000000 -+#define OPCODE_BICC_BNEG 0x0C000000 -+#define OPCODE_BICC_BVS 0x0E000000 -+ -+#define OPCODE_BICC_MASK 0x0E000000 -+#define OPCODE_BICC_ANNUL 0x20000000 -+ -+#define INSTR_RS2(instr) (((instr) >> 0) & 0x1F) -+#define INSTR_RS1(instr) (((instr) >> 14) & 0x1F) -+#define INSTR_RD(instr) (((instr) >> 25) & 0x1F) -+#define INSTR_IMM(instr) (((instr) & 0x1000) ? ((instr) & 0xFFF) | 0xFFFFF000 : (instr) & 0xFFF) -+ -+#define Ticc_COND(instr) INSTR_RD(instr) -+#define Ticc_TA 8 -+ -+#endif /* _ELAN3_THREAD_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan3/threadlinkage.h linux-2.6.9/include/elan3/threadlinkage.h ---- clean/include/elan3/threadlinkage.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan3/threadlinkage.h 2002-08-09 07:23:34.000000000 -0400 -@@ -0,0 +1,103 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN3_THREADLINKAGE_H -+#define __ELAN3_THREADLINKAGE_H -+ -+#ident "$Id: threadlinkage.h,v 1.6 2002/08/09 11:23:34 addy Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/threadlinkage.h,v $*/ -+ -+#ifdef __cplusplus -+extern "C" { -+#endif -+ -+#if defined(_ASM) || defined(__LANGUAGE_ASSEMBLY__) -+ -+/* -+ * Macro to define weak symbol aliases. These are similar to the ANSI-C -+ * #pragma weak name = _name -+ * except that a compiler can determine the type; the assembler must be told. Hence, -+ * the second parameter must be the type of the symbol (i.e.: function,...) 
-+ */ -+#define ANSI_PRAGMA_WEAK(sym, stype) \ -+ .weak sym; \ -+ .type sym, #stype; \ -+/* CSTYLED */ \ -+sym = _/**/sym -+ -+/* -+ * ENTRY provides the standard procedure entry code -+ */ -+#define ENTRY(x) \ -+ .section ".text"; \ -+ .align 4; \ -+ .global x; \ -+x: -+ -+/* -+ * ENTRY2 is identical to ENTRY but provides two labels for the entry point. -+ */ -+#define ENTRY2(x, y) \ -+ .section ".text"; \ -+ .align 4; \ -+ .global x, y; \ -+/* CSTYLED */ \ -+x: ; \ -+y: -+ -+ -+/* -+ * ALTENTRY provides for additional entry points. -+ */ -+#define ALTENTRY(x) \ -+ .global x; \ -+x: -+ -+/* -+ * DGDEF, DGDEF2 and DGDEF3 provide global data declarations. -+ * -+ * DGDEF provides a word-aligned word of storage. -+ * -+ * DGDEF2 allocates "sz" bytes of storage with **NO** alignment. This -+ * implies this macro is best used for byte arrays. -+ * -+ * DGDEF3 allocates "sz" bytes of storage with "algn" alignment. -+ */ -+#define DGDEF2(name, sz) \ -+ .section ".data"; \ -+ .global name; \ -+ .size name, sz; \ -+name: -+ -+#define DGDEF3(name, sz, algn) \ -+ .section ".data"; \ -+ .align algn; \ -+ .global name; \ -+ .size name, sz; \ -+name: -+ -+#define DGDEF(name) DGDEF3(name, 4, 4) -+ -+/* -+ * SET_SIZE trails a function and sets the size for the ELF symbol table. -+ */ -+#define SET_SIZE(x) \ -+ .size x, (.-x) -+ -+#endif /* _ASM || __LANGUAGE_ASSEMBLY__ */ -+ -+#ifdef __cplusplus -+} -+#endif -+ -+#endif /* __ELAN3_THREADLINKAGE_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan3/threadsyscall.h linux-2.6.9/include/elan3/threadsyscall.h ---- clean/include/elan3/threadsyscall.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan3/threadsyscall.h 2003-09-24 09:57:24.000000000 -0400 -@@ -0,0 +1,64 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN3_SYSCALL_H -+#define __ELAN3_SYSCALL_H -+ -+#ident "$Id: threadsyscall.h,v 1.12 2003/09/24 13:57:24 david Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/threadsyscall.h,v $*/ -+ -+/* -+ * This file contains the system calls supported from the Elan. 
-+ */ -+#define ELAN3_DEBUG_TRAPNUM 5 /* thread debugging trap */ -+#define ELAN3_ABORT_TRAPNUM 6 /* bad abort trap */ -+#define ELAN3_ELANCALL_TRAPNUM 7 /* elansyscall trap */ -+#define ELAN3_SYSCALL_TRAPNUM 8 /* new syscall trap */ -+ -+#define ELAN3_T_SYSCALL_CODE 0 /* offsets in struct elan3_t_syscall */ -+#define ELAN3_T_SYSCALL_ERRNO 4 -+ -+#define ELAN3_SYS_open 1 -+#define ELAN3_SYS_close 2 -+#define ELAN3_SYS_write 3 -+#define ELAN3_SYS_read 4 -+#define ELAN3_SYS_poll 5 -+#define ELAN3_SYS_ioctl 6 -+#define ELAN3_SYS_lseek 7 -+#define ELAN3_SYS_mmap 8 -+#define ELAN3_SYS_munmap 9 -+#define ELAN3_SYS_kill 10 -+#define ELAN3_SYS_getpid 11 -+ -+#if !defined(SYS_getpid) && defined(__NR_getxpid) -+#define SYS_getpid __NR_getxpid /* for linux */ -+#endif -+ -+#if !defined(_ASM) && !defined(__LANGUAGE_ASSEMBLY__) -+ -+extern int elan3_t_open (const char *, int, ...); -+extern ssize_t elan3_t_write (int, const void *, unsigned); -+extern ssize_t elan3_t_read(int, void *, unsigned); -+extern int elan3_t_ioctl(int, int, ...); -+extern int elan3_t_close(int); -+extern off_t elan3_t_lseek(int filedes, off_t offset, int whence); -+ -+extern caddr_t elan3_t_mmap(caddr_t, size_t, int, int, int, off_t); -+extern int elan3_t_munmap(caddr_t, size_t); -+ -+extern int elan3_t_getpid(void); -+extern void elan3_t_abort(char *str); -+ -+#endif /* !_ASM && ! __LANGUAGE_ASSEMBLY__ */ -+ -+#endif /* __ELAN3_SYSCALL_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan3/trtype.h linux-2.6.9/include/elan3/trtype.h ---- clean/include/elan3/trtype.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan3/trtype.h 2002-08-09 07:23:34.000000000 -0400 -@@ -0,0 +1,116 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef _ELAN3_TRTYPE_H -+#define _ELAN3_TRTYPE_H -+ -+#ident "$Id: trtype.h,v 1.13 2002/08/09 11:23:34 addy Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/trtype.h,v $ */ -+ -+/*<15> ackNow */ -+#define TR_SENDACK (1 << 15) -+ -+#define TR_SIZE_SHIFT 12 -+#define TR_SIZE_MASK 7 -+ -+/*<14:12> Size 0, 1, 2, 4, 8, 16, 32, 64 Double Words -+ Bit 14 is forced to zero currently so that only size 0, 1, 2, 4 are -+ allowed */ -+ -+#define TR_SIZE0 (0 << TR_SIZE_SHIFT) -+#define TR_SIZE1 (1 << TR_SIZE_SHIFT) -+#define TR_SIZE2 (2 << TR_SIZE_SHIFT) -+#define TR_SIZE4 (3 << TR_SIZE_SHIFT) -+#define TR_SIZE8 (4 << TR_SIZE_SHIFT) -+ -+#define TR_64_BIT_ADDR (1 << 11) -+#define TR_LAST_TRANS (1 << 10) -+ -+#define TR_WRITEBLOCK_BIT (1 << 9) -+#define TR_WRITEBLOCK (TR_WRITEBLOCK_BIT | TR_SIZE8) -+ -+ -+#define TR_WRITEBLOCK_SIZE 64 -+ -+/* -+ * write-block -+ */ -+/* WriteBlock <8:7> Data type -+ <6:0> Part write size */ -+#define TR_TYPE_SHIFT 7 -+#define TR_TYPE_MASK ((1 << 2) - 1) -+ -+#define TR_TYPE_BYTE 0 -+#define TR_TYPE_SHORT 1 -+#define TR_TYPE_WORD 2 -+#define TR_TYPE_DWORD 3 -+ -+#define TR_PARTSIZE_MASK ((1 << 7) -1) -+ -+#define TR_WAIT_FOR_EOP (1 << 8) -+ -+/* -+ * trace-route format -+ */ -+#define TR_TRACEROUTE0_CHANID(val) ((val) & 1) /* 0 Chan Id */ -+#define TR_TRACEROUTE0_LINKID(val) (((val) >> 1) & 7) /* 1:3 Link Id */ -+#define TR_TRACEROUTE0_REVID(val) (((val) >> 4) & 7) /* 4:6 Revision ID */ -+#define TR_TRACEROUTE0_BCAST_TOP_PIN(val) (((val) >> 7) & 1) /* 7 Broadcast Top Pin (REV B) */ -+#define TR_TRACEROUTE0_LNR(val) ((val) >> 8) /* 8:15 Global Link Not Ready */ -+ -+#define TR_TRACEROUTE1_PRIO(val) ((val & 0xF)) /* 0:3 Arrival Priority (REV A) */ -+#define TR_TRACEROUTE1_AGE(val) (((val) >> 4) & 0xF) /* 4:7 Priority Held(Age) (REV A) */ -+#define TR_TRACEROUTE1_ROUTE_SELECTED(val) ((val) & 0xFF) /* 0:7 Arrival age (REV B) */ -+#define TR_TRACEROUTE1_BCAST_TOP(val) (((val) >> 8) & 7) /* 8:10 Broadcast Top */ -+#define TR_TRACEROUTE1_ADAPT(val) (((val) >> 12) & 3) /* 12:13 This Adaptive Value (REV A) */ -+#define TR_TRACEROUTE1_BCAST_BOT(val) (((val) >> 12) & 7) /* 12:14 Broadcast Bottom (REV B) */ -+ -+#define TR_TRACEROUTE2_ARRIVAL_AGE(val) ((val) & 0xF) /* 0:3 Arrival Age (REV B) */ -+#define TR_TRACEROUTE2_CURR_AGE(val) (((val) >> 4) & 0xF) /* 4:7 Current Age (REV B) */ -+#define TR_TRACEROUTE2_BUSY(val) (((val) >> 8) & 0xFF) /* 8:15 Busy (REV B) */ -+ -+#define TR_TRACEROUTE_SIZE 32 -+#define TR_TRACEROUTE_ENTRIES (TR_TRACEROUTE_SIZE/2) -+ -+/* -+ * non-write block -+ */ -+#define TR_OPCODE_MASK (((1 << 8) - 1) | \ -+ (TR_SIZE_MASK << TR_SIZE_SHIFT) | \ -+ TR_WRITEBLOCK_BIT) -+ -+#define TR_NOP_TRANS (0x0 | TR_SIZE0) -+#define TR_SETEVENT (0x0 | TR_SIZE0 | TR_SENDACK | TR_LAST_TRANS) -+#define TR_REMOTEDMA (0x1 | TR_SIZE4 | TR_SENDACK | TR_LAST_TRANS) -+#define TR_LOCKQUEUE (0x2 | TR_SIZE0) -+#define TR_UNLOCKQUEUE (0x3 | TR_SIZE0 | TR_SENDACK | TR_LAST_TRANS) -+ -+#define TR_SENDDISCARD (0x4 | TR_SIZE0) -+#define TR_TRACEROUTE (0x5 | TR_SIZE4) -+ -+#define TR_DMAIDENTIFY (0x6 | TR_SIZE0) -+#define TR_THREADIDENTIFY (0x7 | TR_SIZE1) -+ -+#define TR_GTE (0x8 | TR_SIZE1) -+#define TR_LT (0x9 | TR_SIZE1) -+#define TR_EQ (0xA | TR_SIZE1) -+#define TR_NEQ (0xB | TR_SIZE1) -+ -+#define TR_WRITEWORD (0xC | TR_SIZE1) -+#define TR_WRITEDOUBLEWORD (0xD | TR_SIZE1) -+#define TR_TESTANDWRITE (0xE | TR_SIZE1) -+#define TR_ATOMICADDWORD (0xF | TR_SIZE1 | TR_SENDACK | 
TR_LAST_TRANS) -+#define TR_OPCODE_TYPE_MASK 0xff -+ -+ -+#endif /* notdef _ELAN3_TRTYPE_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan3/urom_addrs.h linux-2.6.9/include/elan3/urom_addrs.h ---- clean/include/elan3/urom_addrs.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan3/urom_addrs.h 2002-07-12 10:28:21.000000000 -0400 -@@ -0,0 +1,262 @@ -+#define MI_WaitForRemoteDescRead 0x0 -+#define MI_WaitForRemoteDescRead2 0x1 -+#define MI_WaitForRemoteDescRead2_seq1 0x2 -+#define MI_SendRemoteDmaRoutes 0x3 -+#define MI_IProcTrapped 0x4 -+#define MI_DProcTrapped 0x5 -+#define MI_CProcTrapped 0x6 -+#define MI_TProcTrapped 0x7 -+#define MI_TestWhichDmaQueue 0x8 -+#define MI_TestWhichDmaQueue_seq1 0x9 -+#define MI_InputRemoteDmaUpdateBPtr 0xa -+#define MI_FixupQueueContextAndRemoteBit 0xb -+#define MI_FixupQueueContextAndRemoteBit_seq1 0xc -+#define MI_FixupQueueContextAndRemoteBit_seq2 0xd -+#define MI_FixupQueueContextAndRemoteBit_seq3 0xe -+#define MI_FixupQueueContextAndRemoteBit_seq4 0xf -+#define MI_RunDmaCommand 0x10 -+#define MI_DoSendRemoteDmaDesc 0x11 -+#define MI_DequeueNonSysCntxDma 0x12 -+#define MI_WaitForRemoteDescRead1 0x13 -+#define MI_RemoteDmaCommand 0x14 -+#define MI_WaitForRemoteRoutes 0x15 -+#define MI_DequeueSysCntxDma 0x16 -+#define MI_ExecuteDmaDescriptorForQueue 0x17 -+#define MI_ExecuteDmaDescriptor1 0x18 -+#define MI_ExecuteDmaDescriptor1_seq1 0x19 -+#define MI_ExecuteDmaDescriptor1_seq2 0x1a -+#define MI_ExecuteDmaDescriptor1_seq3 0x1b -+#define MI_GetNewSizeInProg 0x1c -+#define MI_GetNewSizeInProg_seq1 0x1d -+#define MI_FirstBlockRead 0x1e -+#define MI_ExtraFirstBlockRead 0x1f -+#define MI_UnimplementedError 0x20 -+#define MI_UpdateDescriptor 0x21 -+#define MI_UpdateDescriptor_seq1 0x22 -+#define MI_UpdateDescriptor_seq2 0x23 -+#define MI_UpdateDescriptor_seq3 0x24 -+#define MI_UpdateDescriptor_seq4 0x25 -+#define MI_UpdateDescriptor_seq5 0x26 -+#define MI_GetNextSizeInProg 0x27 -+#define MI_DoStopThisDma 0x28 -+#define MI_DoStopThisDma_seq1 0x29 -+#define MI_GenNewBytesToRead 0x2a -+#define MI_WaitForEventReadTy1 0x2b -+#define MI_WaitUpdateEvent 0x2c -+#define MI_WaitUpdateEvent_seq1 0x2d -+#define MI_DoSleepOneTickThenRunable 0x2e -+#define MI_RunEvent 0x2f -+#define MI_EnqueueThread 0x30 -+#define MI_CheckContext0 0x31 -+#define MI_EnqueueDma 0x32 -+#define MI_CprocTrapping 0x33 -+#define MI_CprocTrapping_seq1 0x34 -+#define MI_WaitForRemoteRoutes1 0x35 -+#define MI_SetEventCommand 0x36 -+#define MI_DoSetEvent 0x37 -+#define MI_DoRemoteSetEventNowOrTrapQueueingDma 0x38 -+#define MI_DoRemoteSetEventNowOrTrapQueueingDma_seq1 0x39 -+#define MI_SendRemoteDmaRoutes2 0x3a -+#define MI_WaitForRemoteRoutes2 0x3b -+#define MI_WaitEventCommandTy0 0x3c -+#define MI_DequeueNonSysCntxDma2 0x3d -+#define MI_WaitEventCommandTy1 0x3e -+#define MI_WaitEventCommandTy1_seq1 0x3f -+#define MI_DequeueNonSysCntxThread 0x40 -+#define MI_DequeueSysCntxDma1 0x41 -+#define MI_DequeueSysCntxThread 0x42 -+#define MI_TestNonSysCntxDmaQueueEmpty 0x43 -+#define MI_TestNonSysCntxDmaQueueEmpty_seq1 0x44 -+#define MI_TestNonSysCntxDmaQueueEmpty_seq2 0x45 -+#define MI_RunThreadCommand 0x46 -+#define MI_SetEventWaitForLastAcess 0x47 -+#define MI_SetEventReadWait 0x48 -+#define MI_SetEventReadWait_seq1 0x49 -+#define MI_TestEventType 0x4a -+#define MI_TestEventType_seq1 0x4b -+#define MI_TestEventBit2 0x4c -+#define MI_DmaDescOrBlockCopyOrChainedEvent 0x4d -+#define MI_RunThread 0x4e -+#define MI_RunThread1 0x4f 
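Editorial aside (hedged; the urom_addrs.h define list continues below through MI_SwapForRunable at 0xff): the MI_* values here are dense, running 0x00 through 0xff in the same order as the minames.h entries earlier, so a trapped microcode address can be decoded either by direct indexing or, more defensively, by searching the name table. A sketch reusing the hypothetical mi_names[] table from the earlier note; mi_name_of is an assumed helper, not a name from the patch:

/* Hypothetical helper: map a microcode address to its printable name.
 * Falls back to a fixed string for values not present in the table. */
static const char *mi_name_of(int mi)
{
	int i;

	for (i = 0; i < (int)(sizeof(mi_names) / sizeof(mi_names[0])); i++)
		if (mi_names[i].mi == mi)
			return mi_names[i].name;
	return "MI_Unknown";
}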
-+#define MI_RunThread1_seq1 0x50 -+#define MI_IncDmaSysCntxBPtr 0x51 -+#define MI_IncDmaSysCntxBPtr_seq1 0x52 -+#define MI_IncDmaSysCntxBPtr_seq2 0x53 -+#define MI_WaitForCntxDmaDescRead 0x54 -+#define MI_FillInContext 0x55 -+#define MI_FillInContext_seq1 0x56 -+#define MI_WriteNewDescToQueue 0x57 -+#define MI_WriteNewDescToQueue_seq1 0x58 -+#define MI_TestForQueueWrap 0x59 -+#define MI_TestForQueueWrap_seq1 0x5a -+#define MI_TestQueueIsFull 0x5b -+#define MI_TestQueueIsFull_seq1 0x5c -+#define MI_TestQueueIsFull_seq2 0x5d -+#define MI_CheckPsychoShitFixup 0x5e -+#define MI_PsychoShitFixupForcedRead 0x5f -+#define MI_PrepareDMATimeSlice 0x60 -+#define MI_PrepareDMATimeSlice_seq1 0x61 -+#define MI_TProcRestartFromTrapOrTestEventBit2 0x62 -+#define MI_TProcRestartFromTrapOrTestEventBit2_seq1 0x63 -+#define MI_WaitForGlobalsRead 0x64 -+#define MI_WaitForNPCRead 0x65 -+#define MI_EventInterrupt 0x66 -+#define MI_EventInterrupt_seq1 0x67 -+#define MI_EventInterrupt_seq2 0x68 -+#define MI_EventInterrupt_seq3 0x69 -+#define MI_TestSysCntxDmaQueueEmpty 0x6a -+#define MI_TestSysCntxDmaQueueEmpty_seq1 0x6b -+#define MI_TestIfRemoteDesc 0x6c -+#define MI_DoDmaLocalSetEvent 0x6d -+#define MI_DoDmaLocalSetEvent_seq1 0x6e -+#define MI_DoDmaLocalSetEvent_seq2 0x6f -+#define MI_DmaLoop1 0x70 -+#define MI_ExitDmaLoop 0x71 -+#define MI_ExitDmaLoop_seq1 0x72 -+#define MI_RemoteDmaTestPAckType 0x73 -+#define MI_PacketDiscardOrTestFailRecIfCCis0 0x74 -+#define MI_PacketDiscardOrTestFailRecIfCCis0_seq1 0x75 -+#define MI_TestNackFailIsZero2 0x76 -+#define MI_TestNackFailIsZero3 0x77 -+#define MI_DmaFailCountError 0x78 -+#define MI_TestDmaForSysCntx 0x79 -+#define MI_TestDmaForSysCntx_seq1 0x7a -+#define MI_TestDmaForSysCntx_seq2 0x7b -+#define MI_TestAeqB2 0x7c -+#define MI_TestAeqB2_seq1 0x7d -+#define MI_GetNextDmaDescriptor 0x7e -+#define MI_DequeueSysCntxDma2 0x7f -+#define MI_InputSetEvent 0x80 -+#define MI_PutBackSysCntxDma 0x81 -+#define MI_PutBackSysCntxDma_seq1 0x82 -+#define MI_PutBackSysCntxDma_seq2 0x83 -+#define MI_InputRemoteDma 0x84 -+#define MI_InputRemoteDma_seq1 0x85 -+#define MI_WaitOneTickForWakeup1 0x86 -+#define MI_SendRemoteDmaDesc 0x87 -+#define MI_InputLockQueue 0x88 -+#define MI_CloseTheTrappedPacketIfCCis1 0x89 -+#define MI_CloseTheTrappedPacketIfCCis1_seq1 0x8a -+#define MI_PostDmaInterrupt 0x8b -+#define MI_InputUnLockQueue 0x8c -+#define MI_WaitForUnLockDescRead 0x8d -+#define MI_SendEOPforRemoteDma 0x8e -+#define MI_LookAtRemoteAck 0x8f -+#define MI_InputWriteBlockQueue 0x90 -+#define MI_WaitForSpStore 0x91 -+#define MI_TProcNext 0x92 -+#define MI_TProcStoppedRunning 0x93 -+#define MI_InputWriteBlock 0x94 -+#define MI_RunDmaOrDeqNonSysCntxDma 0x95 -+#define MI_ExecuteDmaDescriptorForRun 0x96 -+#define MI_ConfirmQueueLock 0x97 -+#define MI_DmaInputIdentify 0x98 -+#define MI_TProcStoppedRunning2 0x99 -+#define MI_TProcStoppedRunning2_seq1 0x9a -+#define MI_TProcStoppedRunning2_seq2 0x9b -+#define MI_ThreadInputIdentify 0x9c -+#define MI_InputIdWriteAddrAndType3 0x9d -+#define MI_IProcTrappedWriteStatus 0x9e -+#define MI_FinishTrappingEop 0x9f -+#define MI_InputTestTrans 0xa0 -+#define MI_TestAeqB3 0xa1 -+#define MI_ThreadUpdateNonSysCntxBack 0xa2 -+#define MI_ThreadQueueOverflow 0xa3 -+#define MI_RunContext0Thread 0xa4 -+#define MI_RunContext0Thread_seq1 0xa5 -+#define MI_RunContext0Thread_seq2 0xa6 -+#define MI_RunDmaDesc 0xa7 -+#define MI_RunDmaDesc_seq1 0xa8 -+#define MI_RunDmaDesc_seq2 0xa9 -+#define MI_TestAeqB 0xaa -+#define MI_WaitForNonCntxDmaDescRead 0xab -+#define 
MI_DmaQueueOverflow 0xac -+#define MI_BlockCopyEvent 0xad -+#define MI_BlockCopyEventReadBlock 0xae -+#define MI_BlockCopyWaitForReadData 0xaf -+#define MI_InputWriteWord 0xb0 -+#define MI_TraceSetEvents 0xb1 -+#define MI_TraceSetEvents_seq1 0xb2 -+#define MI_TraceSetEvents_seq2 0xb3 -+#define MI_InputWriteDoubleWd 0xb4 -+#define MI_SendLockTransIfCCis1 0xb5 -+#define MI_WaitForDmaRoutes1 0xb6 -+#define MI_LoadDmaContext 0xb7 -+#define MI_InputTestAndSetWord 0xb8 -+#define MI_InputTestAndSetWord_seq1 0xb9 -+#define MI_GetDestEventValue 0xba -+#define MI_SendDmaIdentify 0xbb -+#define MI_InputAtomicAddWord 0xbc -+#define MI_LoadBFromTransD0 0xbd -+#define MI_ConditionalWriteBackCCTrue 0xbe -+#define MI_WaitOneTickForWakeup 0xbf -+#define MI_SendFinalUnlockTrans 0xc0 -+#define MI_SendDmaEOP 0xc1 -+#define MI_GenLastAddrForPsycho 0xc2 -+#define MI_FailedAckIfCCis0 0xc3 -+#define MI_FailedAckIfCCis0_seq1 0xc4 -+#define MI_WriteDmaSysCntxDesc 0xc5 -+#define MI_TimesliceDmaQueueOverflow 0xc6 -+#define MI_DequeueNonSysCntxThread1 0xc7 -+#define MI_DequeueNonSysCntxThread1_seq1 0xc8 -+#define MI_TestThreadQueueEmpty 0xc9 -+#define MI_ClearThreadQueueIfCC 0xca -+#define MI_DequeueSysCntxThread1 0xcb -+#define MI_DequeueSysCntxThread1_seq1 0xcc -+#define MI_TProcStartUpGeneric 0xcd -+#define MI_WaitForPCload2 0xce -+#define MI_WaitForNPCWrite 0xcf -+#define MI_WaitForEventWaitAddr 0xd0 -+#define MI_WaitForWaitEventAccess 0xd1 -+#define MI_WaitForWaitEventAccess_seq1 0xd2 -+#define MI_WaitForWaitEventDesc 0xd3 -+#define MI_WaitForEventReadTy0 0xd4 -+#define MI_SendCondTestFail 0xd5 -+#define MI_InputMoveToNextTrans 0xd6 -+#define MI_ThreadUpdateSysCntxBack 0xd7 -+#define MI_FinishedSetEvent 0xd8 -+#define MI_EventIntUpdateBPtr 0xd9 -+#define MI_EventQueueOverflow 0xda -+#define MI_MaskLowerSource 0xdb -+#define MI_DmaLoop 0xdc -+#define MI_SendNullSetEvent 0xdd -+#define MI_SendFinalSetEvent 0xde -+#define MI_TestNackFailIsZero1 0xdf -+#define MI_DmaPacketTimedOutOrPacketError 0xe0 -+#define MI_NextPacketIsLast 0xe1 -+#define MI_TestForZeroLengthDma 0xe2 -+#define MI_WaitForPCload 0xe3 -+#define MI_ReadInIns 0xe4 -+#define MI_WaitForInsRead 0xe5 -+#define MI_WaitForLocals 0xe6 -+#define MI_WaitForOutsWrite 0xe7 -+#define MI_WaitForWaitEvWrBack 0xe8 -+#define MI_WaitForLockRead 0xe9 -+#define MI_TestQueueLock 0xea -+#define MI_InputIdWriteAddrAndType 0xeb -+#define MI_InputIdWriteAddrAndType2 0xec -+#define MI_ThreadInputIdentify2 0xed -+#define MI_WriteIntoTrapArea0 0xee -+#define MI_GenQueueBlockWrAddr 0xef -+#define MI_InputDiscardFreeLock 0xf0 -+#define MI_WriteIntoTrapArea1 0xf1 -+#define MI_WriteIntoTrapArea2 0xf2 -+#define MI_ResetBPtrToBase 0xf3 -+#define MI_InputDoTrap 0xf4 -+#define MI_RemoteDmaCntxt0Update 0xf5 -+#define MI_ClearQueueLock 0xf6 -+#define MI_IProcTrappedBlockWriteData 0xf7 -+#define MI_FillContextFilter 0xf8 -+#define MI_IProcTrapped4 0xf9 -+#define MI_RunSysCntxDma 0xfa -+#define MI_ChainedEventError 0xfb -+#define MI_InputTrappingEOP 0xfc -+#define MI_CheckForRunIfZero 0xfd -+#define MI_TestForBreakOrSuspend 0xfe -+#define MI_SwapForRunable 0xff -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan3/vmseg.h linux-2.6.9/include/elan3/vmseg.h ---- clean/include/elan3/vmseg.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan3/vmseg.h 2003-09-24 09:57:24.000000000 -0400 -@@ -0,0 +1,75 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef _VM_SEG_ELAN3_H -+#define _VM_SEG_ELAN3_H -+ -+#ident "$Id: vmseg.h,v 1.20 2003/09/24 13:57:24 david Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/vmseg.h,v $*/ -+ -+#include -+ -+/* -+ * This segment maps Elan registers; it is fixed size and has 8K -+ * pages split up as follows: -+ * -+ * ---------------------------------------- -+ * | Performance Counters (read-only) | -+ * ---------------------------------------- -+ * | Flag Page (read-only) | -+ * ---------------------------------------- -+ * | Command Port | -+ * ---------------------------------------- -+ */ -+typedef volatile struct elan3_flagstats -+{ -+ u_int CommandFlag; -+ u_int PageFaults; -+ u_int CProcTraps; -+ u_int DProcTraps; -+ u_int TProcTraps; -+ u_int IProcTraps; -+ u_int EopBadAcks; -+ u_int EopResets; -+ u_int DmaNetworkErrors; -+ u_int DmaIdentifyNetworkErrors; -+ u_int ThreadIdentifyNetworkErrors; -+ u_int DmaRetries; -+ u_int ThreadSystemCalls; -+ u_int ThreadElanCalls; -+ u_int LoadVirtualProcess; -+} ELAN3_FLAGSTATS; -+ -+#ifdef DIGITAL_UNIX -+typedef volatile union elan3_flagpage -+{ -+ u_char Padding[8192]; -+ ELAN3_FLAGSTATS Stats; -+} ELAN3_FLAGPAGE; -+ -+typedef volatile struct elan3_vmseg -+{ -+ E3_CommandPort CommandPort; -+ ELAN3_FLAGPAGE FlagPage; -+ E3_User_Regs UserRegs; -+} ELAN3_VMSEG; -+ -+#define SEGELAN3_SIZE (sizeof (ELAN3_VMSEG)) -+ -+#define SEGELAN3_COMMAND_PORT 0 -+#define SEGELAN3_FLAG_PAGE 1 -+#define SEGELAN3_PERF_COUNTERS 2 -+ -+#endif /* DIGITAL_UNIX */ -+ -+#endif /* _VM_SEG_ELAN3_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan3/vpd.h linux-2.6.9/include/elan3/vpd.h ---- clean/include/elan3/vpd.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan3/vpd.h 2002-08-09 07:23:34.000000000 -0400 -@@ -0,0 +1,47 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "$Id: vpd.h,v 1.5 2002/08/09 11:23:34 addy Exp $" -+/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/vpd.h,v $*/ -+ -+#ifndef __ELAN3_VPD_H -+#define __ELAN3_VPD_H -+ -+#define LARGE_RESOURCE_BIT 0x80 -+ -+#define SMALL_RESOURCE_COMPATIBLE_DEVICE_ID 0x3 -+#define SMALL_RESOURCE_VENDOR_DEFINED 0xE -+#define SMALL_RESOURCE_END_TAG 0xF -+ -+#define LARGE_RESOURCE_STRING 0x2 -+#define LARGE_RESOURCE_VENDOR_DEFINED 0x4 -+#define LARGE_RESOURCE_VITAL_PRODUCT_DATA 0x10 -+ -+#define VPD_PART_NUMBER "PN" -+#define VPD_FRU_PART_NUMBER "FN" -+#define VPD_EC_LEVEL "EC" -+#define VPD_MANUFACTURE_ID "MN" -+#define VPD_SERIAL_NUMBER "SN" -+ -+#define VPD_LOAD_ID "LI" -+#define VPD_ROM_LEVEL "RL" -+#define VPD_ALTERABLE_ROM_LEVEL "RM" -+#define VPD_NETWORK_ADDRESS "NA" -+#define VPD_DEVICE_DRIVER_LEVEL "DD" -+#define VPD_DIAGNOSTIC_LEVEL "DG" -+#define VPD_LOADABLE_MICROCODE_LEVEL "LL" -+#define VPD_VENDOR_ID "VI" -+#define VPD_FUNCTION_NUMBER "FU" -+#define VPD_SUBSYSTEM_VENDOR_ID "SI" -+ -+#endif /* __ELAN3_VPD_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/elan4/commands.h linux-2.6.9/include/elan4/commands.h ---- clean/include/elan4/commands.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan4/commands.h 2004-06-16 11:45:02.000000000 -0400 -@@ -0,0 +1,247 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN4_COMMANDS_H -+#define __ELAN4_COMMANDS_H -+ -+#ident "$Id: commands.h,v 1.29 2004/06/16 15:45:02 addy Exp $" -+/* $Source: /cvs/master/quadrics/elan4hdr/commands.h,v $*/ -+ -+/* -+ * This header file describes the command format for the Elan 4. -+ * See CommandFormat.doc -+ */ -+ -+/* -+ * Number of channels traced in elanlib_trace.c -+ */ -+#define TRACE_MAX_CHANNELS 2 -+ -+/* -+ * Define encoding for the commands issued into the command queues -+ */ -+#define RUN_THREAD_CMD 0x00 -+#define OPEN_STEN_PKT_CMD 0x01 -+#define WRITE_DWORD_CMD 0x02 -+#define ADD_DWORD_CMD 0x03 -+#define COPY64_CMD 0x05 -+#define GUARD_CMD 0x06 -+#define SET_EVENT_CMD 0x07 -+#define SEND_TRANS_CMD 0x09 -+#define INTERRUPT_CMD 0x0d -+#define RUN_DMA_CMD 0x0e -+#define SET_EVENTN_CMD 0x0f -+#define NOP_CMD 0x17 -+#define MAKE_EXT_CLEAN_CMD 0x37 -+#define WAIT_EVENT_CMD 0x1f -+ -+/* -+ * Define the portion of the data word the user is NOT -+ * allowed to use. This varies with Command type. -+ */ -+#define RUN_THREAD_CMD_MASK 0x03 -+#define OPEN_STEN_PKT_CMD_MASK 0x0f -+#define WRITE_DWORD_CMD_MASK 0x07 -+#define ADD_DWORD_CMD_MASK 0x07 -+#define COPY64_CMD_MASK 0x0f -+#define GUARD_CMD_MASK 0x0f -+#define SET_EVENT_CMD_MASK 0x1f -+#define SEND_TRANS_CMD_MASK 0x1f -+#define INTERRUPT_CMD_MASK 0x0f -+#define RUN_DMA_CMD_MASK 0x0f -+#define SET_EVENTN_CMD_MASK 0x1f -+#define NOP_CMD_MASK 0x3f -+#define MAKE_EXT_CLEAN_MASK 0x3f -+#define WAIT_EVENT_CMD_MASK 0x1f -+ -+#define COPY64_DATA_TYPE_SHIFT 0x4 -+#define COPY64_DTYPE_BYTE (0 << COPY64_DATA_TYPE_SHIFT) -+#define COPY64_DTYPE_SHORT (1 << COPY64_DATA_TYPE_SHIFT) -+#define COPY64_DTYPE_WORD (2 << COPY64_DATA_TYPE_SHIFT) -+#define COPY64_DTYPE_LONG (3 << COPY64_DATA_TYPE_SHIFT) -+ -+/* -+ * SET_EVENTN - word 1 has the following form -+ * [63:5] Event Address -+ * [4:0] Part Set Value. 
-+ */ -+#define SET_EVENT_PART_SET_MASK 0x1f -+ -+/* OPEN_STEN_PKT_CMD -+ * [63:32] Vproc -+ * [31] Use Test -+ * [30:28] unused -+ * [27:21] Test Acceptable PAck code -+ * [20:16] Test Ack Channel Number -+ * [15:9] Acceptable PAck code -+ * [8:4] Ack Channel Number (1 bit on Elan4) -+ * [3:0] Command type -+ */ -+/* Acceptable PAck code */ -+#define PACK_OK (1 << 0) -+#define PACK_TESTFAIL (1 << 1) -+#define PACK_DISCARD (1 << 2) -+#define RESTART_COUNT_ZERO (1 << 3) -+#define PACK_ERROR (1 << 7) -+#define PACK_TIMEOUT (1 << 8) -+ -+/* -+ *#ifndef USE_DIRTY_COMMANDS -+ *#define USE_DIRTY_COMMANDS -+ *#endif -+ */ -+#ifdef USE_DIRTY_COMMANDS -+#define OPEN_PACKET_USED_MASK 0x00000000780f00e0ULL -+#define SEND_TRANS_USED_MASK 0xffffffff0000fff0ULL -+#define COPY64_WRITE_USED_MASK 0x000000000000000fULL -+#define MAIN_INT_USED_MASK 0x0000000000003ff0ULL -+#define GUARD_USED_MASK 0xfffffe007000fde0ULL -+#define DMA_TYPESIZE_USED_MASK 0x000000000000fff0ULL -+#define SETEVENTN_USED_MASK 0xffffffffffffffe0ULL -+#define NOP_USED_MASK 0xffffffffffffffc0ULL -+#define EXT_CLEAN_USED_MASK 0xffffffffffffffc0ULL -+#define WAIT_CNT_TYPE_USED_MASK 0x00000000fffff800ULL -+#else -+#define OPEN_PACKET_USED_MASK 0x0ULL -+#define SEND_TRANS_USED_MASK 0x0ULL -+#define COPY64_WRITE_USED_MASK 0x0ULL -+#define MAIN_INT_USED_MASK 0x0ULL -+#define GUARD_USED_MASK 0x0ULL -+#define DMA_TYPESIZE_USED_MASK 0x0ULL -+#define SETEVENTN_USED_MASK 0x0ULL -+#define NOP_USED_MASK 0x0ULL -+#define EXT_CLEAN_USED_MASK 0x0ULL -+#define WAIT_CNT_TYPE_USED_MASK 0x0ULL -+#endif -+ -+#define OPEN_PACKET(chan, code, vproc) \ -+ ((((chan) & 1) << 4) | (((code) & 0x7f) << 9) | ((E4_uint64)(vproc) << 32) | OPEN_STEN_PKT_CMD) -+ -+#define OPEN_PACKET_TEST(chan, code, vproc, tchan, tcode) \ -+ ((((chan) & 1) << 4) | (((code) & 0x7f) << 9) | ((E4_uint64)(vproc) << 32) | \ -+ (((tchan) & 1) << 16) | (((tcode) & 0x7f) << 21) | (((E4_uint64) 1) << 31) | OPEN_STEN_PKT_CMD) -+ -+/* -+ * GUARD_CMD -+ * [63:41] unused -+ * [40] Reset Restart Fail Count // only performed if the Guard executes the next command. -+ * [39:32] New Restart Fail Count value -+ * [31] Use Test -+ * [30:28] unused -+ * [27:21] Test Acceptable PAck code -+ * [20:16] Test Ack Channel Number -+ * [15:9] unused -+ * [8:4] Ack Channel Number -+ * [3:0] Command type -+ */ -+/* GUARD_CHANNEL(chan) -+ */ -+#define GUARD_ALL_CHANNELS ((1 << 9) | GUARD_CMD) -+#define GUARD_CHANNEL(chan) ((((chan) & 1) << 4) | GUARD_CMD) -+#define GUARD_TEST(chan,code) ((1ull << 31) | (((code) & 0x7f) << 21) | (((chan) & 1) << 16)) -+#define GUARD_RESET(count) ((1ull << 40) | ((((E4_uint64) count) & 0xff) << 32)) -+ -+#define GUARD_CHANNEL_TEST(chan,tchan,tcode) \ -+ ((((chan) & 1) << 4) | (((tchan) & 1) << 16) | (((tcode) & 0x7f) << 21) | \ -+ (((E4_uint64) 1) << 31) | GUARD_CMD) -+ -+/* -+ * SEND_TRANS_CMD -+ * [63:32] unused -+ * [31:16] transaction type -+ * [15:4] unused -+ * [3:0] Command type -+ */ -+#define SEND_TRANS(TransType) (((TransType) << 16) | SEND_TRANS_CMD) -+ -+/* -+ * Command port trace debug levels -+ */ -+#define TRACE_CMD_BUFFER 0x01 -+#define TRACE_CMD_TYPE 0x02 -+#define TRACE_CHANNEL_OPENS 0x04 -+#define TRACE_GUARDED_ATOMICS 0x08 -+#define TRACE_CMD_TIMEOUT 0x10 -+ -+/* -+ * Commands that should be preceded by a GUARD_CMD. 
-+ */
-+#define IS_ATOMIC_CMD(cmd) \
-+ ((cmd) == RUN_THREAD_CMD || (cmd) == ADD_DWORD_CMD || (cmd) == INTERRUPT_CMD || \
-+ (cmd) == RUN_DMA_CMD || (cmd) == SET_EVENT_CMD || (cmd) == SET_EVENTN_CMD || \
-+ (cmd) == WAIT_EVENT_CMD)
-+
-+#ifndef _ASM
-+
-+/*
-+ * These structures are used to build event copy command streams. They are intended to be included
-+ * in a larger structure to form a self-documenting command sequence that can be easily copied and manipulated.
-+ */
-+
-+typedef struct e4_runthreadcmd
-+{
-+ E4_Addr PC;
-+ E4_uint64 r[6];
-+} E4_RunThreadCmd;
-+
-+typedef E4_uint64 E4_OpenCmd;
-+
-+typedef struct e4_writecmd
-+{
-+ E4_Addr WriteAddr;
-+ E4_uint64 WriteValue;
-+} E4_WriteCmd;
-+
-+typedef struct e4_addcmd
-+{
-+ E4_Addr AddAddr;
-+ E4_uint64 AddValue;
-+} E4_AddCmd;
-+
-+typedef struct e4_copycmd
-+{
-+ E4_Addr SrcAddr;
-+ E4_Addr DstAddr;
-+} E4_CopyCmd;
-+
-+typedef E4_uint64 E4_GaurdCmd;
-+typedef E4_uint64 E4_SetEventCmd;
-+
-+/*
-+ * The data for this command must be declared as a vector after it.
-+ */
-+typedef struct e4_sendtranscmd
-+{
-+ E4_Addr Type;
-+ E4_Addr Addr;
-+} E4_SendTransCmd;
-+
-+typedef E4_uint64 E4_IntCmd;
-+
-+/* The normal Dma struct can be used here. */
-+
-+typedef struct e4_seteventncmd
-+{
-+ E4_Addr Event;
-+ E4_Addr SetCount;
-+} E4_SetEventNCmd;
-+
-+typedef E4_uint64 E4_NopCmd;
-+typedef E4_uint64 E4_MakeExtCleanCmd;
-+
-+typedef struct e4_waitcmd
-+{
-+ E4_Addr ev_Event;
-+ E4_Addr ev_CountType;
-+ E4_Addr ev_Params[2];
-+} E4_WaitCmd;
-+
-+#endif /* _ASM */
-+
-+#endif /* __ELAN4_COMMANDS_H */
-+
-diff -urN clean/include/elan4/debug.h linux-2.6.9/include/elan4/debug.h
---- clean/include/elan4/debug.h 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/elan4/debug.h 2005-03-23 06:06:15.000000000 -0500
-@@ -0,0 +1,112 @@
-+/*
-+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
-+ * Copyright (c) 2002-2003 by Quadrics Ltd.
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ifndef _ELAN4_DEBUG_H
-+#define _ELAN4_DEBUG_H
-+
-+#ident "$Id: debug.h,v 1.21 2005/03/23 11:06:15 david Exp $"
-+/* $Source: /cvs/master/quadrics/elan4mod/debug.h,v $ */
-+
-+/* values for "type" field - note a "ctxt" is permissible */
-+/* and BUFFER/CONSOLE are for explicit calls to elan4_debugf() */
-+#define DBG_DEVICE ((void *) 0)
-+#define DBG_USER ((void *) 1)
-+
-+#define DBG_BUFFER ((void *) 62)
-+#define DBG_CONSOLE ((void *) 63)
-+#define DBG_NTYPES 64
-+
-+/* values for "mode" field */
-+#define DBG_CONFIG 0x00000001
-+#define DBG_INTR 0x00000002
-+#define DBG_MAININT 0x00000004
-+#define DBG_SDRAM 0x00000008
-+#define DBG_MMU 0x00000010
-+#define DBG_REGISTER 0x00000020
-+#define DBG_CQ 0x00000040
-+#define DBG_NETWORK_CTX 0x00000080
-+
-+#define DBG_FLUSH 0x00000100
-+#define DBG_FILE 0x00000200
-+#define DBG_CONTROL 0x00000400
-+#define DBG_MEM 0x00000800
-+
-+#define DBG_PERM 0x00001000
-+#define DBG_FAULT 0x00002000
-+#define DBG_SWAP 0x00004000
-+#define DBG_TRAP 0x00008000
-+#define DBG_DDCQ 0x00010000
-+#define DBG_VP 0x00020000
-+#define DBG_RESTART 0x00040000
-+#define DBG_RESUME 0x00080000
-+#define DBG_CPROC 0x00100000
-+#define DBG_DPROC 0x00200000
-+#define DBG_EPROC 0x00400000
-+#define DBG_IPROC 0x00800000
-+#define DBG_TPROC 0x01000000
-+#define DBG_IOPROC 0x02000000
-+#define DBG_ROUTE 0x04000000
-+#define DBG_NETERR 0x08000000
-+
-+#define DBG_ALL 0x7FFFFFFF
-+
-+
-+#ifdef DEBUG_PRINTF
-+
-+# define PRINTF0(type,m,fmt) ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt) : (void)0)
-+# define PRINTF1(type,m,fmt,a) ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a) : (void)0)
-+# define PRINTF2(type,m,fmt,a,b) ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b) : (void)0)
-+# define PRINTF3(type,m,fmt,a,b,c) ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c) : (void)0)
-+# define PRINTF4(type,m,fmt,a,b,c,d) ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d) : (void)0)
-+# define PRINTF5(type,m,fmt,a,b,c,d,e) ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d,e) : (void)0)
-+# define PRINTF6(type,m,fmt,a,b,c,d,e,f) ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d,e,f) : (void)0)
-+# define PRINTF7(type,m,fmt,a,b,c,d,e,f,g) ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d,e,f,g) : (void)0)
-+# define PRINTF8(type,m,fmt,a,b,c,d,e,f,g,h) ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d,e,f,g,h) : (void)0)
-+# define PRINTF9(type,m,fmt,a,b,c,d,e,f,g,h,i) ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d,e,f,g,h,i): (void)0)
-+#ifdef __GNUC__
-+# define PRINTF(type,m,args...) ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m, ##args) : (void)0)
-+#endif
-+# define DBGCMD(type,m,cmd) ((elan4_debug&(m) || (type) == DBG_CONSOLE) ?
(void) (cmd) : (void) 0) -+ -+#else -+ -+# define PRINTF0(type,m,fmt) (0) -+# define PRINTF1(type,m,fmt,a) (0) -+# define PRINTF2(type,m,fmt,a,b) (0) -+# define PRINTF3(type,m,fmt,a,b,c) (0) -+# define PRINTF4(type,m,fmt,a,b,c,d) (0) -+# define PRINTF5(type,m,fmt,a,b,c,d,e) (0) -+# define PRINTF6(type,m,fmt,a,b,c,d,e,f) (0) -+# define PRINTF7(type,m,fmt,a,b,c,d,e,f,g) (0) -+# define PRINTF8(type,m,fmt,a,b,c,d,e,f,g,h) (0) -+# define PRINTF9(type,m,fmt,a,b,c,d,e,f,g,h,i) (0) -+#ifdef __GNUC__ -+# define PRINTF(type,m,args...) -+#endif -+# define DBGCMD(type,m,cmd) ((void) 0) -+ -+#endif /* DEBUG_PRINTF */ -+ -+extern unsigned elan4_debug; -+extern unsigned elan4_debug_toconsole; -+extern unsigned elan4_debug_tobuffer; -+extern unsigned elan4_debug_display_ctxt; -+extern unsigned elan4_debug_ignore_ctxt; -+extern unsigned elan4_debug_ignore_type; -+ -+extern void elan4_debug_init(void); -+extern void elan4_debug_fini(void); -+extern void elan4_debugf (void *type, int mode, char *fmt, ...); -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -+#endif /* _ELAN4_DEBUG_H */ -diff -urN clean/include/elan4/device.h linux-2.6.9/include/elan4/device.h ---- clean/include/elan4/device.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan4/device.h 2005-08-09 05:57:04.000000000 -0400 -@@ -0,0 +1,888 @@ -+/* -+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN4_ELANDEV_H -+#define __ELAN4_ELANDEV_H -+ -+#ident "$Id: device.h,v 1.92.2.2 2005/08/09 09:57:04 mike Exp $" -+/* $Source: /cvs/master/quadrics/elan4mod/device.h,v $ */ -+ -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+ -+#ifdef CONFIG_MPSAS -+#include -+#endif -+ -+#if defined(LINUX) -+#include -+#elif defined(TRU64UNIX) -+#include -+#elif defined(SOLARIS) -+#include -+#endif -+ -+/* -+ * Network context number allocation. 
-+ * [0] neterr fixup system context
-+ * [1] kernel comms system context
-+ * [2048-4095] kernel comms data contexts
-+ */
-+#define ELAN4_NETERR_CONTEXT_NUM 0x00 /* network error fixup context number */
-+#define ELAN4_KCOMM_CONTEXT_NUM 0x01 /* kernel comms context number */
-+#define ELAN4_KCOMM_BASE_CONTEXT_NUM 0x800 /* kernel comms data transfer contexts */
-+#define ELAN4_KCOMM_TOP_CONTEXT_NUM 0xfff
-+
-+#define ELAN4_SYSTEM_CONTEXT(ctx) ((ctx) >= ELAN4_KCOMM_BASE_CONTEXT_NUM)
-+
-+typedef void (ELAN4_HALTFN)(struct elan4_dev *dev, void *arg);
-+
-+typedef struct elan4_haltop
-+{
-+ struct list_head op_link; /* chain on a list */
-+ E4_uint32 op_mask; /* Interrupt mask to see before calling function */
-+
-+ ELAN4_HALTFN *op_function; /* function to call */
-+ void *op_arg; /* argument to pass to function */
-+} ELAN4_HALTOP;
-+
-+typedef void (ELAN4_DMA_FLUSHFN)(struct elan4_dev *dev, void *arg, int qfull);
-+
-+typedef struct elan4_dma_flushop
-+{
-+ struct list_head op_link; /* chain on a list */
-+ ELAN4_DMA_FLUSHFN *op_function; /* function to call */
-+ void *op_arg; /* argument to pass to function */
-+} ELAN4_DMA_FLUSHOP;
-+
-+typedef void (ELAN4_INTFN)(struct elan4_dev *dev, void *arg);
-+
-+typedef struct elan4_intop
-+{
-+ struct list_head op_link; /* chain on a list */
-+ ELAN4_INTFN *op_function; /* function to call */
-+ void *op_arg; /* argument to pass to function */
-+ E4_uint64 op_cookie; /* and main interrupt cookie */
-+} ELAN4_INTOP;
-+
-+typedef struct elan4_eccerrs
-+{
-+ E4_uint64 EccStatus;
-+ E4_uint64 ConfigReg;
-+ E4_uint32 ErrorCount;
-+} ELAN4_ECCERRS;
-+
-+#define SDRAM_MIN_BLOCK_SHIFT 10
-+#define SDRAM_NUM_FREE_LISTS 19 /* allows max 256 Mb block */
-+#define SDRAM_MIN_BLOCK_SIZE (1 << SDRAM_MIN_BLOCK_SHIFT)
-+#define SDRAM_MAX_BLOCK_SIZE (SDRAM_MIN_BLOCK_SIZE << (SDRAM_NUM_FREE_LISTS-1))
-+
-+#if PAGE_SHIFT < 13
-+#define SDRAM_PAGE_SIZE 8192
-+#define SDRAM_PGOFF_OFFSET 1
-+#define SDRAM_PGOFF_MASK (~SDRAM_PGOFF_OFFSET)
-+#else
-+#define SDRAM_PAGE_SIZE PAGE_SIZE
-+#define SDRAM_PGOFF_OFFSET 0
-+#define SDRAM_PGOFF_MASK (~SDRAM_PGOFF_OFFSET)
-+#endif
-+
-+typedef struct elan4_sdram
-+{
-+ sdramaddr_t b_base; /* offset in sdram bar */
-+ unsigned b_size; /* size of bank */
-+ ioaddr_t b_ioaddr; /* ioaddr where mapped into the kernel */
-+ ELAN4_MAP_HANDLE b_handle; /* and mapping handle */
-+ bitmap_t *b_bitmaps[SDRAM_NUM_FREE_LISTS]; /* buddy allocator bitmaps */
-+} ELAN4_SDRAM_BANK;
-+
-+/* command queue */
-+typedef struct elan4_cq
-+{
-+ struct elan4_cqa *cq_cqa; /* command queue allocator this belongs to */
-+ unsigned cq_idx; /* and which command queue this is */
-+
-+ sdramaddr_t cq_space; /* sdram backing up command queue */
-+ unsigned cq_size; /* size value */
-+ unsigned cq_perm; /* permissions */
-+ ioaddr_t cq_mapping; /* mapping of command queue page */
-+ ELAN4_MAP_HANDLE cq_handle; /* and mapping handle */
-+} ELAN4_CQ;
-+
-+/* cqtype flags to elan4_alloccq() */
-+#define CQ_Priority (1 << 0)
-+#define CQ_Reorder (1 << 1)
-+
-+/* command queues are allocated in chunks, so that all the
-+ * command ports are in a single system page */
-+#define ELAN4_CQ_PER_CQA MAX(1, (PAGESIZE/CQ_CommandMappingSize))
-+
-+/* maximum number of command queues per context */
-+#define ELAN4_MAX_CQA (256 / ELAN4_CQ_PER_CQA)
-+
-+typedef struct elan4_cqa
-+{
-+ struct list_head cqa_link; /* linked together */
-+ bitmap_t cqa_bitmap[BT_BITOUL(ELAN4_CQ_PER_CQA)]; /* bitmap of which are free */
-+ unsigned int cqa_type; /* allocation type */
-+ unsigned
int cqa_cqnum; /* base cq number */ -+ unsigned int cqa_ref; /* "mappings" to a queue */ -+ unsigned int cqa_idx; /* index number */ -+ ELAN4_CQ cqa_cq[ELAN4_CQ_PER_CQA]; /* command queue entries */ -+} ELAN4_CQA; -+ -+#define elan4_cq2num(cq) ((cq)->cq_cqa->cqa_cqnum + (cq)->cq_idx) -+#define elan4_cq2idx(cq) ((cq)->cq_cqa->cqa_idx * ELAN4_CQ_PER_CQA + (cq)->cq_idx) -+ -+#ifdef ELAN4_LARGE_PAGE_SUPPORT -+#define NUM_HASH_TABLES 2 -+#else -+#define NUM_HASH_TABLES 1 -+#endif -+ -+typedef struct elan4_ctxt_trans_index -+{ -+ int tbl; -+ int index; -+} ELAN4_CTXT_TRANS_INDEX; -+ -+#define ELAN4_CTXT_MAX_SHUFFLE (10) -+ -+typedef struct elan4_ctxt -+{ -+ struct elan4_dev *ctxt_dev; /* device we're associated with */ -+ struct list_head ctxt_link; /* chained on device */ -+ -+ struct elan4_trap_ops *ctxt_ops; /* client specific operations */ -+ -+ unsigned int ctxt_features; /* device features this context is using */ -+ signed int ctxt_num; /* local context number */ -+ -+ struct list_head ctxt_cqalist; /* link list of command queue allocators */ -+ bitmap_t ctxt_cqamap[BT_BITOUL(ELAN4_MAX_CQA)]; /* bitmap for allocating cqa_idx */ -+ -+ ELAN4_HASH_ENTRY **ctxt_mmuhash[NUM_HASH_TABLES]; /* software hash tables */ -+ spinlock_t ctxt_mmulock; /* and spinlock. */ -+ -+ struct proc_dir_entry *procdir; -+ ELAN4_CTXT_TRANS_INDEX trans_index[NUM_HASH_TABLES]; /* place to stash info for /proc/qsnet/elan4/deviceN/ctxt/N/translations_N */ -+ -+ int shuffle_needed[NUM_HASH_TABLES]; /* true when there are entries in shuffle array */ -+ int shuffle[NUM_HASH_TABLES][ELAN4_CTXT_MAX_SHUFFLE]; /* hashidx's that need shuffling or -1 = none. if all set then shuffle ALL hashidx's */ -+} ELAN4_CTXT; -+ -+typedef struct elan4_trap_ops -+{ -+ void (*op_eproc_trap) (ELAN4_CTXT *ctxt, E4_uint64 status); -+ void (*op_cproc_trap) (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned cqnum); -+ void (*op_dproc_trap) (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit); -+ void (*op_tproc_trap) (ELAN4_CTXT *ctxt, E4_uint64 status); -+ void (*op_iproc_trap) (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit); -+ void (*op_interrupt) (ELAN4_CTXT *ctxt, E4_uint64 cookie); -+ void (*op_neterrmsg) (ELAN4_CTXT *ctxt, ELAN4_NETERR_MSG *msg); -+ void (*op_need_shuffle) (ELAN4_CTXT *ctxt, int tbl, int index); -+} ELAN4_TRAP_OPS; -+ -+typedef struct elan4_route_table -+{ -+ spinlock_t tbl_lock; -+ unsigned tbl_size; -+ sdramaddr_t tbl_entries; -+} ELAN4_ROUTE_TABLE; -+ -+#define DEV_STASH_ROUTE_COUNT 20 -+ -+typedef struct elan4_route_ringbuf { -+ int start; -+ int end; -+ E4_VirtualProcessEntry routes[DEV_STASH_ROUTE_COUNT]; -+} ELAN4_ROUTE_RINGBUF; -+ -+#define elan4_ringbuf_init(ringbuf) memset(&ringbuf, 0, sizeof(ELAN4_ROUTE_RINGBUF)); -+ -+typedef struct elan4_trans_index -+{ -+ int tbl; -+} ELAN4_TRANS_INDEX; -+ -+#define ELAN4_TRANS_STATS_NUM_BUCKETS 7 -+typedef struct elan4_trans_stats -+{ -+ int tbl; -+ int buckets[ELAN4_TRANS_STATS_NUM_BUCKETS]; -+} ELAN4_TRANS_STATS; -+ -+typedef struct elan4_dev -+{ -+ ELAN4_CTXT dev_ctxt; /* context for device operations */ -+ -+ ELAN4_DEV_OSDEP dev_osdep; /* OS specific entries */ -+ -+ int dev_instance; /* device number */ -+ ELAN_DEVINFO dev_devinfo; /* device information (revision etc */ -+ ELAN_POSITION dev_position; /* position connected to switch */ -+ ELAN_DEV_IDX dev_idx; /* device idx registered with elanmod */ -+ -+ kmutex_t dev_lock; /* lock for device state/references */ -+ unsigned dev_state; /* device state */ -+ unsigned dev_references; /* # references */ -+ -+ ioaddr_t 
dev_regs; /* Mapping of device registers */ -+ ELAN4_MAP_HANDLE dev_regs_handle; -+ ioaddr_t dev_rom; /* Mapping of rom */ -+ ELAN4_MAP_HANDLE dev_rom_handle; -+ ioaddr_t dev_i2c; /* Mapping of I2C registers */ -+ ELAN4_MAP_HANDLE dev_i2c_handle; -+ -+ E4_uint64 dev_sdram_cfg; /* SDRAM config value (from ROM) */ -+ E4_uint64 dev_sdram_initial_ecc_val; /* power on ECC register value */ -+ int dev_sdram_numbanks; /* # banks of sdram */ -+ ELAN4_SDRAM_BANK dev_sdram_banks[SDRAM_MAX_BANKS]; /* Mapping of sdram banks */ -+ spinlock_t dev_sdram_lock; /* spinlock for buddy allocator */ -+ sdramaddr_t dev_sdram_freelists[SDRAM_NUM_FREE_LISTS]; -+ unsigned dev_sdram_freecounts[SDRAM_NUM_FREE_LISTS]; -+ -+ physaddr_t dev_regs_phys; /* physaddr of registers */ -+ physaddr_t dev_sdram_phys; /* and of sdram */ -+ -+ sdramaddr_t dev_cacheflush_space; /* sdram reserved for cache flush operation */ -+ -+ sdramaddr_t dev_faultarea; /* fault areas for each unit */ -+ sdramaddr_t dev_inputtraparea; /* trap area for trapped transactions */ -+ sdramaddr_t dev_ctxtable; /* context table (E4_ContextControlBlock) */ -+ int dev_ctxtableshift; /* and size (in bits) */ -+ -+ E4_uint32 dev_syscontrol; /* copy of system control register */ -+ spinlock_t dev_syscontrol_lock; /* spinlock to sequentialise modifications */ -+ unsigned dev_direct_map_pci_writes; /* # counts for CONT_DIRECT_MAP_PCI_WRITES */ -+ -+ volatile E4_uint32 dev_intmask; /* copy of interrupt mask register */ -+ spinlock_t dev_intmask_lock; /* spinlock to sequentialise modifications */ -+ -+ /* i2c section */ -+ spinlock_t dev_i2c_lock; /* spinlock for i2c operations */ -+ unsigned int dev_i2c_led_disabled; /* count of reasons led auto update disabled */ -+ -+ /* mmu section */ -+ unsigned dev_pagesizeval[NUM_HASH_TABLES]; /* page size value */ -+ unsigned dev_pageshift[NUM_HASH_TABLES]; /* pageshift in bits. 
*/ -+ unsigned dev_hashsize[NUM_HASH_TABLES]; /* # entries in mmu hash table */ -+ sdramaddr_t dev_hashtable[NUM_HASH_TABLES]; /* mmu hash table */ -+ ELAN4_HASH_ENTRY *dev_mmuhash[NUM_HASH_TABLES]; /* and software shadow */ -+ ELAN4_HASH_ENTRY *dev_mmufree_count; /* space holder - will use to indicate if there is a free slot in chain */ -+ ELAN4_HASH_ENTRY *dev_mmufreelist; /* and free blocks */ -+ spinlock_t dev_mmulock; -+ E4_uint16 dev_topaddr[4]; /* top address values */ -+ unsigned char dev_topaddrvalid; -+ unsigned char dev_topaddrmode; -+ unsigned char dev_pteval; /* allow setting of relaxed order/dont snoop attributes */ -+ -+ unsigned dev_rsvd_hashmask[NUM_HASH_TABLES]; -+ unsigned dev_rsvd_hashval[NUM_HASH_TABLES]; -+ -+ ELAN4_TRANS_INDEX trans_index[NUM_HASH_TABLES]; /* place to stash info for /proc/qsnet/elan4/deviceN/translations_N */ -+ ELAN4_TRANS_STATS trans_stats[NUM_HASH_TABLES]; /* place to stash info for /proc/qsnet/elan4/deviceN/stats/translations_N */ -+ /* run queues */ -+ sdramaddr_t dev_comqlowpri; /* CProc low & high pri run queues */ -+ sdramaddr_t dev_comqhighpri; -+ -+ sdramaddr_t dev_dmaqlowpri; /* DProc,TProc,Interrupt queues */ -+ sdramaddr_t dev_dmaqhighpri; -+ sdramaddr_t dev_threadqlowpri; -+ sdramaddr_t dev_threadqhighpri; -+ sdramaddr_t dev_interruptq; -+ -+ E4_uint32 dev_interruptq_nfptr; /* cache next main interrupt fptr */ -+ struct list_head dev_interruptq_list; /* list of operations to call when space in interruptq*/ -+ -+ /* command queue section */ -+ sdramaddr_t dev_cqaddr; /* SDRAM address of command queues */ -+ unsigned dev_cqoffset; /* offset for command queue alignment constraints */ -+ unsigned dev_cqcount; /* number of command queue descriptors */ -+ bitmap_t *dev_cqamap; /* bitmap for allocation */ -+ spinlock_t dev_cqlock; /* spinlock to protect bitmap */ -+ unsigned dev_cqreorder; /* offset for first re-ordering queue with mtrr */ -+ -+ /* halt operation section */ -+ struct list_head dev_haltop_list; /* list of operations to call when units halted */ -+ E4_uint32 dev_haltop_mask; /* mask of which ones to halt */ -+ E4_uint32 dev_haltop_active; /* mask of which haltops are executing */ -+ spinlock_t dev_haltop_lock; /* and their spinlock */ -+ struct timer_list dev_haltop_timer; /* timer looking for haltop locked in list */ -+ -+ struct { -+ struct list_head list; /* list of halt operations for DMAs */ -+ ELAN4_CQ *cq; /* and command queue's */ -+ ELAN4_INTOP intop; /* and main interrupt op */ -+ E4_uint64 status; /* status register (when waiting for intop)*/ -+ } dev_dma_flushop[2]; -+ -+ unsigned dev_halt_all_count; /* count of reasons to halt all units */ -+ unsigned dev_halt_lowpri_count; /* count of reasons to halt lowpri queues */ -+ unsigned dev_halt_cproc_count; /* count of reasons to halt command processor */ -+ unsigned dev_halt_dproc_count; /* count of reasons to halt dma processor */ -+ unsigned dev_halt_tproc_count; /* count of reasons to halt thread processor */ -+ unsigned dev_discard_all_count; /* count of reasons to discard all packets */ -+ unsigned dev_discard_lowpri_count; /* count of reasons to discard non-system packets */ -+ unsigned dev_discard_highpri_count; /* count of reasons to discard system packets */ -+ -+ E4_uint32 dev_schedstatus; /* copy of schedule status register */ -+ -+ /* local context allocation section */ -+ spinlock_t dev_ctxlock; /* spinlock to protect bitmap */ -+ bitmap_t *dev_ctxmap; /* bitmap for local context allocation */ -+ -+ spinlock_t dev_ctxt_lock; /* spinlock to protect context 
list */
-+ struct list_head dev_ctxt_list; /* linked list of contexts */
-+
-+ /* locks to sequentialise interrupt handling */
-+ spinlock_t dev_trap_lock; /* spinlock while handling a trap */
-+ spinlock_t dev_requeue_lock; /* spinlock sequentialising cproc requeue */
-+
-+ /* error rate interrupt section */
-+ long dev_error_time; /* lbolt at start of sampling period */
-+ unsigned dev_errors_per_period; /* errors so far this sampling period */
-+ timer_fn_t dev_error_timeoutid; /* timeout to re-enable error interrupts */
-+ timer_fn_t dev_linkerr_timeoutid; /* timeout to clear link error led */
-+
-+ /* kernel threads */
-+ unsigned dev_stop_threads:1; /* kernel threads should exit */
-+
-+ /* main interrupt thread */
-+ kcondvar_t dev_mainint_wait; /* place for mainevent interrupt thread to sleep */
-+ spinlock_t dev_mainint_lock; /* and its spinlock */
-+ unsigned dev_mainint_started:1;
-+ unsigned dev_mainint_stopped:1;
-+
-+ /* device context - this is used to flush insert cache/instruction cache/dmas & threads */
-+ ELAN4_CPROC_TRAP dev_cproc_trap; /* space to extract cproc trap into */
-+
-+ struct list_head dev_intop_list; /* list of main interrupt operations */
-+ spinlock_t dev_intop_lock; /* and spinlock */
-+ E4_uint64 dev_intop_cookie; /* and next cookie to use */
-+
-+ spinlock_t dev_flush_lock; /* spinlock for flushing */
-+ kcondvar_t dev_flush_wait; /* and place to sleep */
-+
-+ ELAN4_CQ *dev_flush_cq[COMMAND_INSERTER_CACHE_ENTRIES]; /* command queues to flush the insert cache */
-+ ELAN4_INTOP dev_flush_op[COMMAND_INSERTER_CACHE_ENTRIES]; /* and a main interrupt operation for each one */
-+ unsigned dev_flush_finished; /* flush command finished */
-+
-+ ELAN4_HALTOP dev_iflush_haltop; /* halt operation for icache flush */
-+ unsigned dev_iflush_queued:1; /* icache haltop queued */
-+
-+ ELAN4_ROUTE_TABLE *dev_routetable; /* virtual process table (for dma queue flush)*/
-+ sdramaddr_t dev_sdrampages[2]; /* pages of sdram to hold suspend code sequence */
-+ E4_Addr dev_tproc_suspend; /* st8suspend instruction */
-+ E4_Addr dev_tproc_space; /* and target memory */
-+
-+ sdramaddr_t dev_neterr_inputq; /* network error input queue descriptor & event */
-+ sdramaddr_t dev_neterr_slots; /* network error message slots */
-+ ELAN4_CQ *dev_neterr_msgcq; /* command queue for sending messages */
-+ ELAN4_CQ *dev_neterr_intcq; /* command queue for message received interrupt */
-+ ELAN4_INTOP dev_neterr_intop; /* and its main interrupt operation */
-+ E4_uint64 dev_neterr_queued; /* # message queued in msgcq */
-+ spinlock_t dev_neterr_lock; /* and spinlock ....
*/ -+ -+ ELAN4_DEV_STATS dev_stats; /* device statistics */ -+ ELAN4_ECCERRS dev_sdramerrs[30]; /* last few sdram errors for procfs */ -+ -+ unsigned int *dev_ack_errors; /* Map of source of dproc ack errors */ -+ ELAN4_ROUTE_RINGBUF dev_ack_error_routes; -+ unsigned int *dev_dproc_timeout; /* Ditto dproc timeout errors */ -+ ELAN4_ROUTE_RINGBUF dev_dproc_timeout_routes; -+ unsigned int *dev_cproc_timeout; /* Ditto cproc timeout errors */ -+ ELAN4_ROUTE_RINGBUF dev_cproc_timeout_routes; -+ -+ unsigned dev_linkerr_signalled; /* linkerror signalled to switch controller */ -+ -+ struct list_head dev_hc_list; /* list of the allocated hash_chunks */ -+ -+ ELAN4_IPROC_TRAP dev_iproc_trap; /* space for iproc trap */ -+} ELAN4_DEV; -+ -+/* values for dev_state */ -+#define ELAN4_STATE_STOPPED (1 << 0) /* device initialised but not started */ -+#define ELAN4_STATE_STARTING (1 << 1) /* device in process of starting */ -+#define ELAN4_STATE_STARTED (1 << 2) /* device started */ -+#define ELAN4_STATE_STOPPING (1 << 3) /* device in process of stopping */ -+ -+extern __inline__ unsigned long long -+__elan4_readq (ELAN4_DEV *dev, ioaddr_t addr) -+{ -+#if defined(__i386) -+ if (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_64BIT_READ) -+ { -+ uint64_t save[2]; -+ uint64_t rval; -+ unsigned long flags, cr0; -+ -+ local_irq_save (flags); -+ -+ /* Save FPU state */ -+ asm volatile("mov %%cr0,%0 ; clts\n" : "=r" (cr0)); -+ -+ /* GNAT 7726: Save 128-bit xmm0 register value */ -+ asm volatile ("movupd %%xmm0,%0\n" : "=m" (save[0])); -+ -+ /* Do a 64-bit PCI read */ -+ asm volatile ("sfence\n" -+ "movq (%1), %%xmm0\n" -+ "movq %%xmm0, %0\n" -+ "sfence\n" -+ : "=m" (rval) : "r" (addr) : "memory"); -+ -+ /* GNAT 7726: Restore 128-bit xmm0 register value */ -+ asm volatile("movupd %0,%%xmm0\n" : : "m" (save[0])); -+ -+ /* Restore FPU state */ -+ asm volatile("mov %0,%%cr0\n" : : "r" (cr0)); -+ -+ local_irq_restore(flags); -+ -+ return rval; -+ } -+#endif -+ return readq ((void *)addr); -+} -+ -+extern __inline__ unsigned int -+__elan4_readl (ELAN4_DEV *dev, ioaddr_t addr) -+{ -+ if (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_64BIT_READ) -+ { -+ uint64_t val = __elan4_readq (dev, ((unsigned long) addr & ~7)); -+ return ((val >> (((unsigned long) addr & 7) << 3)) & 0xffffffff); -+ } -+ return readl ((void *)addr); -+} -+ -+extern __inline__ unsigned int -+__elan4_readw (ELAN4_DEV *dev, ioaddr_t addr) -+{ -+ if (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_64BIT_READ) -+ { -+ uint64_t val = __elan4_readq (dev, ((unsigned long) addr & ~7)); -+ return ((val >> (((unsigned long) addr & 7) << 3)) & 0xffff); -+ } -+ return readw ((void *)addr); -+} -+ -+extern __inline__ unsigned int -+__elan4_readb (ELAN4_DEV *dev, ioaddr_t addr) -+{ -+ if (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_64BIT_READ) -+ { -+ uint64_t val = __elan4_readq (dev, ((unsigned long) addr & ~7)); -+ return ((val >> (((unsigned long) addr & 7) << 3)) & 0xff); -+ } -+ return readb ((void *)addr); -+} -+ -+/* macros for accessing dev->dev_regs.Tags. */ -+#define write_tag(dev,what,val) writeq (val, (void *) (dev->dev_regs + offsetof (E4_Registers, Tags.what))) -+#define read_tag(dev,what) __elan4_readq (dev, dev->dev_regs + offsetof (E4_Registers, Tags.what)) -+ -+/* macros for accessing dev->dev_regs.Regs. 
*/ -+#define write_reg64(dev,what,val) writeq (val, (void *) (dev->dev_regs + offsetof (E4_Registers, Regs.what))) -+#define write_reg32(dev,what,val) writel (val, (void *) (dev->dev_regs + offsetof (E4_Registers, Regs.what))) -+#define read_reg64(dev,what) __elan4_readq (dev, dev->dev_regs + offsetof (E4_Registers, Regs.what)) -+#define read_reg32(dev,what) __elan4_readl (dev, dev->dev_regs + offsetof (E4_Registers, Regs.what)) -+ -+/* macros for accessing dev->dev_regs.uRegs. */ -+#define write_ureg64(dev,what,val) writeq (val, (void *) (dev->dev_regs + offsetof (E4_Registers, uRegs.what))) -+#define write_ureg32(dev,what,val) writel (val, (void *) (dev->dev_regs + offsetof (E4_Registers, uRegs.what))) -+#define read_ureg64(dev,what) __elan4_readq (dev, dev->dev_regs + offsetof (E4_Registers, uRegs.what)) -+#define read_ureg32(dev,what) __elan4_readl (dev, dev->dev_regs + offsetof (E4_Registers, uRegs.what)) -+ -+/* macros for accessing dev->dev_i2c */ -+#define write_i2c(dev,what,val) writeb (val, (void *) (dev->dev_i2c + offsetof (E4_I2C, what))) -+#define read_i2c(dev,what) __elan4_readb (dev, dev->dev_i2c + offsetof (E4_I2C, what)) -+ -+/* macros for accessing dev->dev_rom */ -+#define read_ebus_rom(dev,off) __elan4_readb (dev, dev->dev_rom + off) -+ -+/* PIO flush operations - ensure writes to registers/sdram are ordered */ -+#ifdef CONFIG_IA64_SGI_SN2 -+#define pioflush_reg(dev) read_reg32(dev,InterruptReg) -+#define pioflush_sdram(dev) elan4_sdram_readl(dev, 0) -+#else -+#define pioflush_reg(dev) mb() -+#define pioflush_sdram(dev) mb() -+#endif -+ -+/* macros for manipulating the interrupt mask register */ -+#define SET_INT_MASK(dev,value) \ -+do { \ -+ write_reg32(dev, InterruptMask, (dev)->dev_intmask = (value)); \ -+ pioflush_reg(dev);\ -+} while (0) -+ -+#define CHANGE_INT_MASK(dev, value) \ -+do { \ -+ if ((dev)->dev_intmask != (value)) \ -+ {\ -+ write_reg32 (dev, InterruptMask, (dev)->dev_intmask = (value));\ -+ pioflush_reg(dev);\ -+ }\ -+} while (0) -+ -+#define ENABLE_INT_MASK(dev,value) \ -+do { \ -+ unsigned long flags; \ -+ \ -+ spin_lock_irqsave (&(dev)->dev_intmask_lock, flags); \ -+ write_reg32(dev, InterruptMask, (dev)->dev_intmask |= (value)); \ -+ pioflush_reg(dev);\ -+ spin_unlock_irqrestore (&(dev)->dev_intmask_lock, flags); \ -+} while (0) -+ -+#define DISABLE_INT_MASK(dev,value) \ -+do { \ -+ unsigned long flags; \ -+ \ -+ spin_lock_irqsave (&(dev)->dev_intmask_lock, flags); \ -+ write_reg32(dev, InterruptMask, (dev)->dev_intmask &= ~(value)); \ -+ pioflush_reg(dev);\ -+ spin_unlock_irqrestore (&(dev)->dev_intmask_lock, flags); \ -+} while (0) -+ -+#define SET_SYSCONTROL(dev,what,value) \ -+do { \ -+ unsigned long flags; \ -+\ -+ spin_lock_irqsave (&(dev)->dev_syscontrol_lock, flags); \ -+ if ((dev)->what++ == 0) \ -+ write_reg64 (dev, SysControlReg, (dev)->dev_syscontrol |= (value)); \ -+ pioflush_reg(dev);\ -+ spin_unlock_irqrestore (&(dev)->dev_syscontrol_lock, flags); \ -+} while (0) -+ -+#define CLEAR_SYSCONTROL(dev,what,value) \ -+do { \ -+ unsigned long flags; \ -+\ -+ spin_lock_irqsave (&(dev)->dev_syscontrol_lock, flags); \ -+ if (--(dev)->what == 0)\ -+ write_reg64 (dev, SysControlReg, (dev)->dev_syscontrol &= ~(value)); \ -+ pioflush_reg (dev); \ -+ spin_unlock_irqrestore (&(dev)->dev_syscontrol_lock, flags); \ -+} while (0) -+ -+#define PULSE_SYSCONTROL(dev,value) \ -+do { \ -+ unsigned long flags; \ -+\ -+ spin_lock_irqsave (&(dev)->dev_syscontrol_lock, flags); \ -+ write_reg64 (dev, SysControlReg, (dev)->dev_syscontrol | (value)); \ -+ 
pioflush_reg (dev); \ -+ spin_unlock_irqrestore (&(dev)->dev_syscontrol_lock, flags); \ -+} while (0) -+ -+#define CHANGE_SYSCONTROL(dev,add,sub) \ -+do { \ -+ unsigned long flags; \ -+\ -+ spin_lock_irqsave (&(dev)->dev_syscontrol_lock, flags); \ -+ dev->dev_syscontrol |= (add);\ -+ dev->dev_syscontrol &= ~(sub);\ -+ write_reg64 (dev, SysControlReg, (dev)->dev_syscontrol);\ -+ pioflush_reg (dev); \ -+ spin_unlock_irqrestore (&(dev)->dev_syscontrol_lock, flags); \ -+} while (0) -+ -+#define SET_SCHED_STATUS(dev, value)\ -+do {\ -+ write_reg32 (dev, SchedStatus.Status, (dev)->dev_schedstatus = (value));\ -+ pioflush_reg (dev);\ -+} while (0) -+ -+#define CHANGE_SCHED_STATUS(dev, value)\ -+do {\ -+ if ((dev)->dev_schedstatus != (value))\ -+ {\ -+ write_reg32 (dev, SchedStatus.Status, (dev)->dev_schedstatus = (value));\ -+ pioflush_reg (dev);\ -+ }\ -+} while (0) -+ -+#define PULSE_SCHED_RESTART(dev,value)\ -+do {\ -+ write_reg32 (dev, SchedStatus.Restart, value);\ -+ pioflush_reg (dev);\ -+} while (0) -+ -+/* device context elan address space */ -+#define DEVICE_TPROC_SUSPEND_ADDR (0x1000000000000000ull) -+#define DEVICE_TPROC_SPACE_ADDR (0x1000000000000000ull + SDRAM_PAGE_SIZE) -+#if defined(__LITTLE_ENDIAN__) -+# define DEVICE_TPROC_SUSPEND_INSTR 0xd3f040c0 /* st64suspend %r16, [%r1] */ -+#else -+# define DEVICE_TPROC_SUSPEND_INSTR 0xc040f0d3 /* st64suspend %r16, [%r1] */ -+#endif -+ -+#define DEVICE_NETERR_INPUTQ_ADDR (0x2000000000000000ull) -+#define DEVICE_NETERR_INTCQ_ADDR (0x2000000000000000ull + SDRAM_PAGE_SIZE) -+#define DEVICE_NETERR_SLOTS_ADDR (0x2000000000000000ull + SDRAM_PAGE_SIZE*2) -+ -+/* -+ * Interrupt operation cookie space -+ * [50:48] type -+ * [47:0] value -+ */ -+#define INTOP_PERSISTENT (0x1000000000000ull) -+#define INTOP_ONESHOT (0x2000000000000ull) -+#define INTOP_TYPE_MASK (0x3000000000000ull) -+#define INTOP_VALUE_MASK (0x0ffffffffffffull) -+ -+/* functions for accessing sdram - sdram.c */ -+extern unsigned char elan4_sdram_readb (ELAN4_DEV *dev, sdramaddr_t ptr); -+extern unsigned short elan4_sdram_readw (ELAN4_DEV *dev, sdramaddr_t ptr); -+extern unsigned int elan4_sdram_readl (ELAN4_DEV *dev, sdramaddr_t ptr); -+extern unsigned long long elan4_sdram_readq (ELAN4_DEV *dev, sdramaddr_t ptr); -+extern void elan4_sdram_writeb (ELAN4_DEV *dev, sdramaddr_t ptr, unsigned char val); -+extern void elan4_sdram_writew (ELAN4_DEV *dev, sdramaddr_t ptr, unsigned short val); -+extern void elan4_sdram_writel (ELAN4_DEV *dev, sdramaddr_t ptr, unsigned int val); -+extern void elan4_sdram_writeq (ELAN4_DEV *dev, sdramaddr_t ptr, unsigned long long val); -+ -+extern void elan4_sdram_zerob_sdram (ELAN4_DEV *dev, sdramaddr_t ptr, int nbytes); -+extern void elan4_sdram_zerow_sdram (ELAN4_DEV *dev, sdramaddr_t ptr, int nbytes); -+extern void elan4_sdram_zerol_sdram (ELAN4_DEV *dev, sdramaddr_t ptr, int nbytes); -+extern void elan4_sdram_zeroq_sdram (ELAN4_DEV *dev, sdramaddr_t ptr, int nbytes); -+ -+extern void elan4_sdram_copyb_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes); -+extern void elan4_sdram_copyw_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes); -+extern void elan4_sdram_copyl_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes); -+extern void elan4_sdram_copyq_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes); -+extern void elan4_sdram_copyb_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes); -+extern void elan4_sdram_copyw_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int 
nbytes); -+extern void elan4_sdram_copyl_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes); -+extern void elan4_sdram_copyq_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes); -+ -+/* device.c - configuration */ -+extern unsigned int elan4_hash_0_size_val; -+extern unsigned int elan4_hash_1_size_val; -+extern unsigned int elan4_ctxt_table_shift; -+extern unsigned int elan4_ln2_max_cqs; -+extern unsigned int elan4_dmaq_highpri_size; -+extern unsigned int elan4_threadq_highpri_size; -+extern unsigned int elan4_dmaq_lowpri_size; -+extern unsigned int elan4_threadq_lowpri_size; -+extern unsigned int elan4_interruptq_size; -+extern unsigned int elan4_mainint_punt_loops; -+extern unsigned int elan4_mainint_resched_ticks; -+extern unsigned int elan4_linkport_lock; -+extern unsigned int elan4_eccerr_recheck; -+ -+/* device.c */ -+extern void elan4_set_schedstatus (ELAN4_DEV *dev, E4_uint32 intreg); -+extern void elan4_queue_haltop (ELAN4_DEV *dev, ELAN4_HALTOP *op); -+extern void elan4_queue_intop (ELAN4_DEV *dev, ELAN4_CQ *cq, ELAN4_INTOP *op); -+extern void elan4_register_intop (ELAN4_DEV *dev, ELAN4_INTOP *op); -+extern void elan4_deregister_intop (ELAN4_DEV *dev, ELAN4_INTOP *op); -+extern void elan4_queue_dma_flushop (ELAN4_DEV *dev, ELAN4_DMA_FLUSHOP *op, int hipri); -+extern void elan4_queue_mainintop (ELAN4_DEV *dev, ELAN4_INTOP *op); -+ -+extern int elan4_1msi0 (ELAN4_DEV *dev); -+ -+extern int elan4_insertctxt (ELAN4_DEV *dev, ELAN4_CTXT *ctxt, ELAN4_TRAP_OPS *ops); -+extern void elan4_removectxt (ELAN4_DEV *dev, ELAN4_CTXT *ctxt); -+extern ELAN4_CTXT *elan4_localctxt (ELAN4_DEV *dev, unsigned num); -+extern ELAN4_CTXT *elan4_networkctxt (ELAN4_DEV *dev, unsigned num); -+ -+extern int elan4_attach_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum); -+extern void elan4_detach_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum); -+extern void elan4_set_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum, E4_uint32 state); -+extern void elan4_set_routetable (ELAN4_CTXT *ctxt, ELAN4_ROUTE_TABLE *tbl); -+ -+extern ELAN4_CQA * elan4_getcqa (ELAN4_CTXT *ctxt, unsigned int idx); -+extern void elan4_putcqa (ELAN4_CTXT *ctxt, unsigned int idx); -+extern ELAN4_CQ *elan4_alloccq (ELAN4_CTXT *ctxt, unsigned cqsize, unsigned cqperm, unsigned cqtype); -+extern void elan4_freecq (ELAN4_CTXT *ctxt, ELAN4_CQ *cq); -+extern void elan4_restartcq (ELAN4_DEV *dev, ELAN4_CQ *cq); -+extern void elan4_flushcq (ELAN4_DEV *dev, ELAN4_CQ *cq); -+extern void elan4_updatecq (ELAN4_DEV *dev, ELAN4_CQ *cq, unsigned perm, unsigned restart); -+ -+extern void elan4_flush_icache (ELAN4_CTXT *ctxt); -+extern void elan4_flush_icache_halted (ELAN4_CTXT *ctxt); -+ -+extern int elan4_initialise_device (ELAN4_DEV *dev); -+extern void elan4_finalise_device (ELAN4_DEV *dev); -+extern int elan4_start_device (ELAN4_DEV *dev); -+extern void elan4_stop_device (ELAN4_DEV *dev); -+ -+extern int elan4_compute_position (ELAN_POSITION *pos, unsigned nodeid, unsigned numnodes, unsigned aritiyval); -+extern int elan4_get_position (ELAN4_DEV *dev, ELAN_POSITION *pos); -+extern int elan4_set_position (ELAN4_DEV *dev, ELAN_POSITION *pos); -+extern void elan4_get_params (ELAN4_DEV *dev, ELAN_PARAMS *params, unsigned short *mask); -+extern void elan4_set_params (ELAN4_DEV *dev, ELAN_PARAMS *params, unsigned short mask); -+ -+ -+extern int elan4_read_vpd(ELAN4_DEV *dev, unsigned char *tag, unsigned char *result) ; -+ -+extern void proc_insertctxt(ELAN4_DEV *dev,ELAN4_CTXT *ctxt); -+extern void proc_removectxt(ELAN4_DEV 
*dev,ELAN4_CTXT *ctxt); -+ -+extern int elan4_route2str (E4_VirtualProcessEntry *route, char *routeStr); -+extern void elan4_hardware_lock_check(ELAN4_DEV *dev, char *from); -+ -+/* device_osdep.c */ -+extern unsigned int elan4_pll_cfg; -+extern int elan4_pll_div; -+extern int elan4_mod45disable; -+extern int assfail_mode; -+ -+extern int elan4_pciinit (ELAN4_DEV *dev); -+extern void elan4_pcifini (ELAN4_DEV *dev); -+extern void elan4_updatepll (ELAN4_DEV *dev, unsigned int val); -+extern void elan4_pcierror (ELAN4_DEV *dev); -+ -+extern ELAN4_DEV *elan4_reference_device (int instance, int state); -+extern void elan4_dereference_device (ELAN4_DEV *dev); -+ -+extern ioaddr_t elan4_map_device (ELAN4_DEV *dev, unsigned bar, unsigned off, unsigned size, ELAN4_MAP_HANDLE *handlep); -+extern void elan4_unmap_device (ELAN4_DEV *dev, ioaddr_t ptr, unsigned size, ELAN4_MAP_HANDLE *handlep); -+extern unsigned long elan4_resource_len (ELAN4_DEV *dev, unsigned bar); -+ -+extern void elan4_configure_writecombining (ELAN4_DEV *dev); -+extern void elan4_unconfigure_writecombining (ELAN4_DEV *dev); -+ -+/* i2c.c */ -+extern int i2c_disable_auto_led_update (ELAN4_DEV *dev); -+extern void i2c_enable_auto_led_update (ELAN4_DEV *dev); -+extern int i2c_write (ELAN4_DEV *dev, unsigned int addr, unsigned int count, unsigned char *data); -+extern int i2c_read (ELAN4_DEV *dev, unsigned int addr, unsigned int count, unsigned char *data); -+extern int i2c_writereg (ELAN4_DEV *dev, unsigned int addr, unsigned int reg, unsigned int count, unsigned char *data); -+extern int i2c_readreg (ELAN4_DEV *dev, unsigned int addr, unsigned int reg, unsigned int count, unsigned char *data); -+extern int i2c_read_rom (ELAN4_DEV *dev, unsigned int addr, unsigned int count, unsigned char *data); -+ -+#if defined(__linux__) -+/* procfs_Linux.c */ -+extern void elan4_procfs_device_init (ELAN4_DEV *dev); -+extern void elan4_procfs_device_fini (ELAN4_DEV *dev); -+extern void elan4_procfs_init(void); -+extern void elan4_procfs_fini(void); -+ -+extern struct proc_dir_entry *elan4_procfs_root; -+extern struct proc_dir_entry *elan4_config_root; -+#endif -+ -+/* sdram.c */ -+extern void elan4_sdram_init (ELAN4_DEV *dev); -+extern void elan4_sdram_fini (ELAN4_DEV *dev); -+extern void elan4_sdram_setup_delay_lines (ELAN4_DEV *dev, int factor); -+extern int elan4_sdram_init_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank); -+extern void elan4_sdram_fini_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank); -+extern void elan4_sdram_add_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank); -+extern sdramaddr_t elan4_sdram_alloc (ELAN4_DEV *dev, int nbytes); -+extern void elan4_sdram_free (ELAN4_DEV *dev, sdramaddr_t ptr, int nbytes); -+extern void elan4_sdram_flushcache (ELAN4_DEV *dev, sdramaddr_t base, int nbytes); -+extern char *elan4_sdramerr2str (ELAN4_DEV *dev, E4_uint64 status, E4_uint64 ConfigReg, char *str); -+ -+/* traps.c */ -+extern void elan4_display_eproc_trap (void *type, int mode, char *str, ELAN4_EPROC_TRAP *trap); -+extern void elan4_display_cproc_trap (void *type, int mode, char *str, ELAN4_CPROC_TRAP *trap); -+extern void elan4_display_dproc_trap (void *type, int mode, char *str, ELAN4_DPROC_TRAP *trap); -+extern void elan4_display_tproc_trap (void *type, int mode, char *str, ELAN4_TPROC_TRAP *trap); -+extern void elan4_display_iproc_trap (void *type, int mode, char *str, ELAN4_IPROC_TRAP *trap); -+ -+ -+extern void elan4_extract_eproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_EPROC_TRAP *trap, int iswaitevent); -+extern void 
elan4_extract_cproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_CPROC_TRAP *trap, unsigned cqnum); -+extern void elan4_extract_dproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_DPROC_TRAP *trap, unsigned unit); -+extern void elan4_extract_tproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_TPROC_TRAP *trap); -+extern void elan4_extract_iproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_IPROC_TRAP *trap, unsigned unit); -+extern void elan4_ringbuf_store(ELAN4_ROUTE_RINGBUF *ringbuf, E4_VirtualProcessEntry *route, ELAN4_DEV *dev); -+extern int cproc_open_extract_vp (ELAN4_DEV *dev, ELAN4_CQ *cq, int chan); -+ -+extern void elan4_inspect_iproc_trap (ELAN4_IPROC_TRAP *trap); -+extern E4_uint64 elan4_trapped_open_command (ELAN4_DEV *dev, ELAN4_CQ *cq); -+ -+/* mmu.c */ -+extern void elan4mmu_flush_tlb (ELAN4_DEV *dev); -+extern ELAN4_HASH_ENTRY *elan4mmu_ptealloc (ELAN4_CTXT *ctxt, int tbl, E4_Addr vaddr, unsigned int *tagidxp); -+extern int elan4mmu_pteload (ELAN4_CTXT *ctxt, int tbl, E4_Addr vaddr, int type, E4_uint64 pte); -+extern int elan4mmu_pteload_page (ELAN4_CTXT *ctxt, int tbl, E4_Addr vaddr, struct page *page, int perm); -+extern void elan4mmu_pteunload (ELAN4_CTXT *ctxt, ELAN4_HASH_ENTRY *he, unsigned int tagidx, unsigned int pteidx); -+extern void elan4mmu_unload_range (ELAN4_CTXT *ctxt, int tbl, E4_Addr start, unsigned long len); -+extern void elan4mmu_invalidate_ctxt (ELAN4_CTXT *ctxt); -+ -+extern ELAN4_HASH_CACHE *elan4mmu_reserve (ELAN4_CTXT *ctxt, int tbl, E4_Addr start, unsigned int npages, int cansleep); -+extern void elan4mmu_release (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc); -+extern void elan4mmu_set_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx, E4_uint64 newpte); -+extern E4_uint64 elan4mmu_get_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx); -+extern void elan4mmu_clear_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx); -+ -+extern int elan4mmu_display_mmuhash(ELAN4_DEV *dev, int tlb, int *index, char *page, int count); -+extern int elan4mmu_display_ctxt_mmuhash(ELAN4_CTXT *ctxt, int tlb, int *index, char *page, int count); -+extern int elan4mmu_display_bucket_mmuhash(ELAN4_DEV *dev, int tlb, int *buckets, int nBuckets, char *page, int count); -+extern void elan4mmu_do_shuffle(ELAN4_CTXT *ctxt, int tbl); -+extern void elan4mmu_set_shuffle(ELAN4_CTXT *ctxt, int tbl, int hashidx); -+ -+/* mmu_osdep.c */ -+extern int elan4mmu_sdram_aliascheck (ELAN4_CTXT *ctxt, E4_Addr addr, sdramaddr_t phys); -+extern int elan4mmu_alloc_topaddr (ELAN4_DEV *dev, physaddr_t paddr, unsigned type); -+extern E4_uint64 elan4mmu_phys2pte (ELAN4_DEV *dev, physaddr_t phys, unsigned perm); -+extern physaddr_t elan4mmu_pte2phys (ELAN4_DEV *dev, E4_uint64 pte); -+ -+/* neterr.c */ -+extern int elan4_neterr_init (ELAN4_DEV *dev); -+extern void elan4_neterr_destroy (ELAN4_DEV *dev); -+extern int elan4_neterr_sendmsg (ELAN4_DEV *dev, unsigned int nodeid, unsigned int retries, ELAN4_NETERR_MSG *msg); -+extern int elan4_neterr_iproc_trap (ELAN4_DEV *dev, ELAN4_IPROC_TRAP *trap); -+ -+/* routetable.c */ -+extern ELAN4_ROUTE_TABLE *elan4_alloc_routetable (ELAN4_DEV *dev, unsigned size); -+extern void elan4_free_routetable (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl); -+extern void elan4_write_route (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl, unsigned vp, E4_VirtualProcessEntry *entry); -+extern void elan4_read_route (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl, unsigned vp, E4_VirtualProcessEntry *entry); -+extern void elan4_invalidate_route (ELAN4_DEV *dev, 
ELAN4_ROUTE_TABLE *tbl, unsigned vp);
-+extern int elan4_generate_route (ELAN_POSITION *pos, E4_VirtualProcessEntry *route, unsigned ctxnum,
-+ unsigned lowid, unsigned highid, unsigned options);
-+extern int elan4_check_route (ELAN_POSITION *pos, ELAN_LOCATION location, E4_VirtualProcessEntry *route, unsigned flags);
-+
-+/* user.c */
-+extern int __categorise_command (E4_uint64 command, int *cmdSize);
-+extern int __whole_command (sdramaddr_t *commandPtr, sdramaddr_t insertPtr, unsigned int cqSize, unsigned int cmdSize);
-+
-+/* debug.c */
-+extern int elan4_assfail (ELAN4_CTXT *ctxt, const char *ex, const char *func, const char *file, const int line);
-+extern int elan4_debug_trigger (ELAN4_CTXT *ctxt, const char *func, const char *file, const int line, const char *fmt, ...);
-+
-+#if defined(DEBUG_ASSERT)
-+#define ELAN4_ASSERT(ctxt,EXPR) do { \
-+ if (!(EXPR) && elan4_assfail (ctxt, #EXPR, __FUNCTION__, __FILE__, __LINE__)) { \
-+ BUG(); \
-+ } \
-+} while (0)
-+#else
-+#define ELAN4_ASSERT(ctxt,EXPR) ((void) 0)
-+#endif
-+
-+#define ELAN4_DEBUG_TRIGGER(ctxt,fmt, args...) do {\
-+ if (elan4_debug_trigger (ctxt, __FUNCTION__, __FILE__, __LINE__, fmt, ##args)) \
-+ BUG();\
-+} while (0)
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "stroustrup"
-+ * End:
-+ */
-+#endif /* __ELAN4_ELANDEV_H */
-diff -urN clean/include/elan4/device_Linux.h linux-2.6.9/include/elan4/device_Linux.h
---- clean/include/elan4/device_Linux.h 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/elan4/device_Linux.h 2005-04-05 11:29:28.000000000 -0400
-@@ -0,0 +1,118 @@
-+/*
-+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
-+ * Copyright (c) 2002-2003 by Quadrics Ltd.
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ifndef __ELAN4_ELANDEV_LINUX_H
-+#define __ELAN4_ELANDEV_LINUX_H
-+
-+#ident "$Id: device_Linux.h,v 1.26 2005/04/05 15:29:28 robin Exp $"
-+/* $Source: /cvs/master/quadrics/elan4mod/device_Linux.h,v $*/
-+
-+#include
-+
-+#if !defined(NO_COPROC) /* The older coproc kernel patch is applied */
-+#include
-+
-+#define ioproc_ops coproc_ops_struct
-+#define ioproc_register_ops register_coproc_ops
-+#define ioproc_unregister_ops unregister_coproc_ops
-+
-+#define IOPROC_MM_STRUCT_ARG 1
-+#define IOPROC_PATCH_APPLIED 1
-+
-+#elif !defined(NO_IOPROC) /* The new ioproc kernel patch is applied */
-+#include
-+
-+#define IOPROC_PATCH_APPLIED 1
-+#endif
-+
-+
-+#if defined(MPSAS)
-+#include
-+#endif
-+
-+#if defined(CONFIG_DEVFS_FS)
-+#include
-+#endif
-+
-+#define ELAN4_MAJOR 61
-+#define ELAN4_NAME "elan4"
-+#define ELAN4_MAX_CONTROLLER 16 /* limited to 4 bits */
-+
-+/* OS dependent component of ELAN4_DEV struct */
-+typedef struct elan4_dev_osdep
-+{
-+ struct pci_dev *pdev; /* PCI config data */
-+
-+ struct proc_dir_entry *procdir;
-+ struct proc_dir_entry *configdir;
-+ struct proc_dir_entry *statsdir;
-+ struct proc_dir_entry *ctxtdir;
-+
-+#if defined(CONFIG_DEVFS_FS)
-+ devfs_handle_t devfs_control;
-+ devfs_handle_t devfs_sdram;
-+ devfs_handle_t devfs_user;
-+#endif
-+
-+#if defined(CONFIG_MTRR)
-+ int sdram_mtrr;
-+ int regs_mtrr;
-+#endif
-+} ELAN4_DEV_OSDEP;
-+
-+/* /dev/elan/rmsX */
-+
-+/* /dev/elan4/controlX */
-+typedef struct control_private
-+{
-+ struct elan4_dev *pr_dev;
-+ unsigned pr_boundary_scan;
-+} CONTROL_PRIVATE;
-+
-+/* /dev/elan4/sdramX */
-+typedef struct mem_page
-+{
-+ struct mem_page *pg_next;
-+ sdramaddr_t pg_addr;
-+ unsigned long pg_pgoff;
-+ unsigned pg_ref;
-+} MEM_PAGE;
-+
-+#define MEM_HASH_SIZE 32
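-+/*
-+ * Illustrative sketch (not part of the original source): pages mapped
-+ * through /dev/elan4/sdramX are kept on the pr_pages[] hash chains and
-+ * can be found by hashing their page offset, along the lines of
-+ * (here "pr" is a hypothetical MEM_PRIVATE pointer):
-+ *
-+ *	MEM_PAGE *pg;
-+ *	for (pg = pr->pr_pages[MEM_HASH(pgoff)]; pg != NULL; pg = pg->pg_next)
-+ *		if (pg->pg_pgoff == pgoff)
-+ *			break;
-+ */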
-+#define MEM_HASH(pgoff) ((pgoff) & (MEM_HASH_SIZE-1)) -+ -+typedef struct mem_private -+{ -+ struct elan4_dev *pr_dev; -+ MEM_PAGE *pr_pages[MEM_HASH_SIZE]; -+ spinlock_t pr_lock; -+} MEM_PRIVATE; -+ -+/* /dev/elan4/userX */ -+typedef struct user_private -+{ -+ atomic_t pr_ref; -+ struct user_ctxt *pr_uctx; -+ struct mm_struct *pr_mm; -+ -+#if defined(IOPROC_PATCH_APPLIED) -+ struct ioproc_ops pr_ioproc; -+#endif -+} USER_PRIVATE; -+ -+/* No mapping handles on linux */ -+typedef void *ELAN4_MAP_HANDLE; -+ -+#define ELAN4_TASK_HANDLE() ((unsigned long) current->mm) -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -+#endif /* __ELAN4_ELANDEV_LINUX_H */ -diff -urN clean/include/elan4/dma.h linux-2.6.9/include/elan4/dma.h ---- clean/include/elan4/dma.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan4/dma.h 2003-09-04 08:39:17.000000000 -0400 -@@ -0,0 +1,82 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN4_DMA_H -+#define __ELAN4_DMA_H -+ -+#ident "$Id: dma.h,v 1.16 2003/09/04 12:39:17 david Exp $" -+/* $Source: /cvs/master/quadrics/elan4hdr/dma.h,v $*/ -+ -+#include -+ -+/* Alignment for a DMA descriptor */ -+#define E4_DMA_ALIGN (64) -+ -+/* Maximum size of a single DMA ((1 << 31)-1) */ -+#define E4_MAX_DMA_SIZE (0x7fffffff) -+ -+/* -+ * dma_typeSize -+ * -+ * [63:32] Size -+ * [31] unused -+ * [30] IsRemote -+ * [29] QueueWrite -+ * [28] ShmemWrite -+ * [27:26] DataType -+ * [25] Broadcast -+ * [24] AlignPackets -+ * [23:16] FailCount -+ * [15:14] unused -+ * [13:0] Context -+ */ -+ -+#define DMA_FailCount(val) (((val) & 0xff) << 16) -+#define DMA_AlignPackets (1 << 24) -+#define DMA_Broadcast (1 << 25) -+#define DMA_ShMemWrite (1 << 28) -+#define DMA_QueueWrite (1 << 29) -+#define DMA_IsRemote (1 << 30) -+#define DMA_Context(val) ((unsigned) (val) & 0x3ff) -+#define DMA_ContextMask 0x3fffull -+#define Dma_TypeSizeMask 0xfffffffffff00000ull -+ -+#define DMA_DataTypeByte (E4_DATATYPE_BYTE << 26) -+#define DMA_DataTypeShort (E4_DATATYPE_SHORT << 26) -+#define DMA_DataTypeWord (E4_DATATYPE_WORD << 26) -+#define DMA_DataTypeLong (E4_DATATYPE_DWORD << 26) -+ -+#define E4_DMA_TYPE_SIZE(size, dataType, flags, failCount) \ -+ ((((E4_uint64)(size)) << 32) | ((dataType) & DMA_DataTypeLong) | \ -+ (flags) | DMA_FailCount(failCount)) -+ -+typedef volatile struct e4_dma -+{ -+ E4_uint64 dma_typeSize; -+ E4_uint64 dma_cookie; -+ E4_uint64 dma_vproc; -+ E4_Addr dma_srcAddr; -+ E4_Addr dma_dstAddr; -+ E4_Addr dma_srcEvent; -+ E4_Addr dma_dstEvent; -+} E4_DMA; -+ -+/* Same as above but padded to 64-bytes */ -+typedef volatile struct e4_dma64 -+{ -+ E4_uint64 dma_typeSize; -+ E4_uint64 dma_cookie; -+ E4_uint64 dma_vproc; -+ E4_Addr dma_srcAddr; -+ E4_Addr dma_dstAddr; -+ E4_Addr dma_srcEvent; -+ E4_Addr dma_dstEvent; -+ E4_Addr dma_pad; -+} E4_DMA64; -+ -+#endif /* __ELAN4_DMA_H */ -diff -urN clean/include/elan4/events.h linux-2.6.9/include/elan4/events.h ---- clean/include/elan4/events.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan4/events.h 2004-06-23 07:07:18.000000000 -0400 -@@ -0,0 +1,179 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ifndef __ELAN4_EVENTS_H
-+#define __ELAN4_EVENTS_H
-+
-+#ident "$Id: events.h,v 1.22 2004/06/23 11:07:18 addy Exp $"
-+/* $Source: /cvs/master/quadrics/elan4hdr/events.h,v $*/
-+
-+#define E4_EVENT_ALIGN 32
-+#define E4_EVENTBLOCK_SIZE 64
-+
-+#ifndef _ASM
-+/*
-+ * Event locations must be aligned to a 32 byte boundary. It is much more efficient to place
-+ * them in elan local memory, but it is not essential.
-+ */
-+typedef struct _E4_Event
-+{
-+ volatile E4_uint64 ev_CountAndType;
-+ E4_uint64 ev_Params[2];
-+} E4_Event;
-+
-+/* Same as above but padded to correct Event alignment */
-+typedef struct _E4_Event32
-+{
-+ volatile E4_uint64 ev_CountAndType;
-+ E4_uint64 ev_Params[2];
-+ E4_uint64 ev_pad;
-+} E4_Event32;
-+
-+/*
-+ * An E4_EVENTBLOCK_SIZE aligned block of Main or Elan memory
-+ */
-+typedef union _E4_Event_Blk
-+{
-+ /* Padded to 64-bytes in case a cache-line write is more efficient */
-+ volatile E4_uint8 eb_unit8[E4_EVENTBLOCK_SIZE];
-+ volatile E4_uint32 eb_uint32[E4_EVENTBLOCK_SIZE/sizeof(E4_uint32)];
-+ volatile E4_uint64 eb_uint64[E4_EVENTBLOCK_SIZE/sizeof(E4_uint64)];
-+} E4_Event_Blk;
-+#define eb_done eb_uint32[14]
-+#define eb_done_dword eb_uint64[7]
-+
-+#endif /* ! _ASM */
-+
-+/*
-+ * ev_CountAndType
-+ * [63:32] Count
-+ * [10] CopyType
-+ * [9:8] DataType
-+ * [7:0] CopySize
-+ */
-+#define E4_EVENT_TYPE_MASK 0x00000000ffffffffull
-+#define E4_EVENT_COUNT_MASK 0xffffffff00000000ull
-+#define E4_EVENT_COUNT_SHIFT 32
-+#define E4_EVENT_COPY_TYPE_MASK (1 << 10)
-+#define E4_EVENT_DATA_TYPE_MASK (3 << 8)
-+#define E4_EVENT_COPY_SIZE_MASK (0xff)
-+
-+/* CopyType */
-+#define E4_EVENT_COPY (0 << 10)
-+#define E4_EVENT_WRITE (1 << 10)
-+
-+/* DataType */
-+#define E4_EVENT_DTYPE_BYTE (0 << 8)
-+#define E4_EVENT_DTYPE_SHORT (1 << 8)
-+#define E4_EVENT_DTYPE_WORD (2 << 8)
-+#define E4_EVENT_DTYPE_LONG (3 << 8)
-+
-+#define EVENT_COUNT(EventPtr) ((E4_int32)(elan4_load64 (&(EventPtr)->ev_CountAndType) >> E4_EVENT_COUNT_SHIFT))
-+#define EVENT_TYPE(EventPtr) ((E4_uint32)(elan4_load64 (&(EventPtr)->ev_CountAndType) & E4_EVENT_TYPE_MASK))
-+
-+#define E4_WAITEVENT_COUNT_TYPE_VALUE(Count, EventType, DataType, CopySize) \
-+ (((E4_uint64)(Count) << E4_EVENT_COUNT_SHIFT) | (EventType) | (DataType) | (CopySize))
-+
-+#define E4_EVENT_TYPE_VALUE(EventType, DataType, CopySize) \
-+ ((EventType) | (DataType) | (CopySize))
-+
-+#define E4_EVENT_INIT_VALUE(InitialCount, EventType, DataType, CopySize) \
-+ (((E4_uint64)(InitialCount) << E4_EVENT_COUNT_SHIFT) | E4_EVENT_TYPE_VALUE(EventType, DataType, CopySize))
-+
-+#define ev_CopySource ev_Params[0]
-+#define ev_CopyDest ev_Params[1]
-+#define ev_WritePtr ev_Params[0]
-+#define ev_WriteValue ev_Params[1]
-+
-+#define EVENT_BLK_READY(BLK) ((BLK)->eb_done != 0)
-+#define EVENT_READY(EVENT) ((E4_uint32)((((volatile E4_Event *) (EVENT))->ev_CountAndType) >> E4_EVENT_COUNT_SHIFT) >= 0)
-+
-+#define ELAN_WAIT_EVENT (0)
-+#define ELAN_POLL_EVENT (-1)
-+
-+#define E4_BLK_PATTERN ((E4_uint32)0xfeedface)
-+
-+#define E4_INIT_COPY_EVENT(EVENT, BLK_ELAN, BLK, SIZE) \
-+ do { \
-+ elan4_store64 (E4_EVENT_INIT_VALUE(0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, SIZE), &(EVENT)->ev_CountAndType); \
-+ elan4_store64 ((BLK_ELAN), &(EVENT)->ev_CopySource); \
-+ elan4_store64 ((BLK), &(EVENT)->ev_CopyDest); \
-+ } while (0)
-+
-+#define E4_INIT_WRITE_EVENT(EVENT, DWORD) \
-+ do { \
-+ elan4_store64 (E4_EVENT_INIT_VALUE(0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0), &(EVENT)->ev_CountAndType); \
-+ elan4_store64 ((DWORD), &(EVENT)->ev_WritePtr); \
-+ elan4_store64 ((E4_Addr) (E4_BLK_PATTERN), &(EVENT)->ev_WriteValue); \
-+ } while (0)
-+
-+#define E4_RESET_BLK_EVENT(BLK) \
-+ do { \
-+ (BLK)->eb_done = (0); \
-+ } while (0)
-+
-+#define E4_PRIME_BLK_EVENT(EVENT, COUNT) \
-+ do { \
-+ elan4_store64 (E4_EVENT_INIT_VALUE(COUNT, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, 8), &(EVENT)->ev_CountAndType);\
-+ } while (0)
-+
-+#define E4_PRIME_COPY_EVENT(EVENT, SIZE, COUNT) \
-+ do { \
-+ elan4_store64 (E4_EVENT_INIT_VALUE(COUNT, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, (SIZE >> 3)), &(EVENT)->ev_CountAndType);\
-+ } while (0)
-+
-+#define E4_PRIME_WRITE_EVENT(EVENT, COUNT) \
-+ do { \
-+ elan4_store64 (E4_EVENT_INIT_VALUE(COUNT, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0), &(EVENT)->ev_CountAndType);\
-+ } while (0)
-+
-+#ifndef _ASM
-+
-+#define E4_INPUTQ_ALIGN 32 /* Descriptor must be 32-byte aligned */
-+
-+typedef struct _E4_InputQueue
-+{
-+ volatile E4_Addr q_bptr; /* 64 bit aligned ptr to current back item */
-+ E4_Addr q_fptr; /* 64 bit aligned ptr to current front item */
-+ E4_uint64 q_control; /* this defines the last item, item size, and offset back to the first item. */
-+ E4_Addr q_event; /* queue event */
-+} E4_InputQueue;
-+
-+#define E4_INPUTQ_LASTITEM_MASK 0x00000000ffffffffULL
-+#define E4_INPUTQ_ITEMSIZE_MASK 0x000000ff00000000ULL
-+#define E4_INPUTQ_LASTITEM_OFFSET_MASK 0xffffff0000000000ULL
-+#define E4_INPUTQ_LASTITEM_SHIFT 0
-+#define E4_INPUTQ_ITEMSIZE_SHIFT 32
-+#define E4_INPUTQ_LASTITEM_OFFSET_SHIFT 40
-+
-+/*
-+ * Macro to initialise the InputQueue control word given the FirstItem, LastItem & ItemSize
-+ * FirstItem and LastItem are 64 bit double word aligned elan addresses.
-+ */
-+#define E4_InputQueueControl(FirstItem, LastItem, ItemSizeInBytes)\
-+ (((((E4_uint64)(LastItem))) & E4_INPUTQ_LASTITEM_MASK) |\
-+ ((((E4_uint64)(ItemSizeInBytes)) << (E4_INPUTQ_ITEMSIZE_SHIFT-3)) & E4_INPUTQ_ITEMSIZE_MASK) |\
-+ ((((E4_uint64)((FirstItem)-(LastItem))) << (E4_INPUTQ_LASTITEM_OFFSET_SHIFT-3)) & E4_INPUTQ_LASTITEM_OFFSET_MASK))
-+
-+/*
-+ * LastItemOffset is a sign extended -ve quantity with LastItemOffset[26:3] == q_control[63:40]
-+ * we sign extend this by setting LastItemOffset[63:27] to be all ones.
-+ */
-+#define E4_InputQueueLastItemOffset(control) ((((E4_int64) -1) << (64 - (E4_INPUTQ_LASTITEM_OFFSET_SHIFT-3))) | \
-+ ((E4_int64) (((control) & E4_INPUTQ_LASTITEM_OFFSET_MASK) >> (E4_INPUTQ_LASTITEM_OFFSET_SHIFT-3))))
-+#define E4_InputQueueItemSize(control) (((control) & E4_INPUTQ_ITEMSIZE_MASK) >> (E4_INPUTQ_ITEMSIZE_SHIFT-3))
-+
-+/*
-+ * Macro to increment the InputQ front pointer taking into account wrap
-+ */
-+#define E4_InputQueueFptrIncrement(Q, FirstItem, LastItem, ItemSizeInBytes) \
-+ ((Q)->q_fptr = ( ((Q)->q_fptr == (LastItem)) ? (FirstItem) : ((Q)->q_fptr + (ItemSizeInBytes))) )
-+
-+#endif /* _ASM */
-+
-+#endif /* __ELAN4_EVENTS_H */
-diff -urN clean/include/elan4/i2c.h linux-2.6.9/include/elan4/i2c.h
---- clean/include/elan4/i2c.h 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/elan4/i2c.h 2003-12-02 11:11:22.000000000 -0500
-@@ -0,0 +1,47 @@
-+/*
-+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ * Copyright (c) 2002-2003 by Quadrics Ltd.
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef _ELAN4_I2C_H -+#define _ELAN4_I2C_H -+ -+#ident "@(#)$Id: i2c.h,v 1.10 2003/12/02 16:11:22 lee Exp $ $Name: QSNETMODULES-5-11-3_20050907 $" -+/* $Source: /cvs/master/quadrics/elan4hdr/i2c.h,v $*/ -+ -+/* I2C address space - bits[7:1] */ -+#define I2C_LED_I2C_ADDR 0x20 -+#define I2C_TEMP_ADDR 0x48 -+#define I2C_EEPROM_ADDR 0x50 -+ -+#define I2C_WRITE_ADDR(addr) ((addr) << 1 | 0) -+#define I2C_READ_ADDR(addr) ((addr) << 1 | 1) -+ -+/* I2C EEPROM appears as 8 I2C 256 byte devices */ -+#define I2C_24LC16B_BLOCKSIZE (256) -+#define I2C_24LC16B_BLOCKADDR(addr) ((addr) >> 8) -+#define I2C_24LC16B_BLOCKOFFSET(addr) ((addr) & 0xff) -+ -+#define I2C_ELAN_EEPROM_PCI_BASEADDR 0 /* PCI config starts at addr 0 in the EEPROM */ -+#define I2C_ELAN_EEPROM_VPD_BASEADDR 256 /* VPD data start */ -+#define I2C_ELAN_EEPROM_PCI_SIZE 256 /* PCI data max size */ -+#define I2C_ELAN_EEPROM_VPD_SIZE 256 /* VPD data max size */ -+ -+#define I2C_ELAN_EEPROM_SIZE 2048 -+ -+#define I2C_ELAN_EEPROM_DEVICE_ID 0xA0 -+#define I2C_ELAN_EEPROM_FAIL_LIMIT 8 -+ -+#define I2C_ELAN_EEPROM_ADDR_BLOCKSIZE_SHIFT 0x8 -+#define I2C_ELAN_EEPROM_ADDR_BLOCK_MASK 0x7 -+#define I2C_ELAN_EEPROM_ADDR_BLOCK_SHIFT 0x1 -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -+#endif /* _ELAN4_I2C_H */ -diff -urN clean/include/elan4/intcookie.h linux-2.6.9/include/elan4/intcookie.h ---- clean/include/elan4/intcookie.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan4/intcookie.h 2004-08-09 10:02:37.000000000 -0400 -@@ -0,0 +1,62 @@ -+/* -+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: intcookie.h,v 1.10 2004/08/09 14:02:37 daniel Exp $" -+/* $Source: /cvs/master/quadrics/elan4mod/intcookie.h,v $*/ -+ -+#ifndef __ELAN4_INTCOOKIE_H -+#define __ELAN4_INTCOOKIE_H -+ -+typedef E4_uint64 ELAN4_INTCOOKIE; -+ -+#ifdef __KERNEL__ -+ -+typedef struct intcookie_entry -+{ -+ struct intcookie_entry *ent_next; -+ struct intcookie_entry *ent_prev; -+ -+ spinlock_t ent_lock; -+ unsigned ent_ref; -+ -+ ELAN4_INTCOOKIE ent_cookie; -+ ELAN4_INTCOOKIE ent_fired; -+ kcondvar_t ent_wait; -+} INTCOOKIE_ENTRY; -+ -+typedef struct intcookie_table -+{ -+ struct intcookie_table *tbl_next; -+ struct intcookie_table *tbl_prev; -+ -+ ELAN_CAPABILITY *tbl_cap; -+ -+ spinlock_t tbl_lock; -+ unsigned tbl_ref; -+ INTCOOKIE_ENTRY *tbl_entries; -+} INTCOOKIE_TABLE; -+ -+extern void intcookie_init(void); -+extern void intcookie_fini(void); -+extern INTCOOKIE_TABLE *intcookie_alloc_table (ELAN_CAPABILITY *cap); -+extern void intcookie_free_table (INTCOOKIE_TABLE *tbl); -+extern int intcookie_alloc (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie); -+extern int intcookie_free (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie); -+extern int intcookie_fire (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie); -+extern int intcookie_fire_cap (ELAN_CAPABILITY *cap, ELAN4_INTCOOKIE cookie); -+extern int intcookie_wait (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie); -+extern int intcookie_arm (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie); -+ -+#endif /* __KERNEL */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -+#endif /* __ELAN4_INTCOOKIE_H */ -diff -urN clean/include/elan4/ioctl.h linux-2.6.9/include/elan4/ioctl.h ---- clean/include/elan4/ioctl.h 1969-12-31 
19:00:00.000000000 -0500
-+++ linux-2.6.9/include/elan4/ioctl.h 2005-01-10 12:45:50.000000000 -0500
-@@ -0,0 +1,320 @@
-+/*
-+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
-+ * Copyright (c) 2002-2003 by Quadrics Ltd.
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ifndef __ELAN4_IOCTL_H
-+#define __ELAN4_IOCTL_H
-+
-+#ident "@(#)$Id: ioctl.h,v 1.29 2005/01/10 17:45:50 duncant Exp $"
-+/* $Source: /cvs/master/quadrics/elan4mod/ioctl.h,v $*/
-+
-+#include
-+#include
-+
-+#include
-+#include
-+#include
-+#include
-+
-+#define ELAN4IO_CONTROL_PATHNAME "/dev/elan4/control%d"
-+#define ELAN4IO_USER_PATHNAME "/dev/elan4/user%d"
-+#define ELAN4IO_SDRAM_PATHNAME "/dev/elan4/sdram%d"
-+#define ELAN4IO_MAX_PATHNAMELEN 32
-+
-+/*
-+ * NOTE - ioctl values 0->0x1f are defined for
-+ * generic/control usage.
-+ */
-+
-+/* Macro to generate 'offset' to mmap "control" device */
-+#define OFF_TO_BAR(off) (((off) >> 28) & 0xF)
-+#define OFF_TO_OFFSET(off) ((off) & 0x0FFFFFFF)
-+#define GEN_OFF(bar,off) (((bar) << 28) | ((off) & 0x0FFFFFFF))
-+
-+/* Definitions for generic ioctls */
-+#define ELAN4IO_GENERIC_BASE 0x00
-+
-+typedef struct elan4io_stats_struct
-+{
-+ int which;
-+ unsigned long long ptr; /* always pass pointer as 64 bit */
-+} ELAN4IO_STATS_STRUCT;
-+
-+#define ELAN4IO_STATS _IOR ('e', ELAN4IO_GENERIC_BASE + 0, ELAN4IO_STATS_STRUCT)
-+#define ELAN4IO_DEVINFO _IOR ('e', ELAN4IO_GENERIC_BASE + 1, ELAN_DEVINFO)
-+#define ELAN4IO_POSITION _IOR ('e', ELAN4IO_GENERIC_BASE + 2, ELAN_POSITION)
-+
-+
-+/*
-+ * Definitions for /dev/elan4/controlX
-+ */
-+#define ELAN4IO_CONTROL_BASE 0x20
-+
-+#define ELAN4IO_GET_POSITION _IOR ('e', ELAN4IO_CONTROL_BASE + 0, ELAN_POSITION)
-+#define ELAN4IO_SET_POSITION _IOW ('e', ELAN4IO_CONTROL_BASE + 1, ELAN_POSITION)
-+#define ELAN4IO_DEBUG_SNAPSHOT _IOW ('e', ELAN4IO_CONTROL_BASE + 2, )
-+
-+typedef struct elan4io_params_mask_struct
-+{
-+ unsigned short p_mask;
-+ ELAN_PARAMS p_params;
-+} ELAN4IO_PARAMS_STRUCT;
-+#define ELAN4IO_GET_PARAMS _IOR ('e', ELAN4IO_CONTROL_BASE + 3, ELAN4IO_PARAMS_STRUCT)
-+#define ELAN4IO_SET_PARAMS _IOW ('e', ELAN4IO_CONTROL_BASE + 4, ELAN4IO_PARAMS_STRUCT)
-+
-+/* old versions - implicit p_mask == 3 */
-+#define ELAN4IO_OLD_GET_PARAMS _IOR ('e', ELAN4IO_CONTROL_BASE + 3, ELAN_PARAMS)
-+#define ELAN4IO_OLD_SET_PARAMS _IOW ('e', ELAN4IO_CONTROL_BASE + 4, ELAN_PARAMS)
-+
-+/*
-+ * Definitions for /dev/elan4/userX
-+ */
-+#define ELAN4IO_USER_BASE 0x40
-+
-+#define ELAN4IO_FREE _IO ('e', ELAN4IO_USER_BASE + 0)
-+#define ELAN4IO_ATTACH _IOWR ('e', ELAN4IO_USER_BASE + 1, ELAN_CAPABILITY)
-+#define ELAN4IO_DETACH _IOWR ('e', ELAN4IO_USER_BASE + 2, ELAN_CAPABILITY)
-+#define ELAN4IO_BLOCK_INPUTTER _IO ('e', ELAN4IO_USER_BASE + 3)
-+
-+typedef struct elan4io_add_p2pvp_struct
-+{
-+ unsigned vp_process;
-+ ELAN_CAPABILITY vp_capability;
-+} ELAN4IO_ADD_P2PVP_STRUCT;
-+
-+#define ELAN4IO_ADD_P2PVP _IOW ('e', ELAN4IO_USER_BASE + 4, ELAN4IO_ADD_P2PVP_STRUCT)
-+
-+typedef struct elan4io_add_bcastvp_struct
-+{
-+ unsigned int vp_process;
-+ unsigned int vp_lowvp;
-+ unsigned int vp_highvp;
-+} ELAN4IO_ADD_BCASTVP_STRUCT;
-+
-+#define ELAN4IO_ADD_BCASTVP _IOW ('e', ELAN4IO_USER_BASE + 5, ELAN4IO_ADD_BCASTVP_STRUCT)
-+
-+#define ELAN4IO_REMOVEVP _IO ('e', ELAN4IO_USER_BASE + 6)
-+
-+typedef struct elan4io_route_struct
-+{
-+ unsigned int rt_process;
-+ unsigned int rt_error;
-+ E4_VirtualProcessEntry rt_route;
-+} ELAN4IO_ROUTE_STRUCT;
-+
-+#define ELAN4IO_SET_ROUTE _IOW ('e', 
ELAN4IO_USER_BASE + 7, ELAN4IO_ROUTE_STRUCT) -+#define ELAN4IO_RESET_ROUTE _IOW ('e', ELAN4IO_USER_BASE + 9, ELAN4IO_ROUTE_STRUCT) -+#define ELAN4IO_GET_ROUTE _IOWR ('e', ELAN4IO_USER_BASE + 8, ELAN4IO_ROUTE_STRUCT) -+#define ELAN4IO_CHECK_ROUTE _IOWR ('e', ELAN4IO_USER_BASE + 10, ELAN4IO_ROUTE_STRUCT) -+ -+typedef struct elan4io_alloc_cq_struct -+{ -+ unsigned int cq_size; /* input: size of queue */ -+ unsigned int cq_perm; /* input: requested permissions */ -+ unsigned int cq_type; /* input: queue type */ -+ unsigned int cq_indx; /* output: queue number */ -+} ELAN4IO_ALLOCCQ_STRUCT; -+ -+#define ELAN4IO_ALLOCCQ _IOWR ('e', ELAN4IO_USER_BASE + 11, ELAN4IO_ALLOCCQ_STRUCT) -+#define ELAN4IO_FREECQ _IOWR ('e', ELAN4IO_USER_BASE + 12, unsigned) -+ -+#define ELAN4IO_CQ_TYPE_REORDER 1 /* revb reordering command queue */ -+ -+typedef struct elan4io_perm_struct -+{ -+ E4_Addr ps_eaddr; -+ E4_uint64 ps_len; -+ unsigned long ps_maddr; -+ unsigned int ps_perm; -+} ELAN4IO_PERM_STRUCT; -+ -+typedef struct elan4io_perm_struct32 -+{ -+ E4_Addr ps_eaddr; -+ E4_uint64 ps_len; -+ unsigned int ps_maddr; -+ unsigned int ps_perm; -+} ELAN4IO_PERM_STRUCT32; -+ -+#define ELAN4IO_SETPERM _IOWR ('e', ELAN4IO_USER_BASE + 13, ELAN4IO_PERM_STRUCT) -+#define ELAN4IO_SETPERM32 _IOWR ('e', ELAN4IO_USER_BASE + 13, ELAN4IO_PERM_STRUCT32) -+#define ELAN4IO_CLRPERM _IOWR ('e', ELAN4IO_USER_BASE + 14, ELAN4IO_PERM_STRUCT) -+#define ELAN4IO_CLRPERM32 _IOWR ('e', ELAN4IO_USER_BASE + 14, ELAN4IO_PERM_STRUCT32) -+ -+typedef struct elan4io_trapsig_struct -+{ -+ int ts_signo; -+} ELAN4IO_TRAPSIG_STRUCT; -+#define ELAN4IO_TRAPSIG _IOW ('e', ELAN4IO_USER_BASE + 15, ELAN4IO_TRAPSIG_STRUCT) -+ -+typedef struct elan4io_traphandler_struct -+{ -+ unsigned int th_nticks; /* number of ticks to sleep for next trap */ -+ unsigned int th_proc; /* elan processor involved */ -+ unsigned long th_trapp; /* space to store trap */ -+} ELAN4IO_TRAPHANDLER_STRUCT; -+ -+typedef struct elan4io_traphandler_struct32 -+{ -+ unsigned int th_nticks; /* number of ticks to sleep for next trap */ -+ unsigned int th_proc; /* elan processor involved */ -+ unsigned int th_trapp; /* space to store trap */ -+} ELAN4IO_TRAPHANDLER_STRUCT32; -+ -+#define ELAN4IO_TRAPHANDLER _IOW ('e', ELAN4IO_USER_BASE + 16, ELAN4IO_TRAPHANDLER_STRUCT) -+#define ELAN4IO_TRAPHANDLER32 _IOW ('e', ELAN4IO_USER_BASE + 16, ELAN4IO_TRAPHANDLER_STRUCT32) -+ -+typedef struct elan4io_required_mappings_struct -+{ -+ E4_Addr rm_upage_addr; /* elan address of user page */ -+ E4_Addr rm_trestart_addr; /* elan address of tproc restart trampoline */ -+} ELAN4IO_REQUIRED_MAPPINGS_STRUCT; -+#define ELAN4IO_REQUIRED_MAPPINGS _IOW ('e', ELAN4IO_USER_BASE + 17, ELAN4IO_REQUIRED_MAPPINGS_STRUCT) -+ -+typedef struct elan4io_resume_eproc_trap_struct -+{ -+ E4_Addr rs_addr; -+} ELAN4IO_RESUME_EPROC_TRAP_STRUCT; -+#define ELAN4IO_RESUME_EPROC_TRAP _IOW ('e', ELAN4IO_USER_BASE + 18, ELAN4IO_RESUME_EPROC_TRAP_STRUCT) -+ -+typedef struct elan4io_resume_cproc_trap_struct -+{ -+ unsigned int rs_indx; -+} ELAN4IO_RESUME_CPROC_TRAP_STRUCT; -+#define ELAN4IO_RESUME_CPROC_TRAP _IOW ('e', ELAN4IO_USER_BASE + 19, ELAN4IO_RESUME_CPROC_TRAP_STRUCT) -+ -+typedef struct elan4io_resume_dproc_trap_struct -+{ -+ E4_DMA rs_desc; -+} ELAN4IO_RESUME_DPROC_TRAP_STRUCT; -+#define ELAN4IO_RESUME_DPROC_TRAP _IOW ('e', ELAN4IO_USER_BASE + 20, ELAN4IO_RESUME_DPROC_TRAP_STRUCT) -+ -+typedef struct elan4io_resume_tproc_trap_struct -+{ -+ E4_ThreadRegs rs_regs; -+} ELAN4IO_RESUME_TPROC_TRAP_STRUCT; -+#define ELAN4IO_RESUME_TPROC_TRAP 
_IOW ('e', ELAN4IO_USER_BASE + 21, ELAN4IO_RESUME_TPROC_TRAP_STRUCT) -+ -+typedef struct elan4io_resume_iproc_trap_struct -+{ -+ unsigned int rs_channel; -+ unsigned int rs_trans; -+ E4_IprocTrapHeader rs_header; -+ E4_IprocTrapData rs_data; -+} ELAN4IO_RESUME_IPROC_TRAP_STRUCT; -+#define ELAN4IO_RESUME_IPROC_TRAP _IOW ('e', ELAN4IO_USER_BASE + 22, ELAN4IO_RESUME_IPROC_TRAP_STRUCT) -+ -+#define ELAN4IO_FLUSH_ICACHE _IO ('e', ELAN4IO_USER_BASE + 23) -+#define ELAN4IO_STOP_CTXT _IO ('e', ELAN4IO_USER_BASE + 24) -+ -+#define ELAN4IO_ALLOC_INTCOOKIE _IOW ('e', ELAN4IO_USER_BASE + 25, ELAN4_INTCOOKIE) -+#define ELAN4IO_FREE_INTCOOKIE _IOW ('e', ELAN4IO_USER_BASE + 26, ELAN4_INTCOOKIE) -+#define ELAN4IO_ARM_INTCOOKIE _IOW ('e', ELAN4IO_USER_BASE + 27, ELAN4_INTCOOKIE) -+#define ELAN4IO_WAIT_INTCOOKIE _IOW ('e', ELAN4IO_USER_BASE + 28, ELAN4_INTCOOKIE) -+ -+typedef struct elan4io_alloc_trap_queues_struct -+{ -+ unsigned int tq_ndproc_traps; -+ unsigned int tq_neproc_traps; -+ unsigned int tq_ntproc_traps; -+ unsigned int tq_nthreads; -+ unsigned int tq_ndmas; -+} ELAN4IO_ALLOC_TRAP_QUEUES_STRUCT; -+#define ELAN4IO_ALLOC_TRAP_QUEUES _IOW ('e', ELAN4IO_USER_BASE + 29, ELAN4IO_ALLOC_TRAP_QUEUES_STRUCT) -+ -+typedef struct elan4io_neterr_msg_struct -+{ -+ unsigned int nm_vp; -+ unsigned int nm_nctx; -+ unsigned int nm_retries; -+ unsigned int nm_pad; -+ ELAN4_NETERR_MSG nm_msg; -+} ELAN4IO_NETERR_MSG_STRUCT; -+#define ELAN4IO_NETERR_MSG _IOW ('e', ELAN4IO_USER_BASE + 30, ELAN4IO_NETERR_MSG_STRUCT) -+ -+typedef struct elan4io_neterr_timer_struct -+{ -+ unsigned int nt_usecs; -+} ELAN4IO_NETERR_TIMER_STUCT; -+ -+#define ELAN4IO_NETERR_TIMER _IO ('e', ELAN4IO_USER_BASE + 31) -+ -+typedef struct elan4io_neterr_fixup_struct -+{ -+ E4_uint64 nf_cookie; -+ unsigned int nf_waitforeop; -+ unsigned int nf_sten; -+ unsigned int nf_vp; -+ unsigned int nf_pad; -+} ELAN4IO_NETERR_FIXUP_STRUCT; -+ -+#define ELAN4IO_NETERR_FIXUP _IOW ('e', ELAN4IO_USER_BASE + 32, ELAN4IO_NETERR_FIXUP_STRUCT) -+ -+typedef struct elan4io_firecap_struct -+{ -+ ELAN_CAPABILITY fc_capability; -+ ELAN4_INTCOOKIE fc_cookie; -+} ELAN4IO_FIRECAP_STRUCT; -+ -+#define ELAN4IO_FIRE_INTCOOKIE _IOW ('e', ELAN4IO_USER_BASE + 33, ELAN4IO_FIRECAP_STRUCT) -+ -+#define ELAN4IO_ALLOC_INTCOOKIE_TABLE _IOW ('e', ELAN4IO_USER_BASE + 34, ELAN_CAPABILITY) -+#define ELAN4IO_FREE_INTCOOKIE_TABLE _IO ('e', ELAN4IO_USER_BASE + 35) -+ -+typedef struct elan4io_translation -+{ -+ E4_Addr tr_addr; -+ unsigned long tr_len; -+ unsigned int tr_access; -+} ELAN4IO_TRANSLATION_STRUCT; -+ -+#define ELAN4IO_LOAD_TRANSLATION _IOW ('e', ELAN4IO_USER_BASE + 36, ELAN4IO_TRANSLATION_STRUCT) -+#define ELAN4IO_UNLOAD_TRANSLATION _IOW ('e', ELAN4IO_USER_BASE + 37, ELAN4IO_TRANSLATION_STRUCT) -+ -+typedef struct elan4io_dumpcq_struct32 -+{ -+ E4_uint64 cq_space; /* output: sdram addr of q, used to decode ptrs */ -+ E4_uint32 cq_size; /* output: The real size of the command queue */ -+ E4_uint32 bufsize; /* input: The size of the buffer to dump to */ -+ E4_uint32 cq_indx; /* input: index of cq to dump */ -+ unsigned int buffer; /* input: user address of rgs->buffer to dump to */ -+} ELAN4IO_DUMPCQ_STRUCT32; -+ -+typedef struct elan4io_dumpcq_struct -+{ -+ E4_uint64 cq_space; /* output: sdram addr of q, used to decode ptrs */ -+ E4_uint32 cq_size; /* output: The real size of the command queue */ -+ E4_uint32 bufsize; /* input: The size of the buffer to dump to */ -+ E4_uint32 cq_indx; /* input: index of cq to dump */ -+ unsigned long buffer; /* input: user address of rgs->buffer to 
dump to */ -+} ELAN4IO_DUMPCQ_STRUCT; -+ -+#define ELAN4IO_DUMPCQ _IOWR ('e', ELAN4IO_USER_BASE + 38, ELAN4IO_DUMPCQ_STRUCT) -+#define ELAN4IO_DUMPCQ32 _IOWR ('e', ELAN4IO_USER_BASE + 38, ELAN4IO_DUMPCQ_STRUCT32) -+ -+/* mmap offsets - - we define the file offset space as follows: -+ * -+ * page 0 - 4095 - command queues -+ * page 4096 - device user registers -+ * page 4097 - flag page/user stats -+ * page 4098 - device stats -+ * page 4099 - tproc trampoline -+ */ -+ -+#define ELAN4_OFF_COMMAND_QUEUES 0 -+#define ELAN4_OFF_USER_REGS 4096 -+#define ELAN4_OFF_USER_PAGE 4097 -+#define ELAN4_OFF_DEVICE_STATS 4098 -+#define ELAN4_OFF_TPROC_TRAMPOLINE 4099 -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -+#endif /* __ELAN4_IOCTL_H */ -diff -urN clean/include/elan4/mmu.h linux-2.6.9/include/elan4/mmu.h ---- clean/include/elan4/mmu.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan4/mmu.h 2005-04-21 07:12:06.000000000 -0400 -@@ -0,0 +1,117 @@ -+/* -+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: mmu.h,v 1.14 2005/04/21 11:12:06 mike Exp $" -+/* $Source: /cvs/master/quadrics/elan4mod/mmu.h,v $*/ -+ -+ -+#ifndef __ELAN4_MMU_H -+#define __ELAN4_MMU_H -+ -+#include -+ -+typedef union elan4_pte_page -+{ -+ struct { -+ struct page *page; -+ physaddr_t dma_addr; -+ } _page; -+#define pg_page _page.page -+#define pg_dma_addr _page.dma_addr -+ -+} ELAN4_PTE_PAGE; -+ -+typedef struct elan4_hash_entry -+{ -+ struct elan4_hash_entry *he_next; -+ struct elan4_hash_entry *he_prev; -+ -+ sdramaddr_t he_entry; -+ -+ struct elan4_hash_entry *he_chain[2]; -+ E4_uint64 he_tag[2]; -+ E4_uint32 he_pte[2]; -+ -+ ELAN4_PTE_PAGE he_pg[2][4]; -+} ELAN4_HASH_ENTRY; -+ -+#define ELAN4_HENT_CHUNKS 16 /* SDRAM_MIN_BLOCK_SIZE/sizeof (E4_HashTableEntry) */ -+ -+typedef struct elan4_hash_chunk -+{ -+ struct list_head hc_link; -+ ELAN4_HASH_ENTRY hc_hents[ELAN4_HENT_CHUNKS]; -+} ELAN4_HASH_CHUNK; -+ -+typedef struct elan4_hash_cache -+{ -+ E4_Addr hc_start; -+ E4_Addr hc_end; -+ int hc_tbl; -+ -+ ELAN4_HASH_ENTRY *hc_hes[1]; -+} ELAN4_HASH_CACHE; -+ -+/* -+ * he_pte is really 4 bytes of pte "type" one for each pte -+ * entry - however we declare it as an "int" so we can -+ * easily determine that all 4 entries are invalid -+ */ -+#define HE_SET_PTE(he,tagidx,pteidx,val) (((E4_uint8 *) &(he->he_pte[tagidx]))[pteidx] = (val)) -+#define HE_GET_PTE(he,tagidx,pteidx) (((E4_uint8 *) &(he->he_pte[tagidx]))[pteidx]) -+ -+#define HE_TYPE_INVALID 0 -+#define HE_TYPE_SDRAM 1 -+#define HE_TYPE_COMMAND 2 -+#define HE_TYPE_REGS 3 -+#define HE_TYPE_PAGE 4 -+#define HE_TYPE_OTHER 5 -+#define HE_TYPE_RESERVED 6 -+ -+/* -+ * he_tag has the following form : -+ * [63:27] tag -+ * [20:17] pte valid -+ * [16] locked -+ * [15] copy -+ * [14] valid -+ * [13:0] context -+ */ -+ -+#define HE_TAG_VALID (1 << 14) -+#define HE_TAG_COPY (1 << 15) -+#define HE_TAG_LOCKED (1 << 16) -+ -+#define INVALID_CONTEXT 0 -+ -+extern u_char elan4_permtable[]; -+#define ELAN4_INCOMPAT_ACCESS(perm,access) ((elan4_permtable[(perm)] & (1 << (access))) == 0) -+extern u_char elan4_permreadonly[]; -+#define ELAN4_PERM_READONLY(perm) (elan4_permreadonly[(perm)]) -+ -+extern int elan4_debug_mmu; -+ -+extern int elan4_mmuhash_chain_reduction; -+extern int elan4_mmuhash_chain_end_reduce; -+extern int elan4_mmuhash_chain_middle_reduce; -+extern int 
elan4_mmuhash_chain_middle_fail;
-+extern int elan4_mmuhash_shuffle_attempts;
-+extern int elan4_mmuhash_shuffle_done;
-+
-+#ifdef DEBUG_PRINTF
-+# define MPRINTF(ctxt,lvl,args...) (elan4_debug_mmu > (lvl) ? elan4_debugf(ctxt,DBG_MMU, ##args) : (void)0)
-+#else
-+# define MPRINTF(ctxt,lvl,args...) ((void) 0)
-+#endif
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "stroustrup"
-+ * End:
-+ */
-+#endif /* __ELAN4_MMU_H */
-diff -urN clean/include/elan4/neterr.h linux-2.6.9/include/elan4/neterr.h
---- clean/include/elan4/neterr.h 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/elan4/neterr.h 2004-01-19 09:38:34.000000000 -0500
-@@ -0,0 +1,40 @@
-+/*
-+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ * Copyright (c) 2002-2004 by Quadrics Ltd.
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ifndef __ELAN4_NETERR_H
-+#define __ELAN4_NETERR_H
-+
-+#ident "@(#)$Id: neterr.h,v 1.1 2004/01/19 14:38:34 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
-+/* $Source: /cvs/master/quadrics/elan4mod/neterr.h,v $*/
-+
-+typedef struct elan4_neterr_msg
-+{
-+ E4_uint8 msg_type;
-+ E4_uint8 msg_waitforeop;
-+ E4_uint16 msg_context; /* network context # message sent to */
-+ E4_int16 msg_found; /* # cookie found (response) */
-+
-+ ELAN_LOCATION msg_sender; /* nodeid/context # message sent from */
-+ E4_uint32 msg_pad;
-+
-+ E4_uint64 msg_cookies[6]; /* 64 bit cookies from identify packets */
-+} ELAN4_NETERR_MSG;
-+
-+#define ELAN4_NETERR_MSG_SIZE sizeof (ELAN4_NETERR_MSG)
-+#define ELAN4_NETERR_MSG_REQUEST 1
-+#define ELAN4_NETERR_MSG_RESPONSE 2
-+
-+#define ELAN4_NETERR_MAX_COOKIES (sizeof (((ELAN4_NETERR_MSG *) 0)->msg_cookies) / \
-+ sizeof (((ELAN4_NETERR_MSG *) 0)->msg_cookies[0]))
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "stroustrup"
-+ * End:
-+ */
-+#endif /* __ELAN4_NETERR_H */
-diff -urN clean/include/elan4/pci.h linux-2.6.9/include/elan4/pci.h
---- clean/include/elan4/pci.h 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/elan4/pci.h 2003-09-04 08:39:17.000000000 -0400
-@@ -0,0 +1,227 @@
-+/*
-+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ * Copyright (c) 2002-2003 by Quadrics Ltd.
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ifndef __ELAN4_PCI_H
-+#define __ELAN4_PCI_H
-+
-+#ident "$Id: pci.h,v 1.32 2003/09/04 12:39:17 david Exp $"
-+/* $Source: /cvs/master/quadrics/elan4hdr/pci.h,v $*/
-+
-+/* Elan has 2 64 bit bars */
-+#define ELAN4_BAR_SDRAM 0
-+#define ELAN4_BAR_REGISTERS 2
-+
-+#define PCI_VENDOR_ID_QUADRICS 0x14fc
-+#define PCI_DEVICE_ID_ELAN3 0x0000
-+#define PCI_REVISION_ID_ELAN3_REVA 0x0000
-+#define PCI_REVISION_ID_ELAN3_REVB 0x0001
-+#define PCI_DEVICE_ID_ELAN4 0x0001
-+#define PCI_REVISION_ID_ELAN4_REVA 0x0000
-+#define PCI_REVISION_ID_ELAN4_REVB 0x0001
-+
-+/* support standard pseudo bars */
-+#define ELAN4_PSEUDO_BAR_ROM 8
-+
-+/* Elan PCI control
-+ configuration space register. ElanControlRegister */
-+#define PCI_ELAN_PARITY_ADDR_LO 0x40
-+#define PCI_ELAN_PARITY_ADDR_HI 0x44
-+#define PCI_ELAN_PARITY_TYPE 0x48
-+#define PCI_ELAN_CONTROL 0x4c
-+#define PCI_ELAN_PLL_CONTROL 0x50
-+#define PCI_ELAN_SPLIT_MESSAGE_ATTR 0x54
-+#define PCI_ELAN_SPLIT_MESSAGE_VALUE 0x54
-+#define PCI_ELAN_RAMBIST_FAILED 0x54
-+#define PCI_ELAN_TOPPHYSADDR(i) (0x58 + ((i)<<1))
-+
-+/*
-+ * [31] PciM66EN This is set if the bus is running in PCI2.3 - 66MHz mode.
-+ * [30:28] InitPattern This gives the PCI-X startup mode. See "PCI initialisation patterns" below.
-+ * [27] notBusIs64Bits If set the bus is running 32 bits wide. If clear it is a 64 bit bus.
-+ * [26:24] RamBistCntl Used to control the Elan4 RAM BIST. Not active if zero.
-+ * [23] RamBistFinished Only used when performing the RAM BIST test.
-+ * [22] SelectSplitMessAttr See ECTRL_SELECT_SPLIT_MESS_ATTR below.
-+ * [21] ReceivedSplitCompError See ECTRL_REC_SPLIT_COMP_MESSAGE below.
-+ * [20:16] WriteHighPriTime Used with ReadHighPriTime to control the ratio of PCI master write to PCI master
-+ * read bandwidth under heavy load. The higher the value of WriteHighPriTime the longer
-+ * the PCI write bursts will be allowed without interruption from a read transfer.
-+ * [15] DisableCouplingTest This is only used as part of the RAM BIST test. It affects the testing of the main
-+ * cache tag RAMs.
-+ * [14:13] Not used Will read as zero.
-+ * [12:8] ReadHighPriTime Used with WriteHighPriTime to control the ratio of PCI master write to PCI master
-+ * read bandwidth under heavy load. The higher the value of ReadHighPriTime the longer
-+ * the PCI read bursts will be allowed without interruption from a write transfer.
-+ * [7] EnableLatencyCountReset This bit affects the behaviour of disconnects due to the removal of GNT# after the latency
-+ * counter has expired. If set it will allow the latency counter to be reset each time the
-+ * GNT# is reasserted. If asserted it should provide improved bandwidth on the PCI bus
-+ * without increasing the maximum latency another device would have for access to the bus.
-+ * It will increase the average latency of other devices.
-+ * [6] ExtraMasterAddrBits This bit is used to control the physical PCI addresses generated by the MMU.
-+ * [5] ReducedPciDecode If set the PCI local memory BAR will decode 256Mbytes of PCI address space. If clear it
-+ * will decode 2Gbyte of PCI address space.
-+ * [4] ConfigInEBusRom If set the constant values of the Elan4 PCI configuration space will be taken from the
-+ * EEPROM. If clear the internal values will be used.
-+ * [3] EnableRd2_2Bursts This bit only affects the behaviour of burst reads when the PCI bus is operating in
-+ * PCI-2.2 mode. It allows adjacent reads to be merged into longer bursts for higher
-+ * performance.
-+ * [2] SoftIntReset If set this bit will cause the Elan4 to reset itself with the exception of the PCI
-+ * configuration space. All internal state machines will be put into the reset state.
-+ * [1] EnableWrBursts This bit allows much longer PCI-X write bursts. If set it will stop the Elan4 from
-+ * being completely PCI-X compliant as the Elan4 may request a long PCI-X write burst that
-+ * it does not complete. However it should significantly increase the maximum PCI-X write
-+ * bandwidth and is unlikely to cause problems with many PCI-X bridge chips.
-+ * [0] InvertMSIPriority This bit affects the way MSI interrupts are generated. It provides flexibility to generate
-+ * the MSI interrupts in a different way to allow for different implementations of MSI
-+ * logic and still give the correct priority of Elan4 interrupts.
-+ *
-+ * {PciM66EN, InitPattern, notBusIs64Bits, RamBistCntl, RamBistFinished,
-+ * SelectSplitMessAttr, ReceivedSplitCompError, WriteHighPriTime,
-+ * DisableCouplingTest, 2'h0, ReadHighPriTime,
-+ * EnableLatencyCountReset, ExtraMasterAddrBits, ReducedPciDecode, ConfigInEBusRom,
-+ * EnableRd2_2Bursts, SoftIntReset, EnableWrBursts, InvertMSIPriority}
-+ */
-+
-+#define ECTRL_INVERT_MSI_PRIO (1 << 0)
-+#define ECTRL_ENABLE_WRITEBURSTS (1 << 1)
-+#define ECTRL_SOFTWARE_INTERNAL_RESET (1 << 2)
-+#define ECTRL_ENABLE_2_2READBURSTS (1 << 3)
-+#define ECTRL_CONFIG_IN_EBUS_ROM (1 << 4)
-+#define ECTRL_28_NOT_30_BIT_LOCAL_BAR (1 << 5)
-+#define ECTRL_ExtraMasterAddrBits (1 << 6)
-+#define ECTRL_ENABLE_LATENCY_RESET (1 << 7)
-+#define ECTRL_DISABLE_COUPLING_TEST (1 << 15)
-+
-+/*
-+ * The ratio of the following two registers sets the relative bandwidth given to inputter data
-+ * versus other PCI traffic when scheduling new PCI master accesses.
-+ */
-+#define ECTRL_OTHER_HIGH_PRI_TIME_SHIFT (8) /* Sets top 4 bits of 8 bit counter */
-+#define ECTRL_OTHER_HIGH_PRI_TIME_MASK (0x1f)
-+
-+
-+#define ECTRL_IPROC_HIGH_PRI_TIME_SHIFT (16) /* Sets top 4 bits of 8 bit counter */
-+#define ECTRL_IPROC_HIGH_PRI_TIME_MASK (0x1f)
-+
-+/*
-+ * This is set if a split completion message is received.
-+ * This will cause a PCI error interrupt.
-+ * This error is cleared by writing a 1 to this bit.
-+ */
-+#define ECTRL_REC_SPLIT_COMP_MESSAGE (1 << 21)
-+/*
-+ * This bit is used to select reading of either the Split message attribute value when
-+ * set or the split completion message data value from 0x54 in the config space
-+ * if the ECTRL_REC_SPLIT_COMP_MESSAGE bit is set. 0x54 returns the BistFailed flags
-+ * if any of the BIST control bits are set (bits 26 to 24)
-+ */
-+#define ECTRL_SELECT_SPLIT_MESS_ATTR (1 << 22)
-+
-+// Internal RAM BIST control bits.
-+// Three bits of state control the RAM BIST (Built In Self Test).
-+//
-+// These bits must not be set unless the ECTRL_SOFTWARE_INTERNAL_RESET bit has also been set!
-+//
-+// For a normal fast RAM test assert ECTRL_BIST_FAST_TEST.
-+// For a data retention test first write ECTRL_START_RETENTION_TEST then wait the retention period of
-+// at least 1ms and preferably much longer, then write ECTRL_CONTINUE_RETENTION_TEST then wait
-+// again and finally write ECTRL_FINISH_RETENTION_TEST.
-+//
-+// The read only bit ECTRL_BIST_FINISHED_TEST can be polled to check that the test has completed.
-+// (See the guarded example below.)
-+#define ECTRL_BIST_CTRL_SHIFT (24)
-+#define ECTRL_BIST_CTRL_MASK (7 << 24)
-+
-+#define ECTRL_BIST_FAST_TEST ((7 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET) // old scheme
-+#define ECTRL_START_RETENTION_TEST ((1 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
-+#define ECTRL_CONTINUE_RETENTION_TEST ((3 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
-+#define ECTRL_FINISH_RETENTION_TEST ((7 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
-+
-+#define ECTRL_BIST_KICK_OFF ((1 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET) // new scheme
-+#define ECTRL_BIST_MOVE_ON_ODD ((3 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
-+#define ECTRL_BIST_MOVE_ON_EVEN ((5 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
-+#define ECTRL_BIST_SCREAM_THROUGH ((7 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
-+
-+#define ECTRL_CLEAR_BIST_TEST (0 << 24)
-+#define ECTRL_BIST_FINISHED_TEST (1 << 23)
-+
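-+/*
-+ * Editorial sketch (not part of the original Quadrics header): the SDRAM
-+ * retention-test sequence described above, using the standard kernel PCI
-+ * config-space accessors on the ElanControlRegister at PCI_ELAN_CONTROL.
-+ * The helper name and the 10ms waits are illustrative only. Guarded out so
-+ * the header is unchanged for the compiler.
-+ */
-+#if 0
-+static void example_sdram_retention_test (struct pci_dev *pdev)
-+{
-+ pci_write_config_dword (pdev, PCI_ELAN_CONTROL, ECTRL_START_RETENTION_TEST);
-+ mdelay (10); /* retention period - must be at least 1ms */
-+ pci_write_config_dword (pdev, PCI_ELAN_CONTROL, ECTRL_CONTINUE_RETENTION_TEST);
-+ mdelay (10);
-+ pci_write_config_dword (pdev, PCI_ELAN_CONTROL, ECTRL_FINISH_RETENTION_TEST);
-+}
-+#endif
-+
-+// Read only current PCI bus type.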
-+#define ECTRL_RUNNING_32BIT_MODE (1 << 27)
-+#define ECTRL_INITIALISATION_MODE (7 << 28)
-+#define ECTRL_RUNNING_M66EN_MODE (1 << 31)
-+
-+#define ECTRL_INIT_PATTERN_SHIFT (28)
-+#define ECTRL_INIT_PATTERN_MASK (0x7)
-+
-+// PCI initialisation patterns
-+#define Pci2_2 (0 << 28)
-+#define PciX50To66MHz (1 << 28)
-+#define PciX66to100MHz (2 << 28)
-+#define PciX100to133MHz (3 << 28)
-+#define PciXReserved1 (4 << 28)
-+#define PciXReserved2 (5 << 28)
-+#define PciXReserved3 (6 << 28)
-+#define PciXReserved4 (7 << 28)
-+
-+/* Elan PCI pll and pad control configuration space register. ElanPllControlReg */
-+// This overrides the default PCI pll control settings.
-+#define PciPll_FeedForwardISel0 (1 << 0) // Lsi name Z0
-+#define PciPll_FeedForwardISel1 (1 << 1) // Lsi name Z1
-+#define PciPll_ChargePumpISel0 (1 << 2) // Lsi name P0
-+#define PciPll_ChargePumpISel1 (1 << 3) // Lsi name P1
-+#define PciPll_EnableAutoReset (1 << 4) // Lsi name ENARST
-+#define PciPll_RSEL200500 (1 << 5) // Lsi name Range Select, 0: 100 - 250MHz, 1: 200 - 500MHz
-+#define PciPll_DivideFeedback (1 << 6) // Just used for test - This divides the shortcut feedback to the PCI PLL so that it can lock to the tester clock.
-+#define PciPll_CutFeedback (1 << 7) // Just used for test - This disables the shortcut feedback.
-+
-+// This overrides the default PCI BZ controller settings.
-+#define PciBZ_UPDI (0xf << 8)
-+#define PciBZ_WAIT_INT (0xf << 12)
-+
-+// This overrides the default Sys and SDRam pll control settings.
-+#define SysPll_FeedForwardISel0 (1 << 16) // Lsi name P0
-+#define SysPll_FeedForwardISel1 (1 << 17) // Lsi name P1
-+#define SysPll_ChargePumpISel0 (1 << 18) // Lsi name Z0
-+#define SysPll_ChargePumpISel1 (1 << 19) // Lsi name Z1
-+#define SysPll_EnableAutoReset (1 << 20) // Lsi name ENARST
-+#define SysPll_DivPhaseCompInBy2 (1 << 21) // Lsi name NODIV (Should be DIV)
-+#define SysPll_PllTestClkSel (1 << 22) // If asserted the master clock source is not taken from the pll.
-+
-+#define Pll_ForceEBusADTristate (1 << 23) // Required to enable the testing of EnableAutoReset. Enables use of EBusAD[7] (rev A)
-+#define Pll_LinkErrDirectToSDA (1 << 23) // Access to link error flag for triggering (rev B)
-+
-+
-+#define ECTRL_SYS_CLOCK_RATIO_SHIFT (24)
-+// Config: with 800MHz Speeds are 266 200 160 133.
-+// 0 = 133/133 (1:1) 6:6 1 -+// 1 = 160/133 (6:5) 5:6 1.2 -+// 2 = 200/133 (3:2) 4:6 1.5 -+// 3 = 266/133 (2:1) 3:6 2 -+// 4 = 200/200 (1:1) 4:4 1 -+// 5 = 266/200 (4:3) 3:4 1.33 -+ -+// Config: with 600MHz Speeds are 200 150 120 100 -+// 0 = 100/100 (1:1) 6:6 1 -+// 1 = 120/100 (6:5) 5:6 1.2 -+// 2 = 150/100 (3:2) 4:6 1.5 -+// 3 = 200/100 (2:1) 3:6 2 -+// 4 = 150/150 (1:1) 4:4 1 -+// 5 = 200/150 (4:3) 3:4 1.33 -+ -+#define ECTRL_SYS_CLOCK_RATIO_SHIFT (24) -+#define ECTRL_SYS_CLOCK_RATIO_1_1Slow (0 << ECTRL_SYS_CLOCK_RATIO_SHIFT) -+#define ECTRL_SYS_CLOCK_RATIO_6_5 (1 << ECTRL_SYS_CLOCK_RATIO_SHIFT) -+#define ECTRL_SYS_CLOCK_RATIO_3_2 (2 << ECTRL_SYS_CLOCK_RATIO_SHIFT) -+#define ECTRL_SYS_CLOCK_RATIO_2_1 (3 << ECTRL_SYS_CLOCK_RATIO_SHIFT) -+#define ECTRL_SYS_CLOCK_RATIO_1_1Fast (4 << ECTRL_SYS_CLOCK_RATIO_SHIFT) -+#define ECTRL_SYS_CLOCK_RATIO_4_3 (5 << ECTRL_SYS_CLOCK_RATIO_SHIFT) -+#define ECTRL_SYS_CLOCK_MAX_NORMAL (6) /* used to generate a valid random value */ -+#define GET_RANDOM_CLOCK_RATIO (Random(ECTRL_SYS_CLOCK_MAX_NORMAL) << ECTRL_SYS_CLOCK_RATIO_SHIFT) -+#define ECTRL_SYS_CLOCK_RATIO_PLL_TEST (6 << ECTRL_SYS_CLOCK_RATIO_SHIFT) -+#define ECTRL_SYS_CLOCK_RATIO_TEST (7 << ECTRL_SYS_CLOCK_RATIO_SHIFT) -+#define ECTRL_SYS_CLOCK_RATIO_MASK (7 << ECTRL_SYS_CLOCK_RATIO_SHIFT) -+ -+#endif /* __ELAN4_PCI_H */ -diff -urN clean/include/elan4/registers.h linux-2.6.9/include/elan4/registers.h ---- clean/include/elan4/registers.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan4/registers.h 2005-03-03 11:28:50.000000000 -0500 -@@ -0,0 +1,1587 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef _ELAN4_REGISTERS_H -+#define _ELAN4_REGISTERS_H -+ -+#ident "$Id: registers.h,v 1.120 2005/03/03 16:28:50 david Exp $" -+/* $Source: /cvs/master/quadrics/elan4hdr/registers.h,v $*/ -+ -+/* -+ * Header file for internal slave mapping of the ELAN4 registers -+ */ -+ -+#define E4_CACHELINE_SIZE (64) -+#define E4_STACK_ALIGN (64) -+ -+#ifndef _ASM -+ -+#include -+#include -+#include -+ -+typedef volatile struct _E4_CacheSets -+{ -+ E4_uint64 Set0[1024]; /* 8k bytes per set */ -+ E4_uint64 Set1[1024]; /* 8k bytes per set */ -+ E4_uint64 Set2[1024]; /* 8k bytes per set */ -+ E4_uint64 Set3[1024]; /* 8k bytes per set */ -+} E4_CacheSets; -+ -+typedef union e4_cache_tag -+{ -+ struct { -+ E4_uint32 pad0; /* Undefined value when read */ -+#if (BYTE_ORDER == LITTLE_ENDIAN) || defined(__LITTLE_ENDIAN__) -+ E4_uint32 :10; /* 0-9 - reserved */ -+ E4_uint32 LineError:1; /* 10 - line error */ -+ E4_uint32 Modified:1; /* 11 - modified */ -+ E4_uint32 FillPending:1; /* 12 - fill pending */ -+ E4_uint32 AddrTag30to13:18; /* 30-13 - tag */ -+ E4_uint32 :1; /* 31 - */ -+#else -+ E4_uint32 :1; /* 31 - */ -+ E4_uint32 AddrTag30to13:18; /* 30-13 - tag */ -+ E4_uint32 FillPending:1; /* 12 - fill pending */ -+ E4_uint32 Modified:1; /* 11 - modified */ -+ E4_uint32 LineError:1; /* 10 - line error */ -+ E4_uint32 :10; /* 0-9 - reserved */ -+#endif -+ } s; -+ E4_uint64 Value; -+} E4_CacheTag; -+ -+typedef volatile struct _E4_CacheTags -+{ -+ E4_CacheTag Tags[4][128]; /* 8k bytes per set, 64 byte cache line */ -+} E4_CacheTags; -+ -+#define E4_NumCacheSets 4 -+#define E4_NumCacheLines 128 -+#define E4_CacheLineSize 64 -+#define E4_CacheSize (E4_NumCacheSets * E4_NumCacheLines * E4_CacheLineSize) -+#define E4_CacheSetSize (E4_NumCacheLines * 
E4_CacheLineSize)
-+
-+/*
-+ * Run Queue pointers
-+ *
-+ * [62:35] FrontPointer[30:3]
-+ * [33:32] Size Value
-+ * [30:3] BackPointer[30:3]
-+ */
-+#define E4_QueuePtrMask (0x7ffffff8ULL)
-+#define E4_QueueSizeMask 3
-+#define E4_QueueEntrySize sizeof (E4_uint64)
-+
-+#define E4_Queue8KBytes 0
-+#define E4_Queue64KBytes 1
-+#define E4_Queue512KBytes 2
-+#define E4_Queue4MBytes 3
-+
-+#define E4_QueueFrontValue(val,size) ((val) | (size))
-+#define E4_QueueValue(queue,size) (((E4_uint64) E4_QueueFrontValue(queue,size)) << 32 | ((E4_uint64) (queue)))
-+
-+#define E4_QueueFrontPointer(val) /* extract queue front pointer from register */\
-+ (((val) >> 32) & E4_QueuePtrMask)
-+#define E4_QueueBackPointer(val) /* extract queue back pointer from register */ \
-+ ((val) & E4_QueuePtrMask)
-+#define E4_QueueSizeValue(val) /* extract queue size value from register */ \
-+ (((val) >> 32) & E4_QueueSizeMask)
-+#define E4_QueueSize(value) /* queue size in bytes from size value */ \
-+ (1 << (((value)*3) + 13))
-+#define E4_QueueOffsetMask(fptr)\
-+ ((8192 << (((fptr) & E4_QueueSizeMask) << 3)) - 1)
-+#define E4_QueueOffset(fptr)\
-+ ((fptr) & E4_QueueOffsetMask(fptr))
-+#define E4_QueueFrontPointerInc(fptr) \
-+ ( ((fptr) & ~E4_QueueOffsetMask(fptr)) | ((E4_QueueOffset(fptr) + 8) & E4_QueueOffsetMask(fptr)) )
-+
-+typedef union _E4_QueuePtr
-+{
-+ E4_uint64 Value;
-+ struct {
-+ E4_uint32 Back;
-+ E4_uint32 Front;
-+ } s;
-+} E4_QueuePtr;
-+
-+/*
-+ * DMA processor status register.
-+ *
-+ * [48] FirstSendTrans Set for the first packet of a dma.
-+ * [47:46] TimeSliceCount Time left to timeslice.
-+ * [45] DmaLastPacket Set for the last packet of a dma.
-+ * [44] CurrPrefetchDma Dma descriptor the prefetcher is valid for.
-+ * [43:39] PrefetcherState Dma prefetcher's state machine value.
-+ * [38:33] PacketAssemblyState Packet assembler's state machine value.
-+ * [32:31] PrefetcherWakeupFnt Dma prefetcher's wakeup function.
-+ * [30:28] PacketAssWakeupFnt Packet assembler's wakeup function.
-+ * [27] AckBufferValid Packet ack is valid.
-+ * [26] PrefetchedDataProblem Had either a data read fault or data error. Valid if AckBufferValid.
-+ * [25] PrefetcherHalting Prefetch data about to stop for halt. Valid if AckBufferValid.
-+ * [24] PacketTimeout Packet timeout. Sent an EopError. Valid if AckBufferValid set.
-+ * [23:22] PacketAckValue Packet ack type. Valid if AckBufferValid set.
-+ * [21:20] FaultUnitNo Set if the dma prefetcher has faulted.
-+ * [19:17] TrapType Packet assembler's trap type.
-+ * [16] PrefetcherFault Set if the dma prefetcher has faulted for this DMA unit.
-+ * [15] Remote The Dma had been issued remotely.
-+ * [14] Priority Running at high priority.
-+ * [13:0] Context proc's current context.
-+ * (see the guarded decode example below)
-+ */
-+
-+#define DPROC_FirstSendTrans(s) ((unsigned)((s) >> 48) & 1)
-+#define DPROC_TimeSliceCount(s) ((unsigned)((s) >> 46) & 3)
-+#define DPROC_DmaLastPacket(s) ((unsigned)((s) >> 45) & 1)
-+#define DPROC_CurrPrefetchDma(s) ((unsigned)((s) >> 44) & 1)
-+#define DPROC_PrefetcerState(s) ((unsigned)((s) >> 39) & 0x1f)
-+#define DPROC_PacketAssemblerState(s) ((unsigned)((s) >> 33) & 0x1f)
-+#define DPROC_PrefetcherWakeupFn(s) ((unsigned)((s) >> 31) & 3)
-+#define DPROC_PacketAssemblerWakeupFn(s)((unsigned)((s) >> 28) & 3)
-+#define DPROC_AckBufferValid(s) ((unsigned)((s) >> 27) & 1)
-+#define DPROC_PrefetcherDataProblem(s) ((unsigned)((s) >> 26) & 1)
-+#define DPROC_PrefetcherHalting(s) ((unsigned)((s) >> 25) & 1)
-+#define DPROC_PacketTimeout(s) ((unsigned)((s) >> 24) & 1)
-+#define DPROC_PacketAckValue(s) ((unsigned)((s) >> 22) & 3)
-+#define DPROC_FaultUnitNo(s) ((unsigned)((s) >> 20) & 3)
-+#define DPROC_TrapType(s) ((unsigned)((s) >> 17) & 7)
-+#define DPROC_PrefetcherFault(s) ((unsigned)((s) >> 16) & 1)
-+#define DPROC_Remote(s) ((unsigned)((s) >> 15) & 1)
-+#define DPROC_Priority(s) ((unsigned)((s) >> 14) & 1)
-+#define DPROC_Context(s) ((unsigned)(s) & 0x3fff)
-+
-+/*
-+ * Command processor status register.
-+ *
-+ * [26:21] CPState proc's current state.
-+ * [20] WakeupFnt proc's wakeup function.
-+ * [19:16] TrapValue proc's trap value.
-+ * [15] Remote Issued remotely.
-+ * [14] Priority Running at high priority.
-+ * [13:0] Context proc's current context.
-+ */
-+
-+#define CPROC_TrapType(s) ((unsigned)((s) >> 16) & 0xf)
-+#define CPROC_Remote(s) ((unsigned)((s) >> 15) & 0x1)
-+#define CPROC_Priority(s) ((unsigned)((s) >> 14) & 0x1)
-+#define CPROC_Context(s) ((unsigned)(s) & 0x3fff)
-+
-+/*
-+ * Event processor status register.
-+ *
-+ * [34:30] CPState event proc's current state.
-+ * [29:28] WakeupFnt event proc's wakeup function.
-+ * [27:20] EventCopySize This is the number of DWords to still be copied on a copy dword event.
-+ * [19] EProcPort1Fault CUN_EventProc1 has taken a translation fault.
-+ * [18] EProcPort0Fault CUN_EventProc0 has taken a translation fault.
-+ * [17:16] TrapValue event proc's trap value.
-+ * [15] Remote Issued remotely.
-+ * [14] Priority Running at high priority.
-+ * [13:0] Context proc's current context.
-+ */
-+
-+#define EPROC_CPState(s) ((unsigned)((s) >> 30) & 0x1f)
-+#define EPROC_WakeupFunction(s) ((unsigned)((s) >> 28) & 3)
-+#define EPROC_CopySize(s) ((unsigned)((s) >> 20) & 0xFF)
-+#define EPROC_Port1Fault(s) ((unsigned)((s) >> 19) & 1)
-+#define EPROC_Port0Fault(s) ((unsigned)((s) >> 18) & 1)
-+#define EPROC_TrapType(s) ((unsigned)((s) >> 16) & 3)
-+#define EPROC_Remote(s) ((unsigned)((s) >> 15) & 1)
-+#define EPROC_Priority(s) ((unsigned)((s) >> 14) & 1)
-+#define EPROC_Context(s) ((unsigned)(s) & 0x3fff)
-+
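-+/*
-+ * Editorial sketch (not part of the original Quadrics header): unpacking a
-+ * raw DMA processor status word with the accessors above. The function name
-+ * is illustrative only. Guarded out so the header is unchanged for the
-+ * compiler.
-+ */
-+#if 0
-+static void example_decode_dproc_status (E4_uint64 status, unsigned *ctx, unsigned *trap)
-+{
-+ *ctx = DPROC_Context (status); /* [13:0] context the dma ran in */
-+ *trap = DPROC_TrapType (status); /* [19:17] packet assembler's trap type */
-+}
-+#endif
-+
-+/*
-+ * Thread processor status register.
-+ *
-+ * [39:24] MemPortBusy 16 bits of port busy flags for all FFU memory ports.
-+ * [23:21] Reads as zero
-+ * [20:18] TQState State vector for thread queuing proc.
-+ * [17] HighRunQueueFull High priority run queue is full
-+ * [16] LowRunQueueFull Low priority run queue is full
-+ * [15] ReadyHigh More runnable threads at high priority
-+ * [14] ReadyLow More runnable threads at low priority
-+ * [13:0] Context proc's current context.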
-+ */
-+#define TPROC_HighRunQueueFull(s) ((unsigned)((s) >> 17) & 1)
-+#define TPROC_LowRunQueueFull(s) ((unsigned)((s) >> 16) & 1)
-+#define TPROC_ReadyHigh(s) ((unsigned)((s) >> 15) & 1)
-+#define TPROC_ReadyLow(s) ((unsigned)((s) >> 14) & 1)
-+#define TPROC_Context(s) ((unsigned)((s) & 0x3fff))
-+
-+/*
-+ * Input processor status register
-+ *
-+ * [55] Last Trans (~EOP)
-+ * [54] First Trans (~EOP)
-+ * [53] Channel (~EOP)
-+ * [52] Bad Length (~EOP)
-+ * [51:50] Trans CRC Status (~EOP)
-+ * [49:48] EOP type
-+ * [47] EOP trap
-+ * [46] Trapping priority
-+ * [45] Trapping Channel
-+ * [44:43] Bad ack sent
-+ * [42:41] Good ack sent
-+ * [40] Queueing Packet (~EOP)
-+ * [39:36] Channel trapped bits
-+ * [35:32] IProc Trap Value
-+ * [31:16] Network Context (~EOP)
-+ * [15:0] Transaction Type (~EOP)
-+ */
-+#define IPROC_LastTrans(s) ((unsigned)((s) >> 55) & 0x1)
-+#define IPROC_FirstTrans(s) ((unsigned)((s) >> 54) & 0x1)
-+#define IPROC_Channel(s) ((unsigned)((s) >> 53) & 0x1)
-+#define IPROC_BadLength(s) ((unsigned)((s) >> 52) & 0x1)
-+#define IPROC_TransCRCStatus(s) ((unsigned)((s) >> 50) & 0x3)
-+#define IPROC_EOPType(s) ((unsigned)((s) >> 48) & 0x3)
-+#define IPROC_EOPTrap(s) ((unsigned)((s) >> 47) & 0x1)
-+#define IPROC_InputterPri(s) ((unsigned)((s) >> 46) & 0x1)
-+#define IPROC_InputterChan(s) ((unsigned)((s) >> 45) & 0x1)
-+#define IPROC_BadAckSent(s) ((unsigned)((s) >> 43) & 0x3)
-+#define IPROC_GoodAckSent(s) ((unsigned)((s) >> 41) & 0x3)
-+#define IPROC_QueueingPacket(s) ((unsigned)((s) >> 40) & 0x1)
-+#define IPROC_ChannelTrapped(s) ((unsigned)((s) >> 36) & 0xF)
-+#define IPROC_TrapValue(s) ((unsigned)((s) >> 32) & 0xF)
-+#define IPROC_NetworkContext(s) ((unsigned)((s) >> 16) & 0xFFFF)
-+#define IPROC_TransactionType(s) ((unsigned)(s) & 0xFFFF)
-+
-+/* values for IPROC_TransCRCStatus */
-+#define CRC_STATUS_GOOD (0)
-+#define CRC_STATUS_DISCARD (1)
-+#define CRC_STATUS_ERROR (2)
-+#define CRC_STATUS_BAD (3)
-+
-+/* values for IPROC_EOPType */
-+#define EOP_GOOD (1)
-+#define EOP_BADACK (2)
-+#define EOP_ERROR_RESET (3)
-+
-+/*
-+ * Interrupt register bits
-+ *
-+ * There are up to four sources of interrupt for the MSI port.
-+ * The Elan will request 4 ports but may only get either 2 or 1 port. The interrupts are assigned
-+ * as shown below:
-+ * No Of MSI ints Low Priority High Priority
-+ * 4 Event Ints OtherInts Inputter Ints Hard Error ints.
-+ * i.e. Dproc, Tproc, Sten. HighPri and LowPri Link errs, ECC errs,
-+ *
-+ * 2 Event Ints All other interrupts.
-+ * 1 All together.
-+ *
-+ * It is not safe to change the number of sources of interrupt while there may be outstanding,
-+ * unserviced interrupts pending.
-+ * There are two forms of encoding. This has been provided in case an MSI implementation assumes either
-+ * a high value to have a high priority or a low value to have a high priority. This is controlled
-+ * by a bit in the Elan PCI Control register.
-+ */ -+#define INT_LinkPortKeyFail (1<<18) -+#define INT_PciMemErr (1<<17) -+#define INT_SDRamInt (1<<16) -+#define INT_LinkError (1<<15) -+#define INT_IProcCh1HighPri (1<<14) -+#define INT_IProcCh0HighPri (1<<13) -+#define INT_IProcCh1LowPri (1<<12) -+#define INT_IProcCh0LowPri (1<<11) -+#define INT_DiscardingHighPri (1<<10) -+#define INT_DiscardingLowPri (1<<9) -+#define INT_CProcHalted (1<<8) -+#define INT_TProcHalted (1<<7) -+#define INT_DProcHalted (1<<6) -+#define INT_EProc (1<<5) -+#define INT_TProc (1<<4) -+#define INT_CProc (1<<3) -+#define INT_Dma1Proc (1<<2) -+#define INT_Dma0Proc (1<<1) -+#define INT_MainInterrupt (1<<0) -+ -+#define INT_Units (INT_EProc | INT_TProc | INT_CProc | INT_Dma1Proc | INT_Dma0Proc) -+#define INT_Inputters (INT_IProcCh1HighPri | INT_IProcCh0HighPri | INT_IProcCh1LowPri | INT_IProcCh0LowPri) -+#define INT_Discarding (INT_DiscardingHighPri | INT_DiscardingLowPri) -+#define INT_Halted (INT_CProcHalted | INT_TProcHalted | INT_DProcHalted) -+#define INT_ErrorInterrupts (INT_PciMemErr | INT_SDRamInt | INT_LinkError) -+ -+#define INT_MSI0 INT_MainInterrupt -+#define INT_MSI1 (INT_Units | INT_Discarding | INT_Halted) -+#define INT_MSI2 (INT_Inputters) -+#define INT_MSI3 (INT_ErrorInterrupts) -+ -+#define E4_INTERRUPT_REG_SHIFT 32 -+#define E4_INTERRUPT_MASK_MASK (0xffffffffULL) -+ -+/* -+ * Trap type values - see trapvalues.v -+ */ -+ -+#define CommandProcInserterError 0x1 -+#define CommandProcPermissionTrap 0x2 -+#define CommandProcSendTransInvalid 0x3 -+#define CommandProcSendTransExpected 0x4 -+#define CommandProcDmaQueueOverflow 0x5 -+#define CommandProcInterruptQueueOverflow 0x6 -+#define CommandProcMemoryFault 0x7 -+#define CommandProcRouteFetchFault 0x8 -+#define CommandProcFailCountZero 0x9 -+#define CommandProcAddressAlignment 0xa -+#define CommandProcWaitTrap 0xb -+#define CommandProcMultipleGuards 0xc -+#define CommandProcOpenOnGuardedChan 0xd -+#define CommandProcThreadQueueOverflow 0xe -+#define CommandProcBadData 0xf -+ -+#define DmaProcNoFault 0x0 -+#define DmaProcRouteFetchFault 0x1 -+#define DmaProcFailCountError 0x2 -+#define DmaProcPacketAckError 0x3 -+#define DmaProcRunQueueReadFault 0x4 -+#define DmaProcQueueOverflow 0x5 -+ -+#define EventProcNoFault 0x0 -+#define EventProcAddressAlignment 0x1 -+#define EventProcMemoryFault 0x2 -+#define EventProcCountWrapError 0x3 -+ -+#define InputNoFault 0x0 -+#define InputAddressAlignment 0x1 -+#define InputMemoryFault 0x2 -+#define InputInvalidTransType 0x3 -+#define InputDmaQueueOverflow 0x4 -+#define InputEventEngineTrapped 0x5 -+#define InputCrcErrorAfterPAckOk 0x6 -+#define InputEopErrorOnWaitForEop 0x7 -+#define InputEopErrorTrap 0x8 -+#define InputDiscardAfterAckOk 0x9 -+ -+typedef struct _E4_Sched_Status -+{ -+ E4_uint32 Status; -+ E4_uint32 Restart; -+} E4_Sched_Status; -+ -+typedef struct _E4_Input_Ptrs -+{ -+ E4_uint32 ContextFilterTable; -+ E4_uint32 TrapBasePtr; -+} E4_Input_Ptrs; -+ -+#define SCH_StopLowPriQueues (1 << 0) -+#define SCH_DProcHalt (1 << 1) -+#define SCH_TProcHalt (1 << 2) -+#define SCH_CProcHalt (1 << 3) -+ -+#define SCH_CProcTimeout600ns (1 << 4) -+#define SCH_CProcTimeout1p4us (2 << 4) -+#define SCH_CProcTimeout3p0us (3 << 4) -+#define SCH_CProcTimeout6p2us (4 << 4) -+#define SCH_CProcTimeout12p6us (5 << 4) -+#define SCH_CProcTimeout25p4us (6 << 4) -+#define SCH_CProcTimeout51p0us (7 << 4) -+#define SCH_DiscardLowPriInput (1 << 7) -+#define SCH_DiscardHighPriInput (1 << 8) -+ -+#define SCH_DProcTimeslice64us (0 << 9) -+#define SCH_DProcTimeslice128us (1 << 9) -+#define 
SCH_DProcTimeslice256us (2 << 9)
-+#define SCH_DProcTimeslice512us (3 << 9)
-+
-+#define SCH_Halt (SCH_StopLowPriQueues | SCH_DProcHalt | SCH_TProcHalt | SCH_CProcHalt)
-+#define SCH_Discard (SCH_DiscardLowPriInput | SCH_DiscardHighPriInput)
-+
-+#define SCH_RestartCProc (1 << 0)
-+#define SCH_RestartTProc (1 << 1)
-+#define SCH_RestartEProc (1 << 2)
-+#define SCH_RestartDma0Proc (1 << 3)
-+#define SCH_RestartDma1Proc (1 << 4)
-+#define SCH_RestartDmaPrefetchProc (1 << 5)
-+#define SCH_RestartCh0LowPriInput (1 << 6)
-+#define SCH_RestartCh1LowPriInput (1 << 7)
-+#define SCH_RestartCh0HighPriInput (1 << 8)
-+#define SCH_RestartCh1HighPriInput (1 << 9)
-+#define SCH_ClearLinkErrorInt (1 << 10)
-+#define SCH_ContextFilterFlush (1 << 11)
-+
-+/*
-+ * Link state bits.
-+ */
-+#define LS_LinkNotReady (1 << 0) /* Link is in reset or recovering from an error */
-+#define LS_Locked (1 << 1) /* Linkinput PLL is locked */
-+#define LS_LockError (1 << 2) /* Linkinput PLL was unable to lock onto the input clock. */
-+#define LS_DeskewError (1 << 3) /* Linkinput was unable to Deskew all the inputs. (Broken wire?) */
-+#define LS_PhaseError (1 << 4) /* Linkinput Phase alignment error. */
-+#define LS_DataError (1 << 5) /* Received value was neither good data nor a token. */
-+#define LS_FifoOvFlow0 (1 << 6) /* Channel 0 input fifo overflowed. */
-+#define LS_FifoOvFlow1 (1 << 7) /* Channel 1 input fifo overflowed. */
-+#define LS_Mod45Changed (1 << 8) /* Mod45 bit has changed. Error set to force reset. */
-+#define LS_PAckNotSeenError (1 << 9) /* PAck value not returned for this packet. */
-+
-+/*
-+ * Link State Constant defines, used for writing to LinkSetValue
-+ */
-+
-+#define LRS_DataDel0 0x0
-+#define LRS_DataDel1 0x1
-+#define LRS_DataDel2 0x2
-+#define LRS_DataDel3 0x3
-+#define LRS_DataDel4 0x4
-+#define LRS_DataDel5 0x5
-+#define LRS_DataDel6 0x6
-+#define LRS_DataDel7 0x7
-+#define LRS_DataDel8 0x8
-+#define LRS_LinkInValue 0x9
-+#define LRS_PllDelValue 0xA
-+#define LRS_ClockEven 0xB
-+#define LRS_ErrorVal8to0 0xC
-+#define LRS_ErrorVal17to9 0xD
-+#define LRS_ErrorVal26to18 0xE
-+#define LRS_ErrorVal35to27 0xF
-+#define LRS_NumLinkDels 0x10
-+
-+#define LRS_Pllfast 0x40
-+
-+typedef struct _E4_CommandControl
-+{
-+ volatile E4_uint32 CommandQueueDescsBase;
-+ volatile E4_uint32 CommandRequeuePtr;
-+} E4_CommandControl;
-+
-+#define E4_CommandRequeueBusy 0x80000000 /* Test against read value of CommandRequeuePtr */
-+#define E4_CommandRequeueHighPri 0x1 /* Will requeue onto the high pri queue */
-+#define E4_QueueDescPtrMask 0x7fffffe0
-+
-+typedef struct _E4_CommandQueueDesc
-+{
-+ E4_uint64 CQ_QueuePtrs;
-+ E4_uint64 CQ_HoldingValue; /* 32 bit value for 32 bit accesses or OutOfOrderMask */
-+ E4_uint64 CQ_AckBuffers; /* Space for 32 4 bit ack buffer values. */
-+ E4_uint64 CQ_Control;
-+} E4_CommandQueueDesc;
-+
-+/*
-+ * Rev A - CQ_QueuePtrs
-+ * [63] Unused Should be set to zero.
-+ * [62:51] Unused (reads as top of InsertPtr)
-+ * [50:35] CompletedPtr Completed pointer. This is aligned to a byte address.
-+ * [34] Trapped Will be set if the command has trapped.
-+ * [33:32] Size Size of queue.
-+ * [31] Used Will be set if the descriptor has been changed and written back by the elan.
-+ * [30:3] InsertPtr Insert pointer. This is aligned to a byte address.
-+ * [2] TimedOut Will be set if the queue timedout executing a command.
-+ * [1] Priority When set the queue runs at high priority.
-+ * [0] Error If this becomes set all new data written to the queue is
-+ * discarded.
-+ *
-+ * Rev B - CQ_QueuePtrs
-+ * [63] TimedOut Will be set if the queue timedout executing a command.
-+ * [62] Priority When set the queue runs at high priority.
-+ * [61] QueueType 1=will accept unordered 64 bit PCI writes. 0=will accept ordered 32 or 64 bit PCI writes.
-+ * [60:51] Unused (reads as top of InsertPtr)
-+ * [50:35] CompletedPtr Completed pointer. This is aligned to a byte address.
-+ * [34] Trapped Will be set if the command has trapped.
-+ * [33:32] Size Size of queue.
-+ * [31] Used Will be set if the descriptor has been changed and written back by the elan.
-+ * [30:3] InsertPtr Insert pointer. This is aligned to a byte address.
-+ * [2] OrderControl Holds bit 8 of last PCI accesses. Used by a reordering queue.
-+ * [1:0] ErrorType This field has the current error status of the queue.
-+ */
-+
-+/* Common between revA and RevB */
-+#define CQ_PtrMask (0x7ffffff8) /* 31 bit sdram address */
-+#define CQ_PtrOffsetMask (0x7fff8)
-+#define CQ_PtrBaseMask (0x7ff80000)
-+
-+#define CQ_InsertPtrShift (3 - 3) /* InsertPtr is 64 bit aligned */
-+#define CQ_SizeShift (32)
-+# define CQ_Size1K 0
-+# define CQ_Size8K 1
-+# define CQ_Size64K 2
-+# define CQ_Size512K 3
-+# define CQ_SizeMask 3
-+
-+#define CQ_CompletedPtrShift (35 - 3) /* CompletedPtr is 64 bit aligned */
-+
-+#define CQ_Used (1ull << 31)
-+#define CQ_Trapped (1ull << 34)
-+
-+#define CQ_QueuePtrsValue(Size,Inserter,Completer) \
-+ (((E4_uint64) (Size) << CQ_SizeShift) | \
-+ ((E4_uint64) (Inserter) << CQ_InsertPtrShift) | \
-+ ((E4_uint64) (Completer) << CQ_CompletedPtrShift))
-+
-+#define CQ_InsertPtr(QueuePtrs) \
-+ (((E4_uint64) QueuePtrs) & CQ_PtrMask)
-+
-+#define CQ_CompletedPtr(QueuePtrs) \
-+ (((E4_uint32)((QueuePtrs) >> CQ_CompletedPtrShift) & CQ_PtrOffsetMask) | \
-+ (CQ_InsertPtr(QueuePtrs) & CQ_PtrBaseMask))
-+
-+#define CQ_Size(SizeVal) (1024 * (1 << ((SizeVal)*3)))
-+
-+/* Rev A specific */
-+#define CQ_RevA_Error (1 << 0)
-+#define CQ_RevA_Priority (1 << 1)
-+#define CQ_RevA_TimedOut (1 << 2)
-+
-+/* Rev B specific */
-+#define CQ_RevB_ErrorType(QueuePtr) ((QueuePtr) & (3 << 0))
-+# define CQ_RevB_NoError (0ull << 0)
-+# define CQ_RevB_Overflowed (1ull << 0)
-+# define CQ_RevB_InvalidWriteSize (2ull << 0)
-+# define CQ_RevB_InvalidWriteOrder (3ull << 0)
-+#define CQ_RevB_OrderControl (1ull << 2)
-+
-+#define CQ_RevB_QueueType(QueuePtr) ((QueuePtr) & (1ull << 61))
-+# define CQ_RevB_ReorderingQueue (1ull << 61)
-+# define CQ_RevB_32bitWriteQueue (0ull << 61)
-+
-+#define CQ_RevB_Priority (1ull << 62)
-+#define CQ_RevB_TimedOut (1ull << 63)
-+
-+/*
-+ * CQ_AckBuffers - Packet Ack Values
-+ */
-+#define PackOk (0x0)
-+#define PackTestFail (0x1)
-+#define PackDiscard (0x2)
-+#define PackError (0x7)
-+#define PackTimeout (0x8)
-+#define PackWaiting (0xF)
-+#define PackValue(val,chan) (((val) >> ((chan) * 4)) & 0xf)
-+
-+/*
-+ * CQ_Control
-+ * [63:35] ExtractPtr
-+ * [34] Unused
-+ * [33:32] ChannelNotCompleted
-+ * [31:24] Permissions
-+ * [23:16] RestartCount Decremented after each restart.
Will trap when zero
-+ * [15:14] Unused Should be set to zero
-+ * [13:0] Context
-+ */
-+#define CQ_Context(Control) ((E4_uint32) ((Control) >> 0) & 0x3fff)
-+#define CQ_RestartCount(Control) ((E4_uint32) ((Control) >> 16) & 0x7f)
-+#define CQ_ChannelNotCompleted(Control) ((E4_uint32) ((Control) >> 32) & 3)
-+#define CQ_ExtractPtr(Control) ((E4_uint32) ((Control) >> 32) & 0xFFFFFFF8)
-+
-+#define CQ_RestartCountShift 16
-+
-+#define CQ_SetEventEnableBit (1 << 24)
-+#define CQ_WaitEventEnableBit (1 << 25)
-+#define CQ_ModifyEnableBit (1 << 26)
-+#define CQ_WriteEnableBit (1 << 27)
-+#define CQ_ThreadStartEnableBit (1 << 28)
-+#define CQ_DmaStartEnableBit (1 << 29)
-+#define CQ_STENEnableBit (1 << 30)
-+#define CQ_InterruptEnableBit (1 << 31)
-+#define CQ_EnableAllBits (0xFF000000)
-+#define CQ_PermissionMask (0xFF000000)
-+
-+#define CQ_ControlValue(Cntx, RestartCount, Permissions) \
-+ (((Cntx) & 0x3fff) | (((RestartCount) & 0xff) << 16) | ((Permissions) & CQ_PermissionMask))
-+
-+/*
-+ * This file describes the slave address map of Elan4.
-+ *
-+ * Elan4 has two PCI 64 bit base address registers. One is set up for elan
-+ * local memory and the other is for the command port, elan registers and ebus.
-+ *
-+ * This file describes the command port, elan registers and ebus BAR. This is a
-+ * 26 bit base address register and is split up as follows:
-+ * 1 The ebus requires 21 bits of address. 26'h3e00000 to 26'h3ffffff
-+ * 2 The control registers require 16 bits of address. 26'h3df0000 to 26'h3dfffff
-+ * 3 The command port has the rest. This gives just under 8k command ports or about 123 per
-+ * processor of a 64 node SMP.
-+ */
-+
-+/* BAR1 contains the command queues followed by the registers and the Ebus - and is 26 bits */
-+/* each command queue has an 8K page associated with it */
-+#define CQ_CommandMappingSize (1 << 13)
-+#define CQ_NumCommandDescs ((1 << (26 - 13)))
-+#define CQ_CommandDescsAlignment ((1 << (26 - 13)) * sizeof (E4_CommandQueueDesc))
-+
-+/* control reg bits i.e. E4_DataBusMap.SysControlReg */
-+#define CONT_EN_ALL_SETS (1ULL << 0) /* enable cache */
-+#define CONT_MMU_ENABLE (1ULL << 1) /* enables the mmu */
-+#define CONT_CACHE_HASH_TABLE (1ULL << 2) /* cache up hash table entries */
-+#define CONT_CACHE_CHAINS (1ULL << 3) /* cache up chain entries */
-+#define CONT_CACHE_ROOT_CNTX (1ULL << 4) /* cache root context table for routes and filters. */
-+#define CONT_CACHE_STEN_ROUTES (1ULL << 5) /* cache up sten packet routes */
-+#define CONT_CACHE_DMA_ROUTES (1ULL << 6) /* cache up dma packet routes */
-+
-+#define CONT_CACHE_NONE 0ULL
-+#define CONT_CACHE_ALL (CONT_CACHE_HASH_TABLE | CONT_CACHE_CHAINS | CONT_CACHE_ROOT_CNTX | \
-+ CONT_CACHE_STEN_ROUTES | CONT_CACHE_DMA_ROUTES)
-+
-+/* This controls the format, size and position of the MMU hash tables. */
-+#define CONT_INHIBIT_MAX_CHAIN_ITEMS (1ULL << 7) /* Prevents the MaxChainItems value of 1024 from forcing a translation miss */
-+#define CONT_TABLE0_MASK_SIZE_SHIFT 8 /* Defines the size of hash table 0 */
-+#define CONT_TABLE0_PAGE_SIZE_SHIFT 13 /* Set the page size for hash table 0 */
-+#define CONT_TABLE1_MASK_SIZE_SHIFT 16 /* Defines the size of hash table 1 */
-+#define CONT_TABLE1_PAGE_SIZE_SHIFT 21 /* Set the page size for hash table 1 */
-+#define CONT_TWO_HASH_TABLES (1ULL << 24) /* Sets the MMU to use two hash tables. If not set only 0 used. */
-+#define CONT_2K_NOT_1K_DMA_PACKETS (1ULL << 25) /* Used to select the default DMA packet size.
*/
-+#define CONT_ALIGN_ALL_DMA_PACKETS (1ULL << 26) /* Will force all dma packets to be aligned to a page. */
-+#define CONT_DIRECT_MAP_PCI_WRITES (1ULL << 27) /* Will force pci writes to write and flush the dcache. */
-+#define CONT_TLB_FLUSH (1ULL << 28) /* Invalidates the TLB and indicates when flushed */
-+#define CONT_CLEAR_WALK_WROTE_TABLES (1ULL << 29) /* Used to guarantee that the elan is using new PTE values. */
-+#define CONT_ROUTE_FLUSH (1ULL << 30) /* Invalidates all route cache entries. */
-+#define CONT_CLEAR_LINKPORT_INT (1ULL << 31) /* Clears the Linkport key fail interrupt. Reads as 0. */
-+#define CONT_CLEAR_SDRAM_ERROR (1ULL << 32) /* Clears an ECC error interrupt. Reads as 0. */
-+
-+/*
-+ * These are extra control bits used for testing the DLLs of the SDRAM interface. Most of the Sdram
-+ * control bits are defined in xsdram.h
-+ */
-+#define SDRAM_FIXED_DLL_DELAY_SHIFT 47
-+#define SDRAM_FIXED_DLL_DELAY_BITS 5
-+#define SDRAM_FIXED_DLL_DELAY_MASK ((1ULL << SDRAM_FIXED_DLL_DELAY_BITS) - 1ULL)
-+#define SDRAM_FIXED_DLL_DELAY(Value) ((SDRAM_FIXED_DLL_DELAY_MASK & (Value)) << SDRAM_FIXED_DLL_DELAY_SHIFT)
-+#define SDRAM_FIXED_DELAY_ENABLE (1ULL << 52)
-+#define SDRAM_GET_DLL_DELAY(Value) (((Value) >> SDRAM_FIXED_DLL_DELAY_SHIFT) & SDRAM_FIXED_DLL_DELAY_MASK)
-+
-+#define SDRAM_166_DLL_CORRECTION_FACTOR 3 /* This is to allow for SSO and ringing on the DQ lines */
-+#define SDRAM_150_DLL_CORRECTION_FACTOR 2 /* This is to allow for SSO and ringing on the DQ lines */
-+
-+#define PAGE_SIZE_4K 0x0
-+#define PAGE_SIZE_8K 0x1
-+#define PAGE_SIZE_64K 0x2
-+#define PAGE_SIZE_512K 0x3
-+#define PAGE_SIZE_2M 0x4
-+#define PAGE_SIZE_4M 0x5
-+#define PAGE_SIZE_64M 0x6
-+#define PAGE_SIZE_512M 0x7
-+
-+#define PAGE_SIZE_MASK 0x7
-+#define PAGE_MASK_MASK 0x1f
-+
-+/* control reg bits i.e. E4_DataBusMap.LinkControlReg */
-+#define LCONT_REVA_GREEN_LED (1 << 0)
-+#define LCONT_REVA_YELLOW_LED (1 << 1)
-+#define LCONT_REVA_RED_LED (1 << 2)
-+#define LCONT_REVA_ENABLE_LED_DRIVE (1 << 3) /* Enable manual setting of the LEDs to the bits set above. */
-+
-+#define LCONT_REVB_DISABLE_TLB_PREFETCH (1 << 0)
-+#define LCONT_REVB_DISABLE_CRC_ERROR_CHECKING (1 << 1)
-+
-+
-+#define LCONT_EN_SYS_WRITES (1 << 4) /* Enable linkport writes to sys registers. i.e. all of E4_DataBusMap. */
-+#define LCONT_EN_SYS_READS (1 << 5) /* Enable linkport reads from sys registers. i.e. all of E4_DataBusMap. */
-+#define LCONT_EN_USER_WRITES (1 << 6) /* Enable linkport writes to user registers. i.e. all of E4_User_Regs. */
-+#define LCONT_EN_USER_READS (1 << 7) /* Enable linkport reads from user registers. i.e. all of E4_User_Regs. */
-+
-+#define LCONT_TEST_VALUE_MASK 0x3ff /* Value used for test writes and link boundary scan. */
-+#define LCONT_TEST_VALUE_SHIFT 8
-+#define LCONT_TEST_VALUE(Value) ((LCONT_LINK_STATE_MASK & (Value)) << LCONT_TEST_VALUE_SHIFT)
-+
-+/*
-+ * State read from LINK_STATE when TEST_VALUE is set to the following values.
-+ * TEST_VALUE LINK_STATE read TEST_VALUE LINK_STATE read
-+ * 000 - Data delay count 0 008 - Data delay count 8
-+ * 001 - Data delay count 1 009 - Link in value
-+ * 002 - Data delay count 2 00a - PLL delay
-+ * 003 - Data delay count 3 00b - Clock Delay
-+ * 004 - Data delay count 4 00c - ErrorVal8to0
-+ * 005 - Data delay count 5 00d - ErrorVal17to9
-+ * 006 - Data delay count 6 00e - ErrorVal26to18
-+ * 007 - Data delay count 7 00f - ErrorVal35to27
-+ */
-+
-+#define LCONT_TEST_CONTROL_MASK 0x3 /* Selects and controls the action of the LINK_STATE value.
*/
-+#define LCONT_TEST_CONTROL_SHIFT 18
-+
-+#define LCONT_READ_ERRORS 0 /* {Mod45RequestChanged, FifoOverflowError, DataError, PhaseError,
-+ * DeskewError, LockError, Locked, LinkNotReady} */
-+#define LCONT_READ_STATE 1 /* Read values addressed by TEST_CONTROL value */
-+#define LCONT_FIX_LINK_DELAYS 2 /* Sets delays to TEST_CONTROL value */
-+#define LCONT_BOUNDARY_SCAN 3 /* Puts link into boundary scan. Outputs TEST_CONTROL value to link,
-+ * reads LINK_STATE from link. */
-+
-+#define LCONT_LINK_STATE_MASK 0x3ff /* Read only */
-+#define LCONT_LINK_STATE_SHIFT 20 /* Read only */
-+#define LCONT_LINK_STATE(ControlRegValue) (LCONT_LINK_STATE_MASK & ((ControlRegValue) >> LCONT_LINK_STATE_SHIFT))
-+
-+/* control reg bits i.e. E4_DataBusMap.LinkContSettings */
-+#define LCONT_MOD45_DISABLE (1 << 0) /* If set the link will try to run in TNB mode. */
-+#define LCONT_CONFIG_PHASE_MASK 0x7 /* This sets the delay through the phase alignment buffer. */
-+#define LCONT_CONFIG_PHASE_SHIFT 1
-+
-+#define LCONT_PLL_REF_VAL_BITS_MASK 0x7f /* This is the divide value on the LinkIn clock to form the comms PLL */
-+#define LCONT_PLL_REF_VAL_BITS_SHIFT 4 /* reference clock. Div value is (n - 2). e.g. to Divide by 7 set to 5. */
-+
-+#define LCONT_FORCE_COMMSCLK_LOCAL (1 << 11) /* This must be set at one end of a back to back Elan configuration. */
-+#define LCONT_LVDS_VOLTAGE_BITS_MASK 0x3 /* This is used to set the voltage swing on the LVDS link output pads. */
-+#define LCONT_LVDS_VOLTAGE_BITS_SHIFT 12 /* see the LCONT_VOD_* values below. */
-+
-+#define LCONT_VOD_170 0 /* Approximate differential voltage swing in mV of link outputs into */
-+#define LCONT_VOD_360 1 /* a 100 ohm differential load. */
-+#define LCONT_VOD_460 2
-+#define LCONT_VOD_550 3
-+
-+#define LCONT_LVDS_TERMINATION_MASK 0x3 /* This sets the resistor values of the internal single ended termination */
-+#define LCONT_LVDS_TERMINATION_SHIFT 14 /* resistors of the link input and comms input clock. */
-+
-+#define LCONT_TERM_55_OHM 0 /* Resistor values for internal termination of LVDS pads. */
-+#define LCONT_TERM_50_OHM 1
-+#define LCONT_TERM_AUTO_OHM 2 /* Should normally be set to auto. */
-+#define LCONT_TERM_45_OHM 3
-+
-+#define LCONT_LVDS_EN_TERM_UPDATE (1 << 47) /* This should be asserted and deasserted if LCONT_LVDS_TERMINATION is changed. */
-+
-+/* Macros used to access and construct MMU hash table and chain entries. */
-+/*
-+ * Each hash entry is made up of a 64 byte block. Each entry has two tags where each
-+ * tag has 4 PTE's. PTE's 0 to 2 use the bottom 48 bits of a 64 bit word and PTE 3
-+ * uses the top 16 bits of 3 64 bit words.
-+ *
-+ * These macros can be used to build a single PTE. PTE3 needs to be built into a 48 bit
-+ * object before they can be used.
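-+ *
-+ * [Editorial sketch, not part of the original header: using the
-+ * definitions below, a PTE for a physical page "paddr" with permission
-+ * "perm" might be assembled as
-+ *
-+ * pte = ((paddr >> PTE_PADDR_SHIFT) & PTE_PPN_MASK) | PTE_SetPerm (perm);
-+ *
-+ * where paddr and perm are assumed inputs.]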
-+ */
-+#define PTE_ENTRY_MASK 0x0000ffffffffffffULL
-+#define PTE_TYPE_MASK 0x000000000000000fULL
-+#define PTE_PERM_MASK 0x00000000000000f0ULL
-+#define PTE_PERM_TYPE_MASK 0x00000000000000ffULL
-+#define PTE_REF_MASK 0x0000000000000100ULL
-+#define PTE_PPN_MASK 0x00007ffffffffe00ULL
-+#define PTE_MOD_MASK 0x0000800000000000ULL
-+#define PTE_TOPADDR_MASK 0x0000600000000000ULL
-+
-+#define PTE_MOD_SHIFT 47
-+#define PTE_PPN_SHIFT 9
-+#define PTE_REF_SHIFT 8
-+#define PTE_PERM_SHIFT 4
-+#define PTE_TYPE_SHIFT 0
-+
-+#define PTE_PADDR_SHIFT (12 - 9) /* Physical addresses are shifted down by this to go into the PTE */
-+
-+
-+/* Values required for tag 3 */
-+#define PTE_REF_3 0x0100000000000000ULL
-+#define PTE_MOD_3 0x8000000000000000ULL
-+#define PTE_ENTRY_MASK_3 0xffff000000000000ULL
-+#define PTE_PERM_TYPE_MASK_3 0x00ff000000000000ULL
-+#define PTE_ENTRY_3_FOR_0(NewPte) ((NewPte << (48)) & PTE_ENTRY_MASK_3)
-+#define PTE_ENTRY_3_FOR_1(NewPte) ((NewPte << (32)) & PTE_ENTRY_MASK_3)
-+#define PTE_ENTRY_3_FOR_2(NewPte) ((NewPte << (16)) & PTE_ENTRY_MASK_3)
-+
-+/* Values required for the tags */
-+#define TAG_CONTEXT_MASK 0x0000000000003fffULL
-+#define TAG_ADDRESS_MASK 0xfffffffff8000000ULL
-+#define TAG_CHAINPTR_18TO6_MASK 0x0000000007ffc000ULL
-+#define TAG_CHAINPTR_LOW_SHIFT (14 - 6)
-+#define TAG_CHAINPTR_30TO19_MASK 0x0000000003ffc000ULL
-+#define TAG_CHAINPTR_HIGH_SHIFT (19 - 14)
-+#define TAG_COPY_BIT 0x0000000004000000ULL
-+
-+/*
-+ * This takes the number loaded into the control register and returns the page size as a power of two.
-+ */
-+
-+#define E4_PAGE_SIZE_TABLE E4_uint32 const PageSizeTable[] = {12, 13, 16, 19, 21, 22, 26, 29}
-+#define E4_PAGE_SIZE_TABLE_SIZE (sizeof(PageSizeTable)/sizeof(PageSizeTable[0]))
-+
-+/*
-+ * This macro generates a hash block index.
-+ *
-+ * Cntx This is the 14 bit context. It should not be larger than 14 bits.
-+ * VAddr This is the 64 bit virtual address. It does not require any masking and can be a byte address.
-+ * PageSize This is the value loaded into the control register for this hash table.
-+ * HashTableMask This should be set to mask out upper bits past the end of the hash table.
-+ */
-+#define E4MMU_SHIFT_ADDR(VAddr, Shift) \
-+ ((((E4_uint32)(VAddr)) >> (Shift)) | (((E4_uint32)((VAddr) >> 32)) << (32 - (Shift))))
-+
-+#define E4MMU_CONTEXT_SCRAMBLE(Cntx) \
-+ ((((Cntx) << 8) | ((Cntx) >> 6)) ^ (((Cntx) << 15) | ((Cntx) << 1)))
-+
-+#define E4MMU_HASH_INDEX(Cntx, VAddr, PageShift, HashTableMask) \
-+ ((E4MMU_SHIFT_ADDR(VAddr, (PageShift) + 2) ^ E4MMU_CONTEXT_SCRAMBLE(Cntx)) & (HashTableMask))
-+
-+#define E4MMU_TAG(vaddr,ctx) (((vaddr) & TAG_ADDRESS_MASK) | ((ctx) & TAG_CONTEXT_MASK))
-+
-+#define E4MMU_TAG2VADDR(tag,hashidx,PageShift,HashTableMask) \
-+ (((tag) & TAG_ADDRESS_MASK) | ((((hashidx) ^ E4MMU_CONTEXT_SCRAMBLE((tag) & TAG_CONTEXT_MASK)) & (HashTableMask)) << ((PageShift + 2))))
-+
-+/*
-+ * Detailed bit descriptions for the tags and PTE's are better done with the macros
-+ * defined above.
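-+ *
-+ * [Editorial worked example, not from the original header: for a hash
-+ * table of 4K pages (PageShift 12) whose mask covers 2^11 blocks, the
-+ * hash block index and tag for an assumed (ctx, vaddr) pair would be
-+ *
-+ * idx = E4MMU_HASH_INDEX (ctx, vaddr, 12, (1 << 11) - 1);
-+ * tag = E4MMU_TAG (vaddr, ctx);]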
-+ */ -+typedef struct _E4_HashTableEntry -+{ -+ E4_uint64 Tag[2]; -+ E4_uint64 TagPTE[2][3]; -+} E4_HashTableEntry; -+ -+#define E4MMU_TAG_OFFSET(tag) ((tag) << 3) -+#define E4MMU_PTE_LOW_OFFSET(tag,pte) ((((tag)*3 + (pte) + 2) << 3)) -+#define E4MMU_PTE_HIGH_OFFSET(tag,pte) ((((tag)*3 + (pte) + 2) << 3) + 4) -+#define E4MMU_PTE3_WORD0_OFFSET(tag) ((((tag)*3 + 2) << 3) + 6) -+#define E4MMU_PTE3_WORD1_OFFSET(tag) ((((tag)*3 + 3) << 3) + 6) -+#define E4MMU_PTE3_WORD2_OFFSET(tag) ((((tag)*3 + 4) << 3) + 6) -+ -+ -+/* -+ * Hash0AddrBits is the size of the hash table in bytes as a power of 2. -+ * e.g. 11 would give 32 hash entries where each entry is 64 bytes. -+ */ -+#define SETUP_HASH_TABLES(Hash0PageSize, Hash0AddrBits, Hash1PageSize, Hash1AddrBits) \ -+ (((Hash0PageSize) << CONT_TABLE0_PAGE_SIZE_SHIFT) | \ -+ ((Hash0AddrBits) << CONT_TABLE0_MASK_SIZE_SHIFT) | \ -+ ((Hash1PageSize) << CONT_TABLE1_PAGE_SIZE_SHIFT) | \ -+ ((Hash1AddrBits) << CONT_TABLE1_MASK_SIZE_SHIFT)) -+ -+/* ECC status register */ -+#define ECC_Addr(s) ((s) & 0x7ffffff8ULL) -+#define ECC_Syndrome(s) (((s) >> 32) & 0xffffULL) -+#define ECC_RisingDQSSyndrome(s) (((s) >> 32) & 0xffULL) -+#define ECC_FallingDQSSyndrome(s) (((s) >> 40) & 0xffULL) -+#define ECC_UncorrectableErr(s) (((s) >> 48) & 1ULL) -+#define ECC_MultUncorrectErrs(s) (((s) >> 49) & 1ULL) -+#define ECC_CorrectableErr(s) (((s) >> 50) & 1ULL) -+#define ECC_MultCorrectErrs(s) (((s) >> 51) & 1ULL) -+ -+/* Permission type saved in a PTE. This is a four bit field */ -+#define PERM_Disabled 0x0 -+#define PERM_Unused 0x1 -+#define PERM_LocDataRead 0x2 -+#define PERM_LocDataWrite 0x3 -+#define PERM_LocRead 0x4 -+#define PERM_LocExecute 0x5 -+#define PERM_ReadOnly 0x6 -+#define PERM_LocWrite 0x7 -+#define PERM_LocEventOnly 0x8 -+#define PERM_LocEventWrite 0x9 -+#define PERM_RemoteEvent 0xa -+#define PERM_RemoteAll 0xb -+#define PERM_RemoteReadOnly 0xc -+#define PERM_RemoteWriteLocRead 0xd -+#define PERM_DataReadWrite 0xe -+#define PERM_NoFault 0xf -+ -+#define PERM_Mask 0xf -+ -+/* Permission type hints to device driver */ -+#define PERM_Preload 0x10 -+ -+#define PTE_SetPerm(Perm) (((Perm) & PERM_Mask) << 4) -+ -+/* Control info saved in the lookup field of the TLB */ -+#define PTE_PciNotLocal (1ULL << 0) /* Directs the access to the PCI interface */ -+#define PTE_BigEndian (1ULL << 1) /* Valid for PCI entries only */ -+#define PTE_RelaxedOrder (1ULL << 2) /* Valid for PCI entries only */ -+#define PTE_DontSnoop (1ULL << 3) /* Valid for PCI entries only */ -+ -+#define PTE_UseFixedSet (1ULL << 1) /* Value for non PCI entries only */ -+#define PTE_CommandQueue (1ULL << 2) /* Value for non PCI entries only */ -+#define PTE_SetFixedSetNo(Set) ((((Set) & 3) << 2) | PTE_UseFixedSet) -+ -+#define PTE_TypeBitsMask (0xfULL) -+#define PTE_PermissionTypeMask (0xfULL << 4) -+#define PTE_Referenced (1ULL << 8) -+#define PTE_PhysicalPageNoMask (0x7ffffffffe00ULL) -+#define PTE_Modified (1ULL << 47) -+ -+#define PTE_PhysicalAddrShiftIntoPTE (12 - 9) -+ -+/* define page table entry bit fields */ -+#define TLB_PageSizeBits (3 << 0) -+#define TLB_ACCBits (7 << 2) -+#define TLB_LocalBit (1 << 5) -+#define TLB_PCI64BitTargetBit (1 << 6) -+#define TLB_PCIBigEndianBit (1 << 7) -+ -+#define TLB_ModifiedBit (1 << 55) -+#define TLB_ReferencedBit (1 << 63) -+ -+/* Used to read values from the tlb. 
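-+ * [Editorial note: e.g. writing TLB_SEL_LINE(3), defined below, selects
-+ * TLB line 3 for a diagnostic read; inferred usage, not stated in the
-+ * original header.]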
*/ -+#define TLB_TlbReadCntBitsSh 56 -+#define TLB_UseSelAddrSh (1ULL << 60) -+#define TLB_WriteTlbLine (1ULL << 61) -+ -+#define TLB_SEL_LINE(LineNo) (TLB_UseSelAddrSh | \ -+ ((E4_uint64)((LineNo) & 0xf) << TLB_TlbReadCntBitsSh)) -+ -+#define TLB_NUM_ENTRIES 16 -+/* -+ * The following macros are used with the test access port (TlbLineValue) for the TLBs. -+ */ -+#define TLV_DoPciAccess (1ULL << 0) -+#define TLV_CommandAccess (1ULL << 1) -+#define TLV_DoCacheAccess (1ULL << 2) -+#define TLV_notStartTLBWalk (1ULL << 3) -+#define TLV_UseFixedSet (1ULL << 4) -+#define TLV_BigEndian (1ULL << 4) -+#define TLV_RelaxedOrder (1ULL << 5) -+#define TLV_DontSnoop (1ULL << 6) -+#define TLV_FixedSetNo_MASK (3ULL << 5) -+#define TLV_PciTypeBits_MASK (7ULL << 4) -+#define TLV_LookupBits_MASK (0x7fULL) -+#define TLV_MissErr (1ULL << 7) -+#define TLV_TypeBits (0xffULL) -+ -+#define TLV_PhysicalAddr_MASK (0x3fffffffff000ULL) -+ -+#define TLV_TlbTesting (1ULL << 51) -+#define TLV_SelectUnitsTlbRead (1ULL << 52) -+#define TLV_SelectTProcTlbRead (1ULL << 53) -+ -+#define TLV_TlbLineSelect_MASK (0xf) -+#define TLV_UnitsTlbLineSelect_SHIFT (54) -+#define TLV_TProcTlbLineSelect_SHIFT (59) -+#define TLV_EnableUnitsTlbRead (1ULL << 58) -+#define TLV_EnableTProcTlbRead (1ULL << 63) -+ -+/* -+ * Use this macro to enable direct testing of the Units TLB. -+ * When Line is in the range 0 to 15 a TLB line is selected for reading or writing. -+ * When Line is set to -1 the tlb will be activated to perform a match. -+ */ -+#define TLV_UnitsTlbLineSel(Line) (((Line) == -1) ? 0ULL : \ -+ (TLV_EnableUnitsTlbRead | ((E4_uint64)((Line) & TLV_TlbLineSelect_MASK) << TLV_UnitsTlbLineSelect_SHIFT))) -+#define TLV_TProcTlbLineSel(Line) (((Line) == -1) ? 0ULL : \ -+ (TLV_EnableTProcTlbRead | ((E4_uint64)((Line) & TLV_TlbLineSelect_MASK) << TLV_TProcTlbLineSelect_SHIFT))) -+ -+/* -+ * Thread_Trap_State -+ * see f_RegFileControl.v TProcStatus -+ */ -+#define TS_HaltThread (1 << 0) -+#define TS_TrapForTooManyInstructions (1 << 1) -+#define TS_InstAccessException (1 << 2) -+#define TS_Unimplemented (1 << 3) -+#define TS_DataAccessException (1 << 4) -+#define TS_DataAlignmentError (1 << 5) -+#define TS_TrapForUsingBadData (1 << 6) -+#define TS_TrapTypeMask (0x7f) -+#define TS_DataPortNo(ts) (((ts) >> 7) & 7) -+#define TS_TrappedFlag (1 << 10) -+#define TS_MemLock (1 << 11) -+#define TS_XCCshift 12 -+#define TS_XCCmask 0xff -+#define TS_ICC(ts) (((ts) >> 12) & 15) -+#define TS_XCC(ts) (((ts) >> 16) & 15) -+#define TS_InstValid_F (1 << 20) -+#define TS_InstValid_R (1 << 21) -+#define TS_InstValid_E (1 << 22) -+#define TS_InstValid_W (1 << 23) -+#define TS_HighPriority (1 << 24) -+#define TS_RemoteThread (1 << 25) -+#define TS_TProcTranslationInProgress (1 << 26) -+#define TS_MemLock_E (1 << 27) -+ -+/* Thread run queue entries */ -+typedef struct E4_ThreadRegs -+{ -+ E4_uint64 Registers[7]; -+} E4_ThreadRegs; -+ -+typedef struct E4_TProcQueueEntry -+{ -+ E4_ThreadRegs Regs; /* XXXX: jon check this */ -+ E4_uint64 Context; /* XXXX: jon check this */ -+} E4_TProcQueueEntry; -+ -+typedef struct E4_DProcQueueEntry -+{ -+ E4_DMA Desc; -+ E4_uint64 Pad; -+} E4_DProcQueueEntry; -+ -+/* -+ * Packet acknowledge values. -+ */ -+#define E4_PAckOk 0 -+#define E4_PAckTestFail 1 -+#define E4_PAckDiscard 2 -+#define E4_PAckError 3 -+ -+/* -+ * return values from breaktest instruction. 
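-+ *
-+ * [Editorial note, not from the original header: a thread would typically
-+ * poll these bits, e.g. yielding when ICC_TIME_TO_BREAK is set in the
-+ * value returned by the breaktest instruction; inferred usage.]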
-+ */
-+#define ICC_CARRY_BIT (0x1ULL << 0) /* Breaktest: Load pending */
-+#define ICC_ZERO_BIT (0x1ULL << 1) /* Breaktest: Time to break */
-+#define ICC_SIGNED_BIT (0x1ULL << 2) /* Breaktest: Another thread ready */
-+#define ICC_TPROC_RDY_LOW_PRI (0x1ULL << 3)
-+#define ICC_TPROC_RDY_HIGH_PRI (0x1ULL << 4)
-+#define ICC_RUNNING_HIGH_PRI (0x1ULL << 5)
-+#define ICC_RUNNING_AS_REMOTE (0x1ULL << 6)
-+#define ICC_TIME_TO_BREAK (0x1ULL << 7)
-+#define ICC_RS1LOAD_PENDING (0x1ULL << 8)
-+#define ICC_TPROC_HALT (0x1ULL << 9)
-+
-+/*
-+ * Main Interrupt cookies
-+ * [63:14] user cookie
-+ * [13:0] context
-+ */
-+#define E4_MAIN_INT_SHIFT 14
-+#define E4_MAIN_INT_COOKIE(cookie) ((cookie) >> E4_MAIN_INT_SHIFT)
-+#define E4_MAIN_INT_CTX(cookie) ((cookie) & 0x3FFF)
-+
-+typedef E4_uint64 E4_MainIntEntry;
-+
-+#define E4_MainIntEntrySize sizeof (E4_MainIntEntry)
-+
-+/*
-+ * The internal databus is 64 bits wide.
-+ * All writes to the internal registers MUST be made with 64 bit write operations.
-+ * These can be made up of pairs of 32 bit writes on the PCI bus. The writes will be
-+ * treated as nops if they are performed as two separate 32 bit writes.
-+ */
-+typedef volatile struct _E4_DataBusMap
-+{
-+ E4_uint64 InputTrans[4][16]; /* 0x000 */
-+
-+ E4_uint64 Dma0TransAddr; /* 0x200 */
-+ E4_DMA Dma0Desc; /* Current Dma0 registers */ /* 0x208 */
-+
-+ E4_uint64 Dma1TransAddr; /* 0x240 */
-+ E4_DMA Dma1Desc; /* Current Dma1 registers */ /* 0x248 */
-+
-+ E4_uint64 Dma0LastPacketSize; /* 0x280 */
-+ E4_uint64 Dma0ThisPacketSize; /* 0x288 */
-+ E4_uint64 Dma0DescSizeInProg; /* 0x290 */
-+ E4_uint64 Dma0BytesToPrefetch; /* 0x298 */
-+ E4_uint64 Dma0PrefetchAddr; /* 0x2a0 */
-+ E4_uint64 EventCountAndType; /* 0x2a8 */
-+ E4_uint64 EventParameters[2]; /* 0x2b0 */
-+
-+ E4_uint64 Dma1LastPacketSize; /* 0x2c0 */
-+ E4_uint64 Dma1ThisPacketSize; /* 0x2c8 */
-+ E4_uint64 Dma1DescSizeInProg; /* 0x2d0 */
-+ E4_uint64 Dma1BytesToPrefetch; /* 0x2d8 */
-+ E4_uint64 Dma1PrefetchAddr; /* 0x2e0 */
-+ E4_Input_Ptrs InputTrapAndFilter; /* 0x2e8 */
-+ E4_uint64 EventAddress; /* 0x2f0 */
-+ E4_QueuePtr MainIntQueuePtrs; /* 0x2f8 */
-+
-+ E4_uint64 Event_Copy[16]; /* 0x300 */
-+
-+ E4_uint64 CommandCopy[7]; /* 0x380 */
-+ E4_uint64 CommandHold; /* 0x3b8 */
-+
-+ E4_uint64 InputQueueDesc[4]; /* 0x3c0 */
-+
-+ /* Run queue Pointers */
-+ E4_uint64 DProcLowPriPtrs; /* 0x3e0 */
-+ E4_uint64 DProcHighPriPtrs; /* 0x3e8 */
-+ E4_uint64 TProcLowPriPtrs; /* 0x3f0 */
-+ E4_uint64 TProcHighPriPtrs; /* 0x3f8 */
-+
-+ E4_uint64 CProcStatus; /* 0x400 */
-+ E4_uint64 TProcStatus; /* 0x408 */
-+ E4_uint64 IProcStatus; /* 0x410 */
-+ E4_uint64 EProcStatus; /* 0x418 */
-+ E4_uint64 DProc0Status; /* 0x420 */
-+ E4_uint64 DProc1Status; /* 0x428 */
-+ E4_Sched_Status SchedStatus; /* 0x430 */
-+
-+ E4_uint64 LoadIProcCntxFilter; /* Will load one of 4 cntx filter regs. Write only */ /* 0x438 */
-+
-+ E4_CommandControl CommandControl; /* 0x440 */
-+ E4_uint64 CommandCacheTestPort; /* 0x448 */
-+ E4_uint64 CommandLowPriRunPtrs; /* 0x450 */
-+ E4_uint64 CommandHighPriRunPtrs; /* 0x458 */
-+ E4_uint64 CommandSchedDataPort[4]; /* 0x460 */
-+
-+ E4_uint64 DmaRouteBuffer[2][2]; /* Write only. Should not be written to. */ /* 0x480 */
-+ E4_uint64 StenRouteBuffer[2]; /* Write only. Should not be written to. */ /* 0x4a0 */
-+ E4_uint64 pad4[0x098 - 0x096]; /* 0x4b0 */
-+
-+ E4_uint64 DmaAlignmentPort[8]; /* Write only. Should only be written to clear the prev reg.
*/ /* 0x4c0 */
-+
-+ E4_uint64 MmuBlockEntry[8]; /* Used for hash table and chain fetches */ /* 0x500 */
-+ E4_uint64 WriteUnitsTlbLine[3]; /* 0x550 */
-+ E4_uint64 pad5; /* 0x540 */
-+ E4_uint64 WriteTProcTlbLine[3]; /* 0x568 */
-+ E4_uint64 pad6; /* 0x540 */
-+
-+ E4_uint64 MmuTableBasePtrs; /* Both tables packed into a single 64 bit value */ /* 0x580 */
-+ E4_uint64 MmuFaultAndRootCntxPtr; /* Both packed into a single 64 bit value */ /* 0x588 */
-+ E4_uint64 UnitsVAddr; /* 0x590 */
-+ E4_uint64 TProcVAddr; /* 0x598 */
-+ E4_uint64 UnitsCntx; /* 0x5a0 */
-+ E4_uint64 TProcCntx; /* Read only. Writes access VProcCacheWritePort */ /* 0x5a8 */
-+ E4_uint64 FaultAddrReg; /* 0x5b0 */
-+ E4_uint64 FaultTypeAndContextReg; /* 0x5b8 */
-+
-+ E4_uint32 SysControlReg; /* 0x5c0 */
-+ E4_uint32 CacheTagValue; /* 0x5c4 */
-+ E4_uint64 TlbLineValue; /* 0x5c8 */
-+ E4_uint64 SDRamConfigReg; /* 0x5d0 */
-+ E4_uint32 InterruptMask; /* 0x5d8 */
-+ E4_uint32 InterruptReg; /* 0x5dc */
-+ E4_uint64 SDRamECCStatus; /* 0x5e0 */
-+ E4_uint32 LinkControlReg; /* 0x5e8 */
-+ E4_uint32 LinkContSettings; /* 0x5ec */
-+ E4_uint64 LinkPortKey; /* 0x5f0 */
-+ E4_uint64 LinkPortLock; /* 0x5f8 */
-+
-+ E4_uint64 SDRamWriteBuffer[4][8]; /* 0x600 */
-+ E4_uint64 SDRamReadBuffer[4][8]; /* 0x700 */
-+
-+ E4_uint64 TProcRegs[64]; /* 0x800 */
-+ E4_uint64 TProcStartUp[8]; /* Not to be used except by the elan itself */ /* 0xa00 */
-+
-+ E4_uint64 LoadPending; /* 0xa40 */
-+ E4_uint64 StortPending; /* 0xa48 */
-+ E4_uint64 DirtyBits; /* 0xa50 */
-+ E4_uint64 BadBits; /* 0xa58 */
-+
-+ E4_uint64 ICachePort_Cntl_Addr; /* 0xa60 */
-+ E4_uint64 Thread_Trap_State; /* 0xa68 */
-+
-+/* Instruction buffer (4 * 32 bit words) */
-+ E4_uint64 nPC_W; /* 0xa70 */
-+ E4_uint64 PC_W; /* 0xa78 */
-+
-+ E4_uint64 ICacheFillData[8]; /* 0xa80 */
-+ E4_uint64 ICachePort[8]; /* 0xac0 */
-+
-+ E4_uint64 PciDataBufs[4][8]; /* 0xb00 */
-+
-+ E4_uint64 CommandQueueBuffer[128]; /* 0xc00 */
-+} E4_DataBusMap;
-+
-+/*
-+ * These macros are used to set up the thread processor's ICache.
-+ */
-+#define E4_ICacheTagAddrShift 6
-+#define E4_AccessICacheRams 1
-+#define E4_InvalidTagValue 0xffffffffffffffffULL
-+#define E4_ICacheSizeInBytes (1024*16)
-+#define E4_ICacheLineSizeInBytes (64)
-+#define E4_ICacheLines (E4_ICacheSizeInBytes/E4_ICacheLineSizeInBytes)
-+#define E4_ICachePortSize ( (sizeof((E4_DataBusMap *) 0)->ICachePort) / \
-+ (sizeof((E4_DataBusMap *) 0)->ICachePort[0]))
-+
-+#define E4_ICacheFixupInsn 0xc0b02f95ull /* st1 [%r0 + 0xf95] */
-+#define E4_ICacheFixupAddr 0xf95ull
-+#define E4_ICacheFixupOffset 0xfc0
-+
-+/*
-+ * Event interrupt
-+ */
-+typedef volatile union _E4_EventInt
-+{
-+ E4_uint64 ForceAlign;
-+ struct {
-+ E4_uint32 IntCookie;
-+ E4_uint32 EventContext; /* Bits 16 to 28 */
-+ } s;
-+} E4_EventInt;
-+
-+/*
-+ * The following are used to interpret a fault status register.
-+ */
-+
-+/*
-+ * FSR[14:0] - AccessType
-+ *
-+ * T = Type bit
-+ * S = size bit. Size is in units of 64 bits or 8 bytes.
-+ * E = Byte end pointer. Used to define the last written byte of the last 64 bits written.
-+ * D = Data type bit. Used for endian conversion in the PCI interface.
-+ * C = Used by the cache to decide if this access should allocate a cache line.
-+ * d = Set if dma read or write data. This is used to guarantee order at the PCI interface.
-+ * A = Access type used to check permissions by the MMU in a virtual access.
-+ * P = Part Write. If set some byte enables may be used. Affects the action of a cache miss.
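-+ *
-+ * [Editorial worked example, not from the original header: given the
-+ * definitions below, a virtual-write fault status "fsr" with
-+ * AT_VirtualWriteAccBit set would decode its block size as
-+ * (fsr & AT_VirtualWriteSizeMask) and its permission as AT_Perm(fsr).]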
-+ */
-+
-+/* FSR[7:0] */
-+/* bit 7 => virtual write */
-+#define AT_VirtualWriteAccBit (1 << 7) /* AAADDdC1EEESSSS = Virtual Write */
-+#define AT_VirtualWriteSizeMask 0xf /* size of write access (0 => 128 bytes) */
-+#define AT_VirtualWriteEndPtrShift 4 /* end byte pointer for part write block */
-+#define AT_VirtualWriteEndPtrMask 0x7
-+
-+/* else bit 6 => virtual read */
-+#define AT_VirtualReadAccBit (1 << 6) /* AAADDdC01SSSSSS = Virtual Read */
-+#define AT_VirtualReadSizeMask 0x3f /* size of read access (0 => 512 bytes) */
-+
-+/* else => special access */
-+#define AT_SelBitsMask 0xf /* Bits to select the type of access from */
-+#define AT_SelBitsShift 0x4
-+#define AT_SpecialRd (0x0 << 4) /* AAADDdC0000TTTT = Special read Access */
-+#define AT_SpecialWr (0x1 << 4) /* AAADDdC0001TTTT = Special write Access */
-+#define AT_PhysicalRd (0x2 << 4) /* AAADDdC00100SSS = Physical Read */
-+#define AT_PhysicalWr (0x3 << 4) /* AAADDdC0011PSSS = Physical write */
-+
-+#define AT_OtherSizeMask 0xf /* Size bits used by all other accesses. 0=128 bytes */
-+#define AT_SpecialBitsMask 0xf /* Bits used to define the special access types */
-+#define AT_CacheSizeBitsMask 0x7 /* Size bits used for local accesses. 0=64 */
-+#define AT_CachePhysPartWriteBit 0x8 /* This bit is set if the access is a part write to the cache */
-+
-+/* Special memory access operations */
-+#define AT_RegAccess 0x0
-+#define AT_GetCntxFilter 0xe /* Only used by special reads */
-+#define AT_RouteFetch 0xf /* Only used by special reads */
-+
-+/* FSR[9:8] */
-+#define AT_NonAlloc (1 << 8) /* 1=Do not fill cache with this data */
-+#define AT_DmaData (1 << 9) /* This is a DMA read access. Required to guarantee dma read order. */
-+
-+/* FSR[11:10] - Data Type - defines data type for endian conversion in PCI interface*/
-+#define AT_BlkDataTyMask 0x3
-+#define AT_BlkDataTyShift 10
-+
-+#define AT_BlkDataType(FSR) (((FSR) >> AT_BlkDataTyShift) & AT_BlkDataTyMask)
-+#define AT_TypeByte 0x0
-+#define AT_TypeHWord 0x1
-+#define AT_TypeWord 0x2
-+#define AT_TypeDWord 0x3
-+
-+/* FSR[14:12] - Access Permissions */
-+#define AT_PermBitsMask 0x7
-+#define AT_PermBitsShift 12
-+
-+#define AT_Perm(FSR) (((FSR) >> AT_PermBitsShift) & AT_PermBitsMask)
-+#define AT_PermLocalDataRead 0x0
-+#define AT_PermLocalDataWrite 0x1
-+#define AT_PermRemoteRead 0x2
-+#define AT_PermRemoteWrite 0x3
-+#define AT_PermExecute 0x4
-+#define AT_PermLocalEvent 0x5
-+#define AT_PermRemoteEvent 0x7
-+
-+/* FSR[22:15] - reason for fault */
-+
-+#define FSR_WalkForThread (1 << 15) /* The thread processor caused the fault */
-+#define FSR_Walking (1 << 16) /* The fault was caused during a hash table access */
-+#define FSR_NoTranslationsFound (1 << 17) /* The hash table did not contain a matching tag */
-+#define FSR_WalkingProtectionFault (1 << 18) /* A protection fault was detected while walking */
-+#define FSR_HashTable1 (1 << 19) /* Was accessing hash table 1 not 0 */
-+#define FSR_RouteVProcErr (1 << 20) /* This is an invalid vproc for a route fetch */
-+#define FSR_FaultForBadData (1 << 21) /* Bad data (double bit ECC error) while performing a walk access */
-+#define FSR_FaultForMaxChainCount (1 << 22) /* The Elan4 has walked a chain of 1024 items. */
-+
-+typedef volatile struct _E4_FaultSave
-+{
-+ E4_uint64 FSRAndFaultContext; /* Bits 0-31 : FaultContext.
Bits 32-63 : FaultStatus Register */ -+ E4_uint64 FaultAddress; -+} E4_FaultSave; -+ -+#define FaultSaveContext(FSRAndFaultContext) ((E4_uint32) ((FSRAndFaultContext) & 0xFFFFFFFF)) -+#define FaultSaveFSR(FSRAndFaultContext) ((E4_uint32) ((FSRAndFaultContext) >> 32)) -+ -+typedef union E4_TrTypeCntx -+{ -+ E4_uint32 TypeContext; -+ struct -+ { -+#if (BYTE_ORDER == LITTLE_ENDIAN) || defined(__LITTLE_ENDIAN__) -+ E4_uint32 Type:16; /* Transaction type field */ -+ E4_uint32 Context:13; /* Transaction context */ -+ E4_uint32 TypeCntxInvalid:1; /* Bit 29 */ -+ E4_uint32 StatusRegValid:1; /* Bit 30 */ -+ E4_uint32 LastTrappedTrans:1; /* Bit 31 */ -+#else -+ E4_uint32 LastTrappedTrans:1; /* Bit 31 */ -+ E4_uint32 StatusRegValid:1; /* Bit 30 */ -+ E4_uint32 TypeCntxInvalid:1; /* Bit 29 */ -+ E4_uint32 Context:13; /* Transaction context */ -+ E4_uint32 Type:16; /* Transaction type field */ -+#endif -+ } s; -+} E4_TrTypeCntx; -+ -+#define MAX_TRAPPED_TRANS 28 -+#define TRANS_DATA_DWORDS 16 -+#define TRANS_DATA_BYTES 128 -+#define NO_OF_INPUT_CHANNELS 4 -+ -+#define CH0_LOW_PRI_CHAN 0 -+#define CH1_LOW_PRI_CHAN 1 -+#define CH0_HIGH_PRI_CHAN 2 -+#define CH1_HIGH_PRI_CHAN 3 -+ -+/* Words have been swapped for big endian access when fetched with dword access from elan.*/ -+typedef struct _E4_IprocTrapHeader -+{ -+ E4_uint64 TrAddr; -+ E4_uint64 IProcStatusCntxAndTrType; -+} E4_IprocTrapHeader; -+ -+typedef struct _E4_IprocTrapData -+{ -+ E4_uint64 Data[TRANS_DATA_DWORDS]; -+} E4_IprocTrapData; -+ -+/* -+ * This struct defines the trap state for the inputers. It requires a contiguous 16K byte block of local memory. -+ * The channel bits have been grouped to the low end of the address to force all Identify cookies to use the -+ * same cache line. -+ */ -+typedef struct _E4_IprocTrapState -+{ -+ E4_IprocTrapData TrData[MAX_TRAPPED_TRANS][NO_OF_INPUT_CHANNELS]; -+ E4_IprocTrapHeader TrHeader[MAX_TRAPPED_TRANS][NO_OF_INPUT_CHANNELS]; -+ E4_uint64 pad[8*NO_OF_INPUT_CHANNELS]; -+} E4_IprocTrapState; -+ -+/* -+ * 64 kbytes of elan local memory. Must be aligned on a 64k boundary -+ */ -+#define E4_LowPriQueueSize 0x400 -+#define E4_HighPriQueueSize 0x100 -+ -+typedef struct _E4_FaultSaveArea -+{ -+ E4_FaultSave TProcData[8]; -+ E4_FaultSave TProcInst; -+ E4_FaultSave Dummy[7]; -+ E4_FaultSave SchedProc; -+ E4_FaultSave DProc; -+ E4_FaultSave EventProc; -+ E4_FaultSave IProc; -+ E4_FaultSave DProcData[4]; -+ E4_FaultSave QReadData[8]; -+} E4_FaultSaveArea; -+ -+/* Macros to manipulate event queue pointers */ -+/* generate index in EventIntQueue */ -+#define E4_EVENT_INTQ_INDEX(fptr) (((fptr) & 0x1fff) >> 3) -+/* generate next fptr */ -+#define E4_EVENT_INTQ_NEXT(fptr) ((((fptr) + 8) & ~0x4000) | 0x2000) -+ -+typedef struct _E4_CommandPort -+{ -+ volatile E4_uint64 Command[1024]; /* a whole 8k page */ -+} E4_CommandPort; -+ -+/* -+ * This is the allocation of unit numbers within the ELAN. It is used to extract the fault address -+ * and fault type after a unit has trapped on a memory fetch. Only units that can generate traps -+ * have been included. 
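-+ *
-+ * [Editorial note, not from the original header: the unit number is
-+ * typically used as an index into an array of E4_FaultSave entries to
-+ * locate the fault address and type for the trapping unit; inferred
-+ * usage.]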
-+ */ -+#define CUN_TProcData0 0x00 -+#define CUN_TProcData1 0x01 -+#define CUN_TProcData2 0x02 -+#define CUN_TProcData3 0x03 -+#define CUN_TProcData4 0x04 -+#define CUN_TProcData5 0x05 -+#define CUN_TProcData6 0x06 -+#define CUN_TProcData7 0x07 -+#define CUN_TProcInst 0x08 -+ -+/* memory current unit numbers -+ * TProc data bus */ -+#define CUN_DProcPA0 0x10 -+#define CUN_DProcPA1 0x11 -+#define CUN_DProcPrefetch 0x12 -+#define CUN_CommandProc 0x13 -+#define CUN_DProcData0 0x14 /* Dma prefetch reads. */ -+#define CUN_DProcData1 0x15 /* Dma prefetch reads. */ -+#define CUN_DProcData2 0x16 /* Dma prefetch reads. */ -+#define CUN_DProcData3 0x17 /* Dma prefetch reads. */ -+ -+#define CUN_IProcLowPri 0x18 -+#define CUN_IProcHighPri 0x19 -+#define CUN_Spare0 0x1A -+#define CUN_Spare1 0x1B -+#define CUN_Spare2 0x1C -+#define CUN_ThreadQueue 0x1D -+#define CUN_EventProc0 0x1e -+#define CUN_EventProc1 0x1f -+ -+#define CUN_Entries 0x20 -+ -+typedef struct E4_Registers -+{ -+ E4_CacheTags Tags; /* 4k bytes c000 -> cfff */ -+ E4_DataBusMap Regs; /* 4k bytes d000 -> dfff */ -+ E4_User_Regs uRegs; /* 8k bytes e000 -> ffff */ -+} E4_Registers; -+ -+#define I2cCntl_I2cPortWrite (0 << 0) -+#define I2cCntl_I2cPortRead (1 << 0) -+#define I2cCntl_I2cPortGenStopBit (1 << 1) -+#define I2cCntl_I2cPortGenRestartBit (1 << 2) -+#define I2cCntl_I2cPortAccFailed (1 << 3) -+#define I2cCntl_I2cStopped (1 << 4) -+#define I2cCntl_I2cWakeupFailed (1 << 5) -+#define I2cCntl_I2cFastMode (1 << 6) -+#define I2cCntl_I2cPortBusy (1 << 7) -+ -+#define I2cCntl_LedI2cRegBase_Mask 0x7f -+#define I2cCntl_I2cUpdatingLedReg (1 << 7) -+ -+#define I2cCntl_InvertLedValues (1 << 0) /* read/write */ -+#define I2cCntl_LedRegWriteFailed (1 << 1) /* read only */ -+#define I2cCntl_EEPromLoadFailed (1 << 2) /* read only */ -+#define I2cCntl_InhibitI2CRom (1 << 3) /* read only */ -+#define I2cCntl_BadRomCrc (1 << 4) /* read only */ -+#define I2cCntl_MapInI2cConfigData (1 << 5) /* read/write */ -+#define I2cCntl_SampleNewLedValues (1 << 6) /* read/write */ -+#define I2cCntl_ClearLinkError (1 << 7) /* write only */ -+ -+typedef struct E4_I2C -+{ -+ volatile E4_uint8 I2cWrData; -+ volatile E4_uint8 I2cRdData; -+ volatile E4_uint8 I2cPortControl; -+ volatile E4_uint8 I2cLedBase; -+ volatile E4_uint8 I2cStatus; -+ volatile E4_uint8 I2cLedsValue; -+ volatile E4_uint16 I2cPad; -+ -+ E4_uint8 pad[256 - sizeof(E4_uint64)]; -+ -+ E4_uint8 UnchangedElan4ConfigRegs[256]; -+ E4_uint8 I2cRomConfigShadowValues[256]; -+ E4_uint8 ChangedElan4ConfigRegs[256]; -+} E4_I2C; -+ -+typedef struct _E4_ContextControlBlock -+{ -+ E4_uint32 Filter; /* Use a Network context to index for this value */ -+ E4_uint32 VirtualProcessTable; /* Use a local context to index for this value */ -+} E4_ContextControlBlock; -+ -+/* -+ * Filter -+ * [13:0] Context -+ * [14] DiscardAll -+ * [15] AckAll -+ * [16] HighPri -+ * [17] CountStats -+ * [31:18] Unused -+ */ -+#define E4_FILTER_STATS (1 << 17) -+#define E4_FILTER_HIGH_PRI (1 << 16) -+#define E4_FILTER_ACKOK_ALL (1 << 15) -+#define E4_FILTER_DISCARD_ALL (1 << 14) -+#define E4_FILTER_CONTEXT_MASK (0x3FFF) -+ -+/* -+ * VirtualProcessTable -+ * [8:0] Unused -+ * [12:9] Size num vp entries = 512 << Size -+ * [30:13] Pointer -+ * [31] Valid -+ */ -+#define E4_VPT_MIN_ENTRIES 512 -+#define E4_VPT_VALID ((unsigned)1 << 31) -+#define E4_VPT_PTR_SHIFT 0 -+#define E4_VPT_SIZE_SHIFT 9 -+#define E4_VPT_SIZE_MASK 0xf -+#define E4_VPT_NUM_VP(vpt_val) (E4_VPT_MIN_ENTRIES << (((vpt_val) >> E4_VPT_SIZE_SHIFT) & E4_VPT_SIZE_MASK)) -+#define 
E4_VPT_VALUE(ptr,size) (((ptr) << E4_VPT_PTR_SHIFT) | ((size) << E4_VPT_SIZE_SHIFT))
-+
-+
-+/* Virtual Process Table */
-+typedef struct _E4_VirtualProcessEntry
-+{
-+ E4_uint64 Values[2];
-+} E4_VirtualProcessEntry;
-+
-+/*
-+ * Entries have the following format - rtX is a packed route
-+ *
-+ * |rt11|rt10|rt9 |rt8 |rt7 |rt6 |rt5 |rt4 |rt3 |rt2 |rt1 |rt0 |PAAADD RRRRRR|
-+ * |output context |rt23|rt22|rt21|rt20|rt19|rt18|rt17|rt16|rt15|rt14|rt13|rt12|
-+ */
-+
-+#define ROUTE_CTXT_SHIFT 48
-+#define ROUTE_CTXT_MASK (~((1ull << ROUTE_CTXT_SHIFT)-1))
-+#define ROUTE_CTXT_VALUE(ctx) (((E4_uint64) ctx) << ROUTE_CTXT_SHIFT)
-+
-+#define ROUTE_PACKED_OFFSET 16
-+#define ROUTE_NUM_PACKED 24
-+
-+/* defines for first flit of a route */
-+#define FIRST_TIMEOUT(Val) ((Val) << 14) /* [15:14] */
-+#define FIRST_SYSTEM_PACKET (1 << 13) /* [13] */
-+#define FIRST_FLOOD_PACKET (1 << 12) /* [12] */
-+#define FIRST_HIGH_PRI (1 << 11) /* [11] */
-+#define FIRST_AGE(Val) ((Val) << 7) /* [10:7] */
-+#define FIRST_OPTIONS_MASK (0xFF80)
-+
-+/* [6:0] unpacked 1st route value */
-+#define FIRST_INVALID (0)
-+#define FIRST_ROUTE(Val) (0x08 | (Val))
-+#define FIRST_ADAPTIVE (0x30)
-+#define FIRST_BCAST_TREE (0x20)
-+#define FIRST_MYLINK (0x10)
-+#define FIRST_BCAST(Top, Bot) (0x40 | ((Top) << 3) | (Bot))
-+
-+/* defines for 3 bit packed entries for subsequent flits */
-+#define PACKED_INVALID (0)
-+#define PACKED_ROUTE(Val) (8 | (Val))
-+#define PACKED_ADAPTIVE (3)
-+#define PACKED_BCAST_TREE (2)
-+#define PACKED_MYLINK (1)
-+#define PACKED_BCAST0(Top,Bot) (4 | (Bot & 3))
-+#define PACKED_BCAST1(Top,Bot) ((Top << 1) | (Bot >> 2))
-+
-+#endif /* _ASM */
-+/* The MMU root context pointer has a mask to bounds check
-+ * it - this is computed as follows.
-+ */
-+#define E4_CONTEXT_MASK(num) (((num) >= 0x2000) ? 0x00 : \
-+ ((num) >= 0x1000) ? 0x80 : \
-+ ((num) >= 0x0800) ? 0xc0 : \
-+ ((num) >= 0x0400) ? 0xe0 : \
-+ ((num) >= 0x0200) ? 0xf0 : \
-+ ((num) >= 0x0100) ? 0xf8 : \
-+ ((num) >= 0x0080) ? 0xfc : \
-+ ((num) >= 0x0040) ? 0xfe : 0xff)
-+/*
-+ * This generates the size field for a virtual process table.
-+ * Size defined as 2^n no of 8K pages.
-+ * Single cycle route fetches are possible if the minimum vproc table size is 8k.
-+ */
-+#define E4_GEN_VPT_SIZE(Size) (((Size) & E4_VPT_SIZE_MASK) << E4_VPT_SIZE_SHIFT)
-+
-+#define COMMAND_RUN_QUEUE_BITS (13 + 2) /* 8K entries of 4 bytes. This is fixed in hardware. */
-+#define COMMAND_DESCS_SPACE_BITS (13 + 5) /* 8K entries of 32 bytes. This is fixed in hardware. */
-+#define COMMAND_INSERTER_CACHE_ENTRIES 16
-+
-+#define COM_TEST_PORT_ADDR_MASK 0xfULL
-+#define COM_TEST_PORT_ADDR_SH 0
-+
-+/*
-+ * The flush register is accessed through the CommandControl register.
-+ * The address is naturally aligned. It also positions the command descriptors in memory.
-+ * When no command queues need flushing it should be ORed with COM_FLUSH_INVALID. This sets
-+ * it to the top command queue descriptor. This cannot be accessed from the PCI.
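-+ *
-+ * [Editorial sketch, not from the original header: with the definitions
-+ * below, the register might be loaded as
-+ *
-+ * CommandControl = ((desc) & COM_FLUSH_DESCRIPTOR_MASK) | COM_ENABLE_DEQUEUE;
-+ *
-+ * with COM_FLUSH_INVALID in place of (desc) when nothing needs flushing;
-+ * "desc" is an assumed descriptor offset.]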
-+ */
-+#define COM_ENABLE_DEQUEUE (1 << 4)
-+#define COM_FLUSH_DESCRIPTOR_MASK 0x7fffffe0ULL
-+#define COM_FLUSH_INVALID 0x0003ffe0ULL
-+
-+
-+/*
-+ * Elan4 BAR1 is split up as follows :
-+ *
-+ * RevA
-+ * 0x3f00000 EBUS other
-+ * 0x3e00000 EBUS ROM
-+ * 0x3dfc000 registers
-+ * 0x0000000 command ports
-+ *
-+ * RevB
-+ * 0x3ffc000 registers
-+ * 0x3ff8000 padding
-+ * 0x3ff6000 i2c registers
-+ * 0x0000000 command ports
-+ */
-+#define ELAN4_BAR1_SIZE (1 << 26) /* 64M */
-+#define ELAN4_REG_SIZE (1 << 14) /* 16K */
-+
-+#define ELAN4_REVA_EBUS_SIZE (1 << 21) /* 2M */
-+#define ELAN4_REVA_EBUS_OFFSET (ELAN4_BAR1_SIZE - ELAN4_REVA_EBUS_SIZE)
-+#define ELAN4_REVA_REG_OFFSET (ELAN4_REVA_EBUS_OFFSET - ELAN4_REG_SIZE)
-+#define ELAN4_REVA_NUM_COMMAND_QUEUES (ELAN4_REVA_REG_OFFSET >> 13)
-+
-+#define ELAN4_REVA_EBUS_ROM_SIZE (1 << 20) /* 1M */
-+#define ELAN4_REVA_EBUS_ROM_OFFSET 0
-+
-+#define ELAN4_REVB_I2C_PADDING (1 << 14) /* 16K */
-+#define ELAN4_REVB_I2C_SIZE (1 << 13) /* 8k */
-+#define ELAN4_REVB_REG_OFFSET (ELAN4_BAR1_SIZE - ELAN4_REG_SIZE)
-+#define ELAN4_REVB_I2C_OFFSET (ELAN4_REVB_REG_OFFSET - ELAN4_REVB_I2C_PADDING - ELAN4_REVB_I2C_SIZE)
-+#define ELAN4_REVB_NUM_COMMAND_QUEUES (ELAN4_REVB_I2C_OFFSET >> 13)
-+
-+#endif /* notdef _ELAN4_REGISTERS_H */
-diff -urN clean/include/elan4/sdram.h linux-2.6.9/include/elan4/sdram.h
---- clean/include/elan4/sdram.h 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/elan4/sdram.h 2003-09-24 09:55:55.000000000 -0400
-@@ -0,0 +1,41 @@
-+/*
-+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ * Copyright (c) 2002-2003 by Quadrics Ltd.
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ifndef __ELAN4_SDRAM_H
-+#define __ELAN4_SDRAM_H
-+
-+#ident "$Id: sdram.h,v 1.8 2003/09/24 13:55:55 david Exp $"
-+/* $Source: /cvs/master/quadrics/elan4hdr/sdram.h,v $*/
-+
-+/* Include header file generated by sdram configuration program */
-+#include
-+
-+/* SDRAM bank shift definitions */
-+#define SDRAM_0_CS_SHIFT 25
-+#define SDRAM_1_CS_SHIFT 27
-+#define SDRAM_2_CS_SHIFT 28
-+#define SDRAM_3_CS_SHIFT 29
-+
-+#define SDRAM_BANK_SHIFT(cfg) \
-+ (((cfg >> SDRAM_RamSize_SH) & 3) == 0 ? SDRAM_0_CS_SHIFT : \
-+ ((cfg >> SDRAM_RamSize_SH) & 3) == 1 ? SDRAM_1_CS_SHIFT : \
-+ ((cfg >> SDRAM_RamSize_SH) & 3) == 2 ? SDRAM_2_CS_SHIFT : SDRAM_3_CS_SHIFT)
-+
-+#define SDRAM_BANK_SIZE(cfg) (1ULL << SDRAM_BANK_SHIFT(cfg))
-+#define SDRAM_BANK_OFFSET(cfg,bank) ((unsigned long long)(bank) << SDRAM_BANK_SHIFT(cfg))
-+#define SDRAM_NUM_BANKS(cfg) (4)
-+#define SDRAM_MAX_BANKS 4
-+
-+/* When the elan accesses sdram it passes eaddr[12] as sdramaddr[12] when
-+ * running with a 4k page size, however PCI accesses pass paddr[12], so
-+ * we must ensure that sdram pages are allocated such that eaddr[12] is the
-+ * same as paddr[12] - the easiest way is to allocate sdram in 8k chunks and
-+ * ensure that maddr[12] == eaddr[12] == pgoff[0] */
-+#define SDRAM_MIN_PAGE_SIZE (8192)
-+
-+#endif /* __ELAN4_SDRAM_H */
-diff -urN clean/include/elan4/stats.h linux-2.6.9/include/elan4/stats.h
---- clean/include/elan4/stats.h 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/elan4/stats.h 2005-04-19 12:14:52.000000000 -0400
-@@ -0,0 +1,83 @@
-+/*
-+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
-+ * Copyright (c) 2002-2003 by Quadrics Ltd.
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: stats.h,v 1.12 2005/04/19 16:14:52 addy Exp $" -+/* $Source: /cvs/master/quadrics/elan4mod/stats.h,v $*/ -+ -+#ifndef __ELAN4_STATS_H -+#define __ELAN4_STATS_H -+ -+#define ELAN4_DEV_STATS_BUCKETS 8 -+ -+ -+typedef struct elan4_dev_stats -+{ -+ unsigned long s_interrupts; -+ -+ unsigned long s_mainints[ELAN4_DEV_STATS_BUCKETS]; -+ unsigned long s_mainint_punts; -+ unsigned long s_mainint_rescheds; -+ -+ unsigned long s_haltints; -+ -+ unsigned long s_cproc_traps; -+ unsigned long s_dproc_traps; -+ unsigned long s_eproc_traps; -+ unsigned long s_iproc_traps; -+ unsigned long s_tproc_traps; -+ -+ unsigned long s_cproc_trap_types[0x10]; -+ unsigned long s_dproc_trap_types[7]; -+ unsigned long s_eproc_trap_types[4]; -+ unsigned long s_iproc_trap_types[0xa]; -+ unsigned long s_tproc_trap_types[7]; -+ -+ unsigned long s_correctable_errors; -+ unsigned long s_multiple_errors; -+ -+ unsigned long s_link_errors; -+ unsigned long s_lock_errors; -+ unsigned long s_deskew_errors; -+ unsigned long s_phase_errors; -+ unsigned long s_data_errors; -+ unsigned long s_fifo_overflow0; -+ unsigned long s_fifo_overflow1; -+ unsigned long s_mod45changed; -+ unsigned long s_pack_not_seen; -+ unsigned long s_linkport_keyfail; -+ -+ unsigned long s_eop_reset; -+ unsigned long s_bad_length; -+ unsigned long s_crc_bad; -+ unsigned long s_crc_error; -+ -+ unsigned long s_cproc_timeout; -+ unsigned long s_dproc_timeout; -+ -+ unsigned long s_sdram_bytes_free; -+} ELAN4_DEV_STATS; -+ -+#define MainIntBuckets ((int[ELAN4_DEV_STATS_BUCKETS-1]) {1, 2, 3, 4, 8, 16, 32}) -+ -+#define BumpDevStat(dev,stat) ((dev)->dev_stats.stat++) -+#define BucketDevStat(dev,stat,n,bucket) ((n) <= (bucket)[0] ? (dev)->dev_stats.stat[0]++ : \ -+ (n) <= (bucket)[1] ? (dev)->dev_stats.stat[1]++ : \ -+ (n) <= (bucket)[2] ? (dev)->dev_stats.stat[2]++ : \ -+ (n) <= (bucket)[3] ? (dev)->dev_stats.stat[3]++ : \ -+ (n) <= (bucket)[4] ? (dev)->dev_stats.stat[4]++ : \ -+ (n) <= (bucket)[5] ? (dev)->dev_stats.stat[5]++ : \ -+ (n) <= (bucket)[6] ? (dev)->dev_stats.stat[6]++ : \ -+ (dev)->dev_stats.stat[7]++) -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -+#endif /*__ELAN4_STATS_H */ -diff -urN clean/include/elan4/tprintf.h linux-2.6.9/include/elan4/tprintf.h ---- clean/include/elan4/tprintf.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan4/tprintf.h 2003-09-04 08:39:17.000000000 -0400 -@@ -0,0 +1,24 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ifndef __ELAN4_TPRINTF_H
-+#define __ELAN4_TPRINTF_H
-+
-+#ident "$Id: tprintf.h,v 1.6 2003/09/04 12:39:17 david Exp $"
-+/* $Source: /cvs/master/quadrics/elan4hdr/tprintf.h,v $*/
-+
-+
-+#ifdef _ASM
-+#define TPRINTF0(string) add %r0, __LINE__, %r0
-+#define TPRINTF1(string,reg) add reg, __LINE__, %r0
-+#else
-+#define TPRINTF0(string) asm volatile ("add %%r0, %0, %%r0" : : "i" (__LINE__))
-+#define TPRINTF1(string, value) asm volatile ("add %0, %1, %%r0" : : "r" (value), "i" (__LINE__))
-+#endif /* _ASM */
-+
-+#endif /* __ELAN4_TPRINTF_H */
-diff -urN clean/include/elan4/trap.h linux-2.6.9/include/elan4/trap.h
---- clean/include/elan4/trap.h 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/elan4/trap.h 2003-10-07 08:11:10.000000000 -0400
-@@ -0,0 +1,95 @@
-+/*
-+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
-+ * Copyright (c) 2002-2003 by Quadrics Ltd.
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ident "@(#)$Id: trap.h,v 1.10 2003/10/07 12:11:10 david Exp $"
-+/* $Source: /cvs/master/quadrics/elan4mod/trap.h,v $*/
-+
-+#ifndef __ELAN4_TRAP_H
-+#define __ELAN4_TRAP_H
-+
-+/*
-+ * If the EProc Faults whilst performing an action (e.g. Read/Write on the data src or dest Addr)
-+ * the Eproc increments the Addr(s) by a block size (64 bytes):
-+ * 1: Fault on Read:
-+ * Src EventAddr = Read Addr + block
-+ * 2: Fault on Write:
-+ * Src EventAddr = Read Addr + block
-+ * Dst EventAddr = Read Addr + block
-+ * Size = Size - block ndwords
-+ * We must rewind the addr correctly to complete the transfer successfully
-+ */
-+#define EVENT_COPY_NDWORDS 0x8
-+#define EVENT_COPY_BLOCK_SIZE 0x40
-+
-+typedef struct elan4_eproc_trap
-+{
-+ E4_uint64 tr_status;
-+ E4_FaultSave tr_faultarea;
-+ E4_Event tr_event;
-+ E4_Addr tr_eventaddr;
-+} ELAN4_EPROC_TRAP;
-+
-+typedef struct elan4_cproc_trap
-+{
-+ E4_uint64 tr_status; /* cproc status register */
-+ E4_uint64 tr_command; /* cproc command */
-+ E4_CommandQueueDesc tr_qdesc; /* copy of command queue descriptor */
-+ E4_FaultSave tr_faultarea; /* fault area for mmu traps */
-+ ELAN4_EPROC_TRAP tr_eventtrap; /* associated event trap (waitevent) */
-+} ELAN4_CPROC_TRAP;
-+
-+typedef struct elan4_dproc_trap
-+{
-+ E4_DMA tr_desc;
-+ E4_FaultSave tr_packAssemFault;
-+ E4_FaultSave tr_prefetchFault;
-+ E4_uint64 tr_status;
-+} ELAN4_DPROC_TRAP;
-+
-+typedef struct elan4_tproc_trap
-+{
-+ E4_uint64 tr_regs[64];
-+ E4_FaultSave tr_dataFault;
-+ E4_FaultSave tr_instFault;
-+ E4_uint64 tr_status;
-+ E4_uint64 tr_state;
-+ E4_Addr tr_pc;
-+ E4_Addr tr_npc;
-+ E4_uint64 tr_dirty;
-+ E4_uint64 tr_bad;
-+} ELAN4_TPROC_TRAP;
-+
-+typedef struct elan4_iproc_trap
-+{
-+ E4_uint32 tr_numTransactions;
-+ E4_uint32 tr_flags;
-+ E4_uint32 tr_trappedTrans;
-+ E4_uint32 tr_waitForEopTrans;
-+ E4_uint32 tr_identifyTrans;
-+ E4_uint32 tr_pad;
-+
-+ E4_FaultSave tr_faultarea;
-+ E4_IprocTrapHeader tr_transactions[MAX_TRAPPED_TRANS];
-+ E4_IprocTrapData tr_dataBuffers[MAX_TRAPPED_TRANS];
-+} ELAN4_IPROC_TRAP;
-+
-+#define TR_FLAG_ACK_SENT (1 << 0)
-+#define TR_FLAG_EOP_ERROR (1 << 1)
-+#define TR_FLAG_BAD_TRANS (1 << 2)
-+#define TR_FLAG_DMA_PACKET (1 << 3)
-+#define TR_FLAG_EOP_BAD (1 << 4)
-+#define TR_FLAG_TOOMANY_TRANS (1 << 5)
-+
-+#define TR_TRANS_INVALID (0xffffffff)
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "stroustrup"
-+ * End:
-+ */
-+#endif /* __ELAN4_TRAP_H */
-diff -urN 
clean/include/elan4/trtype.h linux-2.6.9/include/elan4/trtype.h
---- clean/include/elan4/trtype.h 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/elan4/trtype.h 2004-02-06 05:38:21.000000000 -0500
-@@ -0,0 +1,112 @@
-+/*
-+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ * Copyright (c) 2002-2003 by Quadrics Ltd.
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ifndef _ELAN4_TRTYPE_H
-+#define _ELAN4_TRTYPE_H
-+
-+#ident "$Id: trtype.h,v 1.20 2004/02/06 10:38:21 mike Exp $"
-+/* $Source: /cvs/master/quadrics/elan4hdr/trtype.h,v $*/
-+
-+/*<15:11> Size field is used to give the number of additional 64 bit data values.
-+ A value from 0 to 16 inclusive is valid. */
-+
-+#include
-+
-+#define TR_SIZE_SHIFT (11)
-+#define TR_SIZE_MASK (0x1f << TR_SIZE_SHIFT)
-+#define SET_TR_SIZE(Size) (((Size) << TR_SIZE_SHIFT) & TR_SIZE_MASK)
-+
-+/* <10:9> Last Transaction and AckNow bits, marks the last transaction and
-+ enables a PACK_OK to be sent. */
-+#define TR_LAST_AND_SEND_ACK (3 << 9)
-+
-+
-+/* <8> Only valid on the last transaction. Delays execution until an EOP_GOOD is received.
-+ * Any other EOP type will abort execution of this transaction. */
-+#define TR_WAIT_FOR_EOP (1 << 8)
-+
-+/*
-+ * Data type. This is used by transactions of variable data type. It controls any endian
-+ * conversion required if the destination host processor has a big endian memory format.
-+ */
-+/* WriteBlock <8:7> Data type
-+ <6:0> Part write size */
-+#define TR_DATATYPE_SHIFT (6)
-+#define TR_DATATYPE_MASK ((1 << 2) - 1)
-+
-+#define TR_DATATYPE_BYTE E4_DATATYPE_BYTE
-+#define TR_DATATYPE_SHORT E4_DATATYPE_SHORT
-+#define TR_DATATYPE_WORD E4_DATATYPE_WORD
-+#define TR_DATATYPE_DWORD E4_DATATYPE_DWORD
-+
-+/* <5:0> Transaction Type
-+ * For Writeblock <5:3> 000 => Write, 001 => Read
-+ * <2:0> End Byte Addr */
-+#define TR_OPCODE_MASK 0x3F
-+#define TR_BLOCK_OPCODE_MASK 0x38
-+
-+#define TR_WRITEBLOCK 0x0
-+#define TR_ENDBYTE_MASK 0x7
-+#define TR_WRITE(Size, EndByte, DataType) \
-+ (0x0 | SET_TR_SIZE(Size) | ((EndByte) & TR_ENDBYTE_MASK) | \
-+ (((DataType) & TR_DATATYPE_MASK) << TR_DATATYPE_SHIFT))
-+
-+#define TR_NOP_TRANS (0x10 | SET_TR_SIZE(0))
-+#define TR_SETEVENT 0x10
-+#define TR_SETEVENT_NOIDENT (TR_SETEVENT | SET_TR_SIZE(0) | TR_LAST_AND_SEND_ACK)
-+#define TR_SETEVENT_IDENTIFY (TR_SETEVENT | SET_TR_SIZE(1) | TR_LAST_AND_SEND_ACK)
-+#define TR_REMOTEDMA (0x11 | SET_TR_SIZE(7) | TR_LAST_AND_SEND_ACK)
-+#define TR_SENDDISCARD (0x12 | SET_TR_SIZE(0))
-+
-+/*
-+ * Conditional transactions that might return PAckTestFail.
-+ * All will allow further execution of the packet if ([Address] operator DataValue) is true.
-+ * e.g. for TR_GTE further execution if ([Address] >= DataValue) is true.
-+ * These should be used where a definite TRUE/FALSE answer is required.
-+ */
-+#define TR_GTE (0x14 | SET_TR_SIZE(1))
-+#define TR_LT (0x15 | SET_TR_SIZE(1))
-+#define TR_EQ (0x16 | SET_TR_SIZE(1))
-+#define TR_NEQ (0x17 | SET_TR_SIZE(1))
-+
-+/*
-+ * Conditional transactions that might return PAckDiscard.
-+ * All will allow further execution of the packet if ([Address] operator DataValue) is true.
-+ * e.g. for TR_GTE further execution if ([Address] >= DataValue) is true.
-+ * These should be used where eventually a TRUE answer is expected but the node might not be ready yet.
-+ * These can be mixed with the normal conditionals to allow a single packet to test for readiness and
-+ * a TRUE/FALSE answer.
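-+ *
-+ * [Editorial note, not from the original header: e.g. a packet might poll
-+ * a remote flag with TR_GTE_DISCARD (defined below) so that it is
-+ * discarded and retried until the node is ready; inferred usage.]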
-+ */
-+#define TR_GTE_DISCARD (0x34 | SET_TR_SIZE(1))
-+#define TR_LT_DISCARD (0x35 | SET_TR_SIZE(1))
-+#define TR_EQ_DISCARD (0x36 | SET_TR_SIZE(1))
-+#define TR_NEQ_DISCARD (0x37 | SET_TR_SIZE(1))
-+
-+#define TR_TRACEROUTE_TRANS 0x18
-+#define TR_TRACEROUTE(Size) (TR_TRACEROUTE_TRANS | (TR_DATATYPE_WORD << TR_DATATYPE_SHIFT) |SET_TR_SIZE(Size))
-+#define TR_IDENTIFY (0x19 | SET_TR_SIZE(0))
-+
-+#define TR_ADDWORD (0x1c | SET_TR_SIZE(2) | TR_LAST_AND_SEND_ACK)
-+#define TR_INPUT_Q_COMMIT (0x1d | SET_TR_SIZE(1) | TR_LAST_AND_SEND_ACK)
-+#define TR_TESTANDWRITE (0x1e | SET_TR_SIZE(3) | TR_LAST_AND_SEND_ACK)
-+#define TR_INPUT_Q_GETINDEX (0x1f | SET_TR_SIZE(0))
-+
-+
-+
-+/* TraceRoute format */
-+#define TR_TRACEROUTE0_CHANID(val) ((val) & 1) /* 0 Chan Id */
-+#define TR_TRACEROUTE0_LINKID(val) (((val) >> 1) & 7) /* 1:3 Link Id */
-+#define TR_TRACEROUTE0_REVID(val) (((val) >> 4) & 7) /* 4:6 Revision Id */
-+#define TR_TRACEROUTE0_BCAST_PIN(val) (((val) >> 7) & 1) /* 7 Bcast Top Pin */
-+#define TR_TRACEROUTE0_LNR(val) (((val) >> 8) & 0xFF) /* 8:15 Global Link Not Ready */
-+
-+#define TR_TRACEROUTE1_ROUTES_SELECTED(val) ((val & 0xFF)) /* 0:7 Routes Selected */
-+#define TR_TRACEROUTE1_BCAST_TOP(val) (((val) >> 8) & 7) /* 8:10 Broadcast Top */
-+#define TR_TRACEROUTE1_BCAST_BOTTOM(val) (((val) >> 12) & 7) /* 12:14 Broadcast Bottom */
-+
-+#endif /* _ELAN4_TRTYPE_H */
-diff -urN clean/include/elan4/types.h linux-2.6.9/include/elan4/types.h
---- clean/include/elan4/types.h 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/elan4/types.h 2003-09-04 08:39:17.000000000 -0400
-@@ -0,0 +1,69 @@
-+/*
-+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ * Copyright (c) 2002-2003 by Quadrics Ltd.
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ifndef __ELAN4_TYPES_H
-+#define __ELAN4_TYPES_H
-+
-+#ident "@(#)$Id: types.h,v 1.9 2003/09/04 12:39:17 david Exp $"
-+/* $Source: /cvs/master/quadrics/elan4hdr/types.h,v $*/
-+
-+#include
-+/*
-+ * "flip" values for correctly indexing into
-+ * block data which was copied from the Elan
-+ * using 64 bit accesses.
-+ */
-+#if defined(__LITTLE_ENDIAN__)
-+# define ByteEndianFlip 0
-+# define ShortEndianFlip 0
-+# define WordEndianFlip 0
-+#else
-+# define ByteEndianFlip 7
-+# define ShortEndianFlip 3
-+# define WordEndianFlip 1
-+#endif
-+
-+
-+#ifndef _ASM
-+
-+typedef signed int E4_int;
-+typedef unsigned int E4_uint;
-+
-+typedef signed char E4_int8;
-+typedef unsigned char E4_uint8;
-+
-+typedef signed short E4_int16;
-+typedef unsigned short E4_uint16;
-+
-+typedef signed int E4_int32;
-+typedef unsigned int E4_uint32;
-+
-+#ifdef _LP64
-+typedef signed long E4_int64;
-+typedef unsigned long E4_uint64;
-+#else
-+typedef signed long long E4_int64;
-+typedef unsigned long long E4_uint64;
-+#endif
-+
-+/* 64-bit Elan4 */
-+typedef E4_uint64 E4_Addr;
-+typedef E4_uint32 E4_LocPhysAddr; /* Really 31 bits */
-+
-+#define OneK (1024)
-+#define EightK (8*OneK)
-+
-+#define E4_DATATYPE_BYTE 0
-+#define E4_DATATYPE_SHORT 1
-+#define E4_DATATYPE_WORD 2
-+#define E4_DATATYPE_DWORD 3
-+
-+#endif /* _ASM */
-+
-+#endif /* __ELAN4_TYPES_H */
-+
-diff -urN clean/include/elan4/user.h linux-2.6.9/include/elan4/user.h
---- clean/include/elan4/user.h 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/elan4/user.h 2005-04-21 07:12:06.000000000 -0400
-@@ -0,0 +1,347 @@
-+/*
-+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
-+ * Copyright (c) 2002-2003 by Quadrics Ltd.
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: user.h,v 1.45 2005/04/21 11:12:06 mike Exp $" -+/* $Source: /cvs/master/quadrics/elan4mod/user.h,v $*/ -+ -+#ifndef __ELAN4_USER_H -+#define __ELAN4_USER_H -+ -+#include -+#include -+#include -+ -+typedef struct trap_queue -+{ -+ unsigned q_back; /* Next free space */ -+ unsigned q_front; /* First object to remove */ -+ unsigned q_size; /* Size of queue */ -+ unsigned q_count; /* Current number of entries */ -+ unsigned q_slop; /* FULL <=> (count+slop) == size */ -+} RING_QUEUE; -+ -+#define RING_QUEUE_INIT(q,num,slop) ((q).q_size = (num), (q).q_slop = (slop), (q).q_front = (q).q_back = 0, (q).q_count = 0) -+#define RING_QUEUE_FULL(q) ((q).q_count >= ((q).q_size - (q).q_slop)) -+#define RING_QUEUE_REALLY_FULL(q) ((q).q_count == (q).q_size) -+#define RING_QUEUE_EMPTY(q) ((q).q_count == 0) -+#define RING_QUEUE_NEXT(q,indx) ((indx) = (((indx)+1) % (q).q_size)) -+#define RING_QUEUE_PREV(q,indx) ((indx) = (((indx)+(q).q_size-1) % (q).q_size)) -+#define RING_QUEUE_ADD(q) (RING_QUEUE_NEXT(q ,(q).q_back), (++(q).q_count) >= ((q).q_size - (q).q_slop)) -+#define RING_QUEUE_REMOVE(q) (RING_QUEUE_NEXT(q, (q).q_front), (--(q).q_count) == 0) -+#define RING_QUEUE_ADD_FRONT(q) (RING_QUEUE_PREV(q, (q).q_front), (++(q).q_count) >= ((q).q_size - (q).q_slop)) -+#define RING_QUEUE_ENTRY(qArea,indx) (&(qArea)[(indx)]) -+#define RING_QUEUE_FRONT(q,qArea) RING_QUEUE_ENTRY(qArea, (q).q_front) -+#define RING_QUEUE_BACK(q,qArea) RING_QUEUE_ENTRY(qArea, (q).q_back) -+#define RING_QUEUE_ITERATE(q,idx) for (idx = (q).q_front; idx != (q).q_back; idx = (((idx) + 1) % (q).q_size)) -+ -+typedef struct user_rgn -+{ -+ struct user_rgn *rgn_mnext; /* Doubly linked list of regions */ -+ struct user_rgn *rgn_mprev; /* sorted on main address */ -+ virtaddr_t rgn_mbase; /* main address of base of region */ -+ -+ struct user_rgn *rgn_enext; /* Doubly linked list of regions */ -+ struct user_rgn *rgn_eprev; /* sorted on elan address */ -+ E4_Addr rgn_ebase; /* elan address of base of region */ -+ -+ unsigned long rgn_len; /* length of region */ -+ unsigned rgn_perm; /* elan access permission */ -+} USER_RGN; -+ -+typedef struct user_vpseg -+{ -+ struct list_head vps_link; -+ -+ unsigned short vps_process; /* virtual process number */ -+ unsigned short vps_entries; /* and # virtual processes */ -+ -+ unsigned vps_type; -+ union -+ { -+ struct { -+ ELAN_CAPABILITY *cap; -+ E4_VirtualProcessEntry *routes; -+ } p2p; -+#define vps_p2p_cap vps_u.p2p.cap -+#define vps_p2p_routes vps_u.p2p.routes -+ -+ struct { -+ unsigned short lowvp; -+ unsigned short highvp; -+ } bcast; -+#define vps_bcast_lowvp vps_u.bcast.lowvp -+#define vps_bcast_highvp vps_u.bcast.highvp -+ } vps_u; -+} USER_VPSEG; -+ -+/* values for vps_type */ -+#define USER_VPSEG_P2P 0 -+#define USER_VPSEG_BCAST 1 -+ -+typedef struct user_cq -+{ -+ struct list_head ucq_link; -+ -+ ELAN4_CQ *ucq_cq; /* the real command queue */ -+ -+ unsigned char ucq_state; /* command queue state */ -+ unsigned char ucq_errored; /* command queue has errored */ -+ unsigned char ucq_flags; /* flags */ -+ ELAN4_CPROC_TRAP ucq_trap; /* trap state */ -+ -+ atomic_t ucq_ref; /* # references to this cq (mmaps) */ -+} USER_CQ; -+ -+/* values for ucq_state */ -+#define UCQ_RUNNING 0 /* command queue is running */ -+#define UCQ_TRAPPED 1 /* command queue has trapped */ -+#define UCQ_NEEDS_RESTART 2 /* command queue has trapped, and needs restarting */ -+#define UCQ_STOPPED 3 /* command queue has 
trapped, and delivered to user */ -+ -+/* values for ucq_flags */ -+#define UCQ_SYSTEM (1 << 0) -+#define UCQ_REORDER (1 << 1) -+ -+extern int num_fault_save; -+extern int min_fault_pages; -+extern int max_fault_pages; -+ -+typedef struct fault_save -+{ -+ struct fault_save *next; -+ E4_Addr addr; -+ E4_uint32 count; -+} FAULT_SAVE; -+ -+typedef struct user_iproc_trap -+{ -+ unsigned char ut_state; -+ ELAN4_IPROC_TRAP ut_trap; -+} USER_IPROC_TRAP; -+ -+/* values for ut_state */ -+#define UTS_IPROC_RUNNING 0 -+#define UTS_IPROC_TRAPPED 1 -+#define UTS_IPROC_RESOLVING 2 -+#define UTS_IPROC_EXECUTE_PACKET 3 -+#define UTS_IPROC_EXECUTING 4 -+#define UTS_IPROC_NETWORK_ERROR 5 -+#define UTS_IPROC_STOPPED 6 -+ -+typedef struct user_ctxt_entry -+{ -+ struct list_head cent_link; /* entry chained on context */ -+ ELAN_CAPABILITY *cent_cap; /* capability we attached with */ -+} USER_CTXT_ENTRY; -+ -+typedef struct user_ctxt -+{ -+ ELAN4_CTXT uctx_ctxt; /* is also an elan context */ -+ -+ spinlock_t uctx_spinlock; /* spinlock for items used with interrupt handler */ -+ kcondvar_t uctx_wait; /* place to sleep (traphandler/swapout/swapin/neterr fixup) */ -+ -+ unsigned uctx_status; /* status (uctx_spinlock) */ -+ -+ pid_t uctx_trap_pid; /* pid to deliver signals to on trap */ -+ int uctx_trap_signo; /* signal number to deliver */ -+ unsigned uctx_trap_state; /* state of trap handling code */ -+ unsigned uctx_trap_count; /* count of "thread" in user_trap_handler() */ -+ -+ unsigned uctx_int_count; /* # interrupts since last zeroed */ -+ unsigned long uctx_int_start; /* tick when int_count last zeroed */ -+ unsigned long uctx_int_delay; /* # ticks to delay next wakeup */ -+ struct timer_list uctx_int_timer; /* and timer to use to delay signal */ -+ struct timer_list uctx_shuffle_timer; /* and timer to use to delay shuffle signal */ -+ -+ struct timer_list uctx_neterr_timer; /* network error timer */ -+ -+ struct list_head uctx_vpseg_list; /* list of vp segments we've got */ -+ kmutex_t uctx_vpseg_lock; /* and lock to protect it. 
*/
-+ ELAN4_ROUTE_TABLE *uctx_routetable; /* our virtual process table */
-+ ELAN_POSITION uctx_position; /* position in network */
-+
-+ struct list_head uctx_cent_list; /* list of attached network contexts */
-+
-+ USER_CQ *uctx_ddcq; /* command queue for re-issuing traps */
-+ E4_uint64 uctx_ddcq_insertcnt; /* # dwords inserted into command queue */
-+ E4_uint64 uctx_ddcq_completed; /* last "completed" write was here */
-+ int uctx_ddcq_intr; /* count of outstanding ddcq interrupts */
-+
-+ ELAN4_HALTOP uctx_haltop; /* halt operation for flushing */
-+ ELAN4_DMA_FLUSHOP uctx_dma_flushop; /* flush operation for flushing dma runqueue */
-+
-+ INTCOOKIE_TABLE *uctx_intcookie_table; /* table of interrupt cookies (shared with other uctxs for this task) */
-+
-+ kmutex_t uctx_cqlock; /* lock for create/destroy cqs */
-+ struct list_head uctx_cqlist; /* list of command queues (uctx_cqlock,uctx_spinlock) */
-+
-+ ELAN4_DPROC_TRAP *uctx_dprocTraps; /* queue of dproc traps to resolve/reissue */
-+ RING_QUEUE uctx_dprocTrapQ;
-+
-+ ELAN4_TPROC_TRAP *uctx_tprocTraps; /* queue of tproc traps to resolve/reissue */
-+ RING_QUEUE uctx_tprocTrapQ;
-+
-+ ELAN4_EPROC_TRAP *uctx_eprocTraps; /* queue of eproc traps to resolve */
-+ RING_QUEUE uctx_eprocTrapQ;
-+
-+ USER_IPROC_TRAP uctx_iprocTrap[2]; /* input trap state, 1 per virtual channel */
-+
-+ E4_DMA *uctx_dmas; /* queue of dmas to restart */
-+ RING_QUEUE uctx_dmaQ;
-+
-+ E4_ThreadRegs *uctx_threads; /* queue of threads to restart */
-+ RING_QUEUE uctx_threadQ;
-+
-+ ELAN4_NETERR_MSG *uctx_msgs; /* queue of neterr messages */
-+ RING_QUEUE uctx_msgQ;
-+ kmutex_t uctx_rgnmutex; /* lock for create/destroy regions */
-+ spinlock_t uctx_rgnlock; /* spinlock to protect linked lists */
-+ USER_RGN *uctx_mrgns; /* Doubly linked list of memory regions (uctx_rgnlock) */
-+ USER_RGN *uctx_mtail; /* Last memory region on list (uctx_rgnlock) */
-+ USER_RGN *uctx_mrgnlast; /* Last region 'hit' (uctx_rgnlock) */
-+
-+ USER_RGN *uctx_ergns; /* Doubly linked list of memory regions (uctx_rgnlock) */
-+ USER_RGN *uctx_etail; /* Last memory region on list (uctx_rgnlock) */
-+ USER_RGN *uctx_ergnlast; /* Last region 'hit' (uctx_rgnlock) */
-+
-+ ELAN4_USER_PAGE *uctx_upage; /* kernel page shared with user */
-+ sdramaddr_t uctx_trampoline; /* sdram page for tproc trampoline */
-+
-+ E4_Addr uctx_upage_addr; /* elan addr page mapped into */
-+ E4_Addr uctx_trestart_addr; /* address of thread restart code */
-+ FAULT_SAVE *uctx_faults;
-+ FAULT_SAVE *uctx_fault_list;
-+ int uctx_num_fault_save;
-+ spinlock_t uctx_fault_lock;
-+} USER_CTXT;
-+
-+/* bit values for uctx_status */
-+#define UCTX_EXITING (1 << 0) /* context is exiting.
*/ -+#define UCTX_USER_FILTERING (1 << 1) /* user requested context filter */ -+#define UCTX_USER_STOPPED (1 << 2) /* user requested stop */ -+ -+#define UCTX_SWAPPING (1 << 3) /* context is swapping out */ -+#define UCTX_SWAPPED (1 << 4) /* context is swapped out */ -+ -+#define UCTX_STOPPING (1 << 5) /* stopping elan from running this context */ -+#define UCTX_STOPPED (1 << 6) /* elan no longer running this context */ -+ -+#define UCTX_EPROC_QUEUE_FULL (1 << 7) /* reasons for stopping running */ -+#define UCTX_DPROC_QUEUE_FULL (1 << 8) -+#define UCTX_TPROC_QUEUE_FULL (1 << 9) -+#define UCTX_IPROC_CH0_TRAPPED (1 << 10) -+#define UCTX_IPROC_CH1_TRAPPED (1 << 11) -+ -+#define UCTX_NETERR_TIMER (1 << 12) -+#define UCTX_NETERR_FIXUP (1 << 13) -+ -+#define UCTX_EPROC_QUEUE_OVERFLOW (1 << 14) -+#define UCTX_DPROC_QUEUE_OVERFLOW (1 << 15) -+#define UCTX_TPROC_QUEUE_OVERFLOW (1 << 16) -+ -+#define UCTX_EPROC_QUEUE_ERROR (1 << 17) -+#define UCTX_DPROC_QUEUE_ERROR (1 << 18) -+#define UCTX_TPROC_QUEUE_ERROR (1 << 19) -+ -+#define UCTX_STOPPED_REASONS (UCTX_EPROC_QUEUE_FULL | UCTX_DPROC_QUEUE_FULL | UCTX_TPROC_QUEUE_FULL) -+#define UCTX_SWAPPED_REASONS (UCTX_EXITING | UCTX_USER_STOPPED | UCTX_NETERR_FIXUP) -+#define UCTX_NACKING_REASONS (UCTX_USER_FILTERING | UCTX_IPROC_CH0_TRAPPED | UCTX_IPROC_CH1_TRAPPED) -+ -+#define UCTX_OVERFLOW_REASONS (UCTX_EPROC_QUEUE_OVERFLOW | UCTX_DPROC_QUEUE_OVERFLOW | UCTX_TPROC_QUEUE_OVERFLOW) -+#define UCTX_ERROR_REASONS (UCTX_EPROC_QUEUE_ERROR | UCTX_DPROC_QUEUE_ERROR | UCTX_TPROC_QUEUE_ERROR) -+ -+#define UCTX_RUNNABLE(uctx) (((uctx)->uctx_status & (UCTX_SWAPPED_REASONS | UCTX_STOPPED_REASONS)) == 0) -+#define UCTX_NACKING(uctx) (((uctx)->uctx_status & (UCTX_SWAPPED_REASONS | UCTX_STOPPED_REASONS | UCTX_NACKING_REASONS)) != 0) -+ -+/* values for uctx_trap_signalled */ -+#define UCTX_TRAP_IDLE 0 -+#define UCTX_TRAP_SLEEPING 1 -+#define UCTX_TRAP_SIGNALLED 2 -+#define UCTX_TRAP_ACTIVE 3 -+ -+extern int user_p2p_route_options; -+extern int user_bcast_route_options; -+extern int user_dproc_retry_count; -+extern int user_cproc_retry_count; -+extern int user_ioproc_enabled; -+extern int user_pagefault_enabled; -+ -+extern USER_CTXT *user_alloc (ELAN4_DEV *dev); -+extern void user_free (USER_CTXT *uctx); -+extern void user_swapout (USER_CTXT *uctx, unsigned reason); -+extern void user_swapin (USER_CTXT *uctx, unsigned reason); -+extern int user_attach (USER_CTXT *uctx, ELAN_CAPABILITY *cap); -+extern void user_detach (USER_CTXT *uctx, ELAN_CAPABILITY *cap); -+extern void user_block_inputter (USER_CTXT *uctx, unsigned blocked); -+extern int user_alloc_trap_queues (USER_CTXT *uctx, unsigned ndproc_traps, unsigned neproc_traps, -+ unsigned ntproc_traps, unsigned nthreads, unsigned ndmas); -+ -+extern int user_add_p2pvp (USER_CTXT *uctx, unsigned process, ELAN_CAPABILITY *cap); -+extern int user_add_bcastvp (USER_CTXT *uctx, unsigned process, unsigned lowvp, unsigned highvp); -+extern int user_removevp (USER_CTXT *uctx, unsigned process); -+ -+extern int user_set_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route); -+extern int user_reset_route (USER_CTXT *uctx, unsigned process); -+extern int user_get_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route); -+extern int user_check_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route, unsigned *error); -+extern int user_send_neterr_msg (USER_CTXT *uctx, unsigned int vp, unsigned int nctx, unsigned int retries, ELAN4_NETERR_MSG *msg); -+extern int user_neterr_sten 
(USER_CTXT *uctx, unsigned int vp, E4_uint64 cookie, int waitforeop); -+extern int user_neterr_dma (USER_CTXT *uctx, unsigned int vp, E4_uint64 cookie, int waitforeop); -+ -+extern int user_resume_eproc_trap (USER_CTXT *uctx, E4_Addr addr); -+extern int user_resume_cproc_trap (USER_CTXT *uctx, unsigned indx); -+extern int user_resume_dproc_trap (USER_CTXT *uctx, E4_DMA *dma); -+extern int user_resume_tproc_trap (USER_CTXT *uctx, E4_ThreadRegs *regs); -+extern int user_resume_iproc_trap (USER_CTXT *uctx, unsigned channel, unsigned trans, -+ E4_IprocTrapHeader *hdrp, E4_IprocTrapData *datap); -+ -+extern int user_trap_handler (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, int nticks); -+extern USER_CQ *user_findcq (USER_CTXT *uctx, unsigned num); -+extern USER_CQ *user_alloccq (USER_CTXT *uctx, unsigned size, unsigned perm, unsigned flags); -+extern void user_freecq (USER_CTXT *uctx, USER_CQ *cq); -+extern void user_dropcq (USER_CTXT *uctx, USER_CQ *cq); -+ -+/* user_osdep.c */ -+extern int user_load_range (USER_CTXT *uctx, E4_Addr addr, unsigned long nbytes, E4_uint32 fsr); -+extern void user_update_main (USER_CTXT *uctx, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long start, unsigned long len); -+extern void user_unload_main (USER_CTXT *uctx, unsigned long start, unsigned long len); -+ -+ -+/* regions.c */ -+extern USER_RGN *user_findrgn_elan (USER_CTXT *uctx, E4_Addr addr, int tail); -+extern USER_RGN *user_findrgn_main (USER_CTXT *uctx, virtaddr_t addr, int tail); -+extern USER_RGN *user_rgnat_elan (USER_CTXT *uctx, E4_Addr addr); -+extern USER_RGN *user_rgnat_main (USER_CTXT *uctx, virtaddr_t addr); -+extern int user_setperm (USER_CTXT *uctx, virtaddr_t maddr, E4_Addr eaddr, unsigned long len, unsigned perm); -+extern void user_clrperm (USER_CTXT *uctx, E4_Addr addr, unsigned long len); -+extern int user_checkperm (USER_CTXT *uctx, E4_Addr raddr, unsigned long rsize, unsigned access); -+extern virtaddr_t user_elan2main (USER_CTXT *uctx, E4_Addr addr); -+extern E4_Addr user_main2elan (USER_CTXT *uctx, virtaddr_t addr); -+extern void user_preload_main (USER_CTXT *uctx, virtaddr_t addr, unsigned long len); -+extern void user_freergns (USER_CTXT *uctx); -+ -+/* user_ddcq.c */ -+extern int user_ddcq_check (USER_CTXT *uctx, unsigned num); -+extern int user_ddcq_flush (USER_CTXT *uctx); -+extern void user_ddcq_intr (USER_CTXT *uctx); -+extern void user_ddcq_write_dword (USER_CTXT *uctx, E4_Addr addr, E4_uint64 value); -+extern void user_ddcq_interrupt (USER_CTXT *uctx, E4_uint64 cookie); -+extern void user_ddcq_run_dma (USER_CTXT *uctx, E4_DMA *dma); -+extern void user_ddcq_run_thread (USER_CTXT *uctx, E4_ThreadRegs *regs); -+extern void user_ddcq_setevent (USER_CTXT *uctx, E4_Addr addr); -+extern void user_ddcq_seteventn (USER_CTXT *uctx, E4_Addr addr, E4_uint32 count); -+extern void user_ddcq_waitevent (USER_CTXT *uctx, E4_Addr addr, E4_uint64 CountAndType, E4_uint64 Param0, E4_uint64 Param1); -+ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -+#endif /* __ELAN4_USER_H */ -diff -urN clean/include/elan4/userregs.h linux-2.6.9/include/elan4/userregs.h ---- clean/include/elan4/userregs.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan4/userregs.h 2004-10-06 10:50:38.000000000 -0400 -@@ -0,0 +1,383 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __ELAN4_USERREGS_H -+#define __ELAN4_USERREGS_H -+ -+#ident "$Id: userregs.h,v 1.15 2004/10/06 14:50:38 addy Exp $" -+/* $Source: /cvs/master/quadrics/elan4hdr/userregs.h,v $*/ -+ -+#ifdef __cplusplus -+extern "C" { -+#endif -+ -+/* -+ * Statistic control reg values -+ * Each 4-bit nibble of the control word specifies what statistic -+ * is to be recorded in each of the 8 statistic counters -+ */ -+#define COUNT_REG0_SHIFT 32ull -+#define COUNT_REG1_SHIFT 36ull -+#define COUNT_REG2_SHIFT 40ull -+#define COUNT_REG3_SHIFT 44ull -+#define COUNT_REG4_SHIFT 48ull -+#define COUNT_REG5_SHIFT 52ull -+#define COUNT_REG6_SHIFT 56ull -+#define COUNT_REG7_SHIFT 60ull -+ -+ -+/* Count reg 0 */ -+#define STC_INPUT_NON_WRITE_BLOCKS (0x0ull << COUNT_REG0_SHIFT) -+#define STP_DMA_EOP_WAIT_ACK (0x1ull << COUNT_REG0_SHIFT) -+#define STP_TPROC_RUNNING (0x2ull << COUNT_REG0_SHIFT) -+#define STC_STEN_PKTS_OPEN (0x3ull << COUNT_REG0_SHIFT) -+#define STP_CPROC_HOLDS_FFU_DP (0x4ull << COUNT_REG0_SHIFT) -+#define STC_TLB_TABLE_WALKS (0x5ull << COUNT_REG0_SHIFT) -+#define STC_CACHE_HITS (0x6ull << COUNT_REG0_SHIFT) -+#define STC_PCI_SLAVE_READS (0x7ull << COUNT_REG0_SHIFT) -+#define STP_PCI_WAITING_FOR_GNT (0x8ull << COUNT_REG0_SHIFT) -+#define STP_SYS_CLOCK_RATE0 (0xfull << COUNT_REG0_SHIFT) -+ -+#define STATS_REG0_NAMES { \ -+ "STC_INPUT_NON_WRITE_BLOCKS", \ -+ "STP_DMA_EOP_WAIT_ACK", \ -+ "STP_TPROC_RUNNING", \ -+ "STC_STEN_PKTS_OPEN", \ -+ "STP_CPROC_HOLDS_FFU_DP", \ -+ "STC_TLB_TABLE_WALKS", \ -+ "STC_CACHE_HITS", \ -+ "STC_PCI_SLAVE_READS", \ -+ "STP_PCI_WAITING_FOR_GNT", \ -+ "STP_SYS_CLOCK_RATE0" \ -+} -+ -+/* Count reg 1 */ -+#define STC_INPUT_WRITE_BLOCKS (0x0ull << COUNT_REG1_SHIFT) -+#define STP_DMA_DATA_TRANSMITTING (0x1ull << COUNT_REG1_SHIFT) -+#define STC_CPROC_VALUES_EXE (0x2ull << COUNT_REG1_SHIFT) -+#define STC_STEN_TRANS_SENT (0x3ull << COUNT_REG1_SHIFT) -+#define STP_TPROC_DQ_HOLDS_FFU_DP (0x4ull << COUNT_REG1_SHIFT) -+#define STC_TPROC_TLB_HITS (0x5ull << COUNT_REG1_SHIFT) -+#define STC_CACHE_ALLOC_MISSES (0x6ull << COUNT_REG1_SHIFT) -+#define STP_PCI_MASTER_READ_WAITING (0x7ull << COUNT_REG1_SHIFT) -+#define STP_PCI_WAITING_FOR_DEVSEL (0x8ull << COUNT_REG1_SHIFT) -+#define STP_SYS_CLOCK_RATE1 (0xfull << COUNT_REG1_SHIFT) -+ -+#define STATS_REG1_NAMES { \ -+ "STC_INPUT_WRITE_BLOCKS", \ -+ "STP_DMA_DATA_TRANSMITTING", \ -+ "STC_CPROC_VALUES_EXE", \ -+ "STC_STEN_TRANS_SENT", \ -+ "STP_TPROC_DQ_HOLDS_FFU_DP", \ -+ "STC_TPROC_TLB_HITS", \ -+ "STC_CACHE_ALLOC_MISSES", \ -+ "STP_PCI_MASTER_READ_WAITING", \ -+ "STP_PCI_WAITING_FOR_DEVSEL", \ -+ "STP_SYS_CLOCK_RATE1" \ -+} -+ -+/* Count reg 2 */ -+#define STC_INPUT_PKTS (0x0ull << COUNT_REG2_SHIFT) -+#define STP_DMA_WAITING_MEM (0x1ull << COUNT_REG2_SHIFT) -+#define STC_CPROC_TRANSFERS (0x2ull << COUNT_REG2_SHIFT) -+#define STP_STEN_WAIT_NETWORK_BUSY (0x3ull << COUNT_REG2_SHIFT) -+#define STP_IPROC_HOLDS_FFU_DP (0x4ull << COUNT_REG2_SHIFT) -+#define STC_UNITS_TLB_HITS (0x5ull << COUNT_REG2_SHIFT) -+#define STC_CACHE_NON_ALLOC_MISSES (0x6ull << COUNT_REG2_SHIFT) -+#define STP_PCI_MASTER_WRITE_WAITING (0x7ull << COUNT_REG2_SHIFT) -+#define STC_PCI_OUT_OF_ORDER_SPLIT_COMP (0x8ull << COUNT_REG2_SHIFT) -+#define STP_SYS_CLOCK_RATE2 (0xfull << COUNT_REG2_SHIFT) -+ -+#define STATS_REG2_NAMES { \ -+ "STC_INPUT_PKTS", \ -+ "STP_DMA_WAITING_MEM", \ -+ "STC_CPROC_TRANSFERS", \ -+ "STP_STEN_WAIT_NETWORK_BUSY", \ -+ "STP_IPROC_HOLDS_FFU_DP", \ -+ "STC_UNITS_TLB_HITS", \ 
-+ "STC_CACHE_NON_ALLOC_MISSES", \ -+ "STP_PCI_MASTER_WRITE_WAITING", \ -+ "STC_PCI_OUT_OF_ORDER_SPLIT_COMP", \ -+ "STP_SYS_CLOCK_RATE2" \ -+} -+ -+/* Count reg 3 */ -+#define STC_INPUT_PKTS_REJECTED (0x0ull << COUNT_REG3_SHIFT) -+#define STP_DMA_WAIT_NETWORK_BUSY (0x1ull << COUNT_REG3_SHIFT) -+#define STC_CPROC_PREFETCH_SDRAM (0x2ull << COUNT_REG3_SHIFT) -+#define STP_STEN_BLOCKED_ACKS_OR_VC (0x3ull << COUNT_REG3_SHIFT) -+#define STP_EPROC_HOLDS_FFU_DP (0x4ull << COUNT_REG3_SHIFT) -+#define STP_TPROC_BLOCKED_MEMSYS (0x5ull << COUNT_REG3_SHIFT) -+#define STC_CACHE_WRITE_BACKS (0x6ull << COUNT_REG3_SHIFT) -+#define STP_PCI_SLAVE_READ_WAITING (0x7ull << COUNT_REG3_SHIFT) -+#define STP_PCI_IDLE_CYCLES (0x8ull << COUNT_REG3_SHIFT) -+#define STP_SYS_CLOCK_RATE3 (0xfull << COUNT_REG3_SHIFT) -+ -+#define STATS_REG3_NAMES { \ -+ "STC_INPUT_PKTS_REJECTED", \ -+ "STP_DMA_WAIT_NETWORK_BUSY", \ -+ "STC_CPROC_PREFETCH_SDRAM", \ -+ "STP_STEN_BLOCKED_ACKS_OR_VC", \ -+ "STP_EPROC_HOLDS_FFU_DP", \ -+ "STP_TPROC_BLOCKED_MEMSYS", \ -+ "STC_CACHE_WRITE_BACKS", \ -+ "STP_PCI_SLAVE_READ_WAITING", \ -+ "STP_PCI_IDLE_CYCLES", \ -+ "STP_SYS_CLOCK_RATE3" \ -+} -+ -+/* Count reg 4 */ -+#define STP_INPUT_DATA_TRANSMITTING (0x0ull << COUNT_REG4_SHIFT) -+#define STC_DMA_PKTS_ACCEPTED (0x1ull << COUNT_REG4_SHIFT) -+#define STC_CPROC_FLUSH_REQ_SDRAM (0x2ull << COUNT_REG4_SHIFT) -+#define STP_STEN_EOP_WAIT_ACK (0x3ull << COUNT_REG4_SHIFT) -+#define STP_DMA_HOLDS_FFU_DP (0x4ull << COUNT_REG4_SHIFT) -+#define STP_UNIT_BLOCKED_MEMSYS (0x5ull << COUNT_REG4_SHIFT) -+#define STC_PCI_MASTER_READS (0x6ull << COUNT_REG4_SHIFT) -+#define STP_PCI_SLAVE_WRITE_WAITING (0x7ull << COUNT_REG4_SHIFT) -+#define STC_INPUT_PACKETS_DISCARDED (0x8ull << COUNT_REG4_SHIFT) -+#define STP_SYS_CLOCK_RATE4 (0xfull << COUNT_REG4_SHIFT) -+ -+#define STATS_REG4_NAMES { \ -+ "STP_INPUT_DATA_TRANSMITTING", \ -+ "STC_DMA_PKTS_ACCEPTED", \ -+ "STC_CPROC_FLUSH_REQ_SDRAM", \ -+ "STP_STEN_EOP_WAIT_ACK", \ -+ "STP_DMA_HOLDS_FFU_DP", \ -+ "STP_UNIT_BLOCKED_MEMSYS", \ -+ "STC_PCI_MASTER_READS", \ -+ "STP_PCI_SLAVE_WRITE_WAITING", \ -+ "STC_INPUT_PACKETS_DISCARDED", \ -+ "STP_SYS_CLOCK_RATE4" \ -+} -+ -+/* Count reg 5 */ -+#define STP_INPUT_WAITING_NETWORK_DATA (0x0ull << COUNT_REG5_SHIFT) -+#define STC_DMA_PKTS_REJECTED (0x1ull << COUNT_REG5_SHIFT) -+#define STC_CPROC_INSERT_CACHE_MISSES (0x2ull << COUNT_REG5_SHIFT) -+#define STP_STEN_TRANSMITTING_DATA (0x3ull << COUNT_REG5_SHIFT) -+#define FFU_BLOCKED_DIFF_FFU_PROC (0x4ull << COUNT_REG5_SHIFT) -+#define STP_TABLE_WALKS_BLOCKED_MEMSYS (0x5ull << COUNT_REG5_SHIFT) -+#define STC_PCI_MASTER_WRITES (0x6ull << COUNT_REG5_SHIFT) -+#define STP_PCI_MASTER_HOLDS_BUS (0x7ull << COUNT_REG5_SHIFT) -+#define STC_PCI_NO_SPLIT_COMPS (0x8ull << COUNT_REG5_SHIFT) -+#define STP_SYS_CLOCK_RATE5 (0xfull << COUNT_REG5_SHIFT) -+ -+#define STATS_REG5_NAMES { \ -+ "STP_INPUT_WAITING_NETWORK_DATA", \ -+ "STC_DMA_PKTS_REJECTED", \ -+ "STC_CPROC_INSERT_CACHE_MISSES", \ -+ "STP_STEN_TRANSMITTING_DATA", \ -+ "FFU_BLOCKED_DIFF_FFU_PROC", \ -+ "STP_TABLE_WALKS_BLOCKED_MEMSYS", \ -+ "STC_PCI_MASTER_WRITES", \ -+ "STP_PCI_MASTER_HOLDS_BUS", \ -+ "STC_PCI_NO_SPLIT_COMPS", \ -+ "STP_SYS_CLOCK_RATE5" \ -+} -+ -+/* Count reg 6 */ -+#define STP_INPUT_BLOCKED_WAITING_TRANS (0x0ull << COUNT_REG6_SHIFT) -+#define STP_TPROC_INST_STALL (0x1ull << COUNT_REG6_SHIFT) -+#define STP_CPROC_WAITING_DESCHED (0x2ull << COUNT_REG6_SHIFT) -+#define STP_STEN_PKT_OPEN_WAITING_DATA (0x3ull << COUNT_REG6_SHIFT) -+#define STP_TLB_HASH_TABLE_ACCESSES (0x4ull << 
COUNT_REG6_SHIFT) -+#define STP_PCI_SLAVE_BLOCKED_MEMSYS (0x5ull << COUNT_REG6_SHIFT) -+#define STP_PCI_TRANSFERRING_DATA (0x6ull << COUNT_REG6_SHIFT) -+#define STP_PCI_MASTER_WAITING_BUS (0x7ull << COUNT_REG6_SHIFT) -+#define STP_PCI_READ_LATENCY (0x8ull << COUNT_REG6_SHIFT) -+#define STP_SYS_CLOCK_RATE6 (0xfull << COUNT_REG6_SHIFT) -+ -+#define STATS_REG6_NAMES { \ -+ "STP_INPUT_BLOCKED_WAITING_TRANS", \ -+ "STP_TPROC_INST_STALL", \ -+ "STP_CPROC_WAITING_DESCHED", \ -+ "STP_STEN_PKT_OPEN_WAITING_DATA", \ -+ "STP_TLB_HASH_TABLE_ACCESSES", \ -+ "STP_PCI_SLAVE_BLOCKED_MEMSYS", \ -+ "STP_PCI_TRANSFERRING_DATA", \ -+ "STP_PCI_MASTER_WAITING_BUS", \ -+ "STP_PCI_READ_LATENCY", \ -+ "STP_SYS_CLOCK_RATE6" \ -+} -+ -+/* Count reg 7 */ -+#define STC_INPUT_CTX_FILTER_FILL (0x0ull << COUNT_REG7_SHIFT) -+#define STP_TPROC_LOAD_STORE_STALL (0x1ull << COUNT_REG7_SHIFT) -+#define STC_CPROC_TIMEOUTS (0x2ull << COUNT_REG7_SHIFT) -+#define STP_STEN_BLOCKED_NETWORK (0x3ull << COUNT_REG7_SHIFT) -+#define STP_TLB_CHAIN_ACCESSES (0x4ull << COUNT_REG7_SHIFT) -+#define STP_CPROC_SCHED_BLOCKED_MEMSYS (0x5ull << COUNT_REG7_SHIFT) -+#define STC_PCI_SLAVE_WRITES (0x6ull << COUNT_REG7_SHIFT) -+#define STC_PCI_DISCONNECTS_RETRIES (0x7ull << COUNT_REG7_SHIFT) -+#define STC_RING_OSCILLATOR (0x8ull << COUNT_REG7_SHIFT) -+#define STP_SYS_CLOCK_RATE7 (0xfull << COUNT_REG7_SHIFT) -+ -+#define STATS_REG7_NAMES { \ -+ "STC_INPUT_CTX_FILTER_FILL", \ -+ "STP_TPROC_LOAD_STORE_STALL", \ -+ "STC_CPROC_TIMEOUTS", \ -+ "STP_STEN_BLOCKED_NETWORK", \ -+ "STP_TLB_CHAIN_ACCESSES", \ -+ "STP_CPROC_SCHED_BLOCKED_MEMSYS", \ -+ "STC_PCI_SLAVE_WRITES", \ -+ "STC_PCI_DISCONNECTS_RETRIES", \ -+ "STC_RING_OSCILLATOR", \ -+ "STP_SYS_CLOCK_RATE7" \ -+} -+ -+#define STATS_REG_NAMES { \ -+ STATS_REG0_NAMES, \ -+ STATS_REG1_NAMES, \ -+ STATS_REG2_NAMES, \ -+ STATS_REG3_NAMES, \ -+ STATS_REG4_NAMES, \ -+ STATS_REG5_NAMES, \ -+ STATS_REG6_NAMES, \ -+ STATS_REG7_NAMES, \ -+} -+ -+ -+#define INPUT_PERF_STATS (STC_INPUT_NON_WRITE_BLOCKS | STC_INPUT_WRITE_BLOCKS | \ -+ STC_INPUT_PKTS | STC_INPUT_PKTS_REJECTED | \ -+ STC_INPUT_CTX_FILTER_FILL | STP_INPUT_DATA_TRANSMITTING | \ -+ STP_INPUT_WAITING_NETWORK_DATA | STP_INPUT_BLOCKED_WAITING_TRANS | STC_INPUT_PACKETS_DISCARDED) -+ -+#define DMA_PERF_STATS (STC_DMA_PKTS_ACCEPTED | STC_DMA_PKTS_REJECTED | \ -+ STP_DMA_EOP_WAIT_ACK | STP_DMA_DATA_TRANSMITTING | \ -+ STP_DMA_WAITING_MEM | STP_DMA_WAIT_NETWORK_BUSY) -+ -+ -+#define TPROC_PERF_STATS (STP_TPROC_RUNNING | STP_TPROC_INST_STALL | \ -+ STP_TPROC_LOAD_STORE_STALL) -+ -+#define CPROC_PERF_STATS (STC_CPROC_VALUES_EXE | STC_CPROC_TRANSFERS | \ -+ STC_CPROC_PREFETCH_SDRAM | STC_CPROC_FLUSH_REQ_SDRAM | \ -+ STC_CPROC_INSERT_CACHE_MISSES | STP_CPROC_WAITING_DESCHED | \ -+ STC_CPROC_TIMEOUTS) -+ -+#define STEN_PERF_STATS (STC_STEN_PKTS_OPEN | STC_STEN_TRANS_SENT | \ -+ STP_STEN_WAIT_NETWORK_BUSY | STP_STEN_BLOCKED_ACKS_OR_VC | \ -+ STP_STEN_EOP_WAIT_ACK | STP_STEN_TRANSMITTING_DATA | \ -+ STP_STEN_PKT_OPEN_WAITING_DATA | STP_STEN_BLOCKED_NETWORK) -+ -+#define FFU_PREF_STATS (STP_CPROC_HOLDS_FFU_DP | STP_TPROC_DQ_HOLDS_FFU_DP | \ -+ STP_IPROC_HOLDS_FFU_DP | STP_EPROC_HOLDS_FFU_DP | \ -+ STP_DMA_HOLDS_FFU_DP | FFU_BLOCKED_DIFF_FFU_PROC) -+ -+#define TABLE_WALK_PERF_STATS (STC_TPROC_TLB_HITS | STC_UNITS_TLB_HITS | \ -+ STP_TLB_HASH_TABLE_ACCESSES | STP_TLB_CHAIN_ACCESSES | \ -+ STC_TLB_TABLE_WALKS) -+ -+#define ADDRESS_ARB_PERF_STATS (STP_UNIT_BLOCKED_MEMSYS | STP_TPROC_BLOCKED_MEMSYS | \ -+ STP_TABLE_WALKS_BLOCKED_MEMSYS | STP_CPROC_SCHED_BLOCKED_MEMSYS | \ -+ 
STP_PCI_SLAVE_BLOCKED_MEMSYS) -+ -+#define CACHE_PERF_STATS (STC_CACHE_HITS | STC_CACHE_ALLOC_MISSES | \ -+ STC_CACHE_NON_ALLOC_MISSES | STC_CACHE_WRITE_BACKS) -+ -+ -+#define PCI_PERF_STATS (STC_PCI_SLAVE_READS | STP_PCI_MASTER_READ_WAITING | \ -+ STP_PCI_MASTER_WRITE_WAITING | STP_PCI_SLAVE_READ_WAITING | \ -+ STP_PCI_SLAVE_WRITE_WAITING | STC_PCI_MASTER_WRITES | \ -+ STP_PCI_TRANSFERRING_DATA | STC_PCI_SLAVE_WRITES) -+ -+#define PCIBUS_PERF_STATS (STP_PCI_WAITING_FOR_GNT | STP_PCI_WAITING_FOR_DEVSEL | \ -+ STC_PCI_OUT_OF_ORDER_SPLIT_COMP | STP_PCI_IDLE_CYCLES | \ -+ STC_PCI_MASTER_READS | STP_PCI_MASTER_HOLDS_BUS | \ -+ STP_PCI_MASTER_WAITING_BUS | STC_PCI_DISCONNECTS_RETRIES) -+ -+ -+ extern const char *elan_stats_names[8][10]; -+ -+#define ELAN_STATS_NAME(COUNT, CONTROL) (elan_stats_names[(COUNT)][(CONTROL) & 7]) -+ -+ typedef volatile union e4_StatsControl -+ { -+ E4_uint64 StatsControl; -+ struct -+ { -+#if (BYTE_ORDER == LITTLE_ENDIAN) || defined(__LITTLE_ENDIAN__) -+ E4_uint32 StatCont0:4; -+ E4_uint32 StatCont1:4; -+ E4_uint32 StatCont2:4; -+ E4_uint32 StatCont3:4; -+ E4_uint32 StatCont4:4; -+ E4_uint32 StatCont5:4; -+ E4_uint32 StatCont6:4; -+ E4_uint32 StatCont7:4; -+#else -+ E4_uint32 StatCont7:4; -+ E4_uint32 StatCont6:4; -+ E4_uint32 StatCont5:4; -+ -+ E4_uint32 StatCont4:4; -+ E4_uint32 StatCont3:4; -+ E4_uint32 StatCont2:4; -+ E4_uint32 StatCont1:4; -+ E4_uint32 StatCont0:4; -+#endif -+ E4_uint32 pad; -+ } s; -+ } E4_StatsControl; -+ -+typedef volatile union e4_StatsCount -+{ -+ E4_uint64 ClockStat; -+ struct -+ { -+ E4_uint32 ClockLSW; /* read only */ -+ E4_uint32 StatsCount; -+ } s; -+} E4_StatsCount; -+ -+typedef volatile union e4_clock -+{ -+ E4_uint64 NanoSecClock; -+ struct -+ { -+ E4_uint32 ClockLSW; -+ E4_uint32 ClockMSW; -+ } s; -+} E4_Clock; -+#define E4_TIME( X ) ((X).NanoSecClock) -+ -+#define ELAN4_COMMS_CLOCK_FREQUENCY 660 /* In Mhz. This is half the bit rate. */ -+#define ELAN4_CLOCK_ADD_VALUE 200 /* For 200ns increment rate */ -+#define ELAN4_CLOCK_COMMS_DIV_VALUE (((ELAN4_COMMS_CLOCK_FREQUENCY * ELAN4_CLOCK_ADD_VALUE) / (1000 * 4)) - 1) -+#define ELAN4_CLOCK_TICK_RATE ((ELAN4_CLOCK_ADD_VALUE << 8) + ELAN4_CLOCK_COMMS_DIV_VALUE) -+ -+typedef volatile union e4_clocktickrate -+{ -+ E4_uint64 NanoSecClock; -+ struct -+ { -+ E4_uint32 pad1; -+ E4_uint32 TickRates; -+ } s; -+} E4_ClockTickRate; -+ -+/* -+ * This is made into an 8k byte object. -+ */ -+typedef volatile struct _E4_User_Regs -+{ -+ E4_StatsCount StatCounts[8]; -+ E4_StatsCount InstCount; -+ E4_Clock Clock; -+ E4_StatsControl StatCont; -+ E4_ClockTickRate ClockTickRate; -+ E4_uint8 pad1[EightK - ((sizeof(E4_StatsCount)*9)+sizeof(E4_StatsControl)+ -+ sizeof(E4_Clock)+sizeof(E4_ClockTickRate))]; -+} E4_User_Regs; -+ -+#ifdef __cplusplus -+} -+#endif -+ -+#endif /* __ELAN4_USERREGS_H */ -diff -urN clean/include/elan4/usertrap.h linux-2.6.9/include/elan4/usertrap.h ---- clean/include/elan4/usertrap.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan4/usertrap.h 2004-05-05 05:08:35.000000000 -0400 -@@ -0,0 +1,114 @@ -+/* -+ * Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
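Each of the eight 4-bit nibbles in E4_StatsControl selects one event from the matching "Count reg N" group above, and the results are read back through StatCounts[]. A minimal sketch in C, assuming `regs` already points at a mapped E4_User_Regs page (how that mapping is obtained is device-specific and outside this header):

/* Sketch only: `regs` is assumed to be an already-mapped E4_User_Regs page. */
static void example_sample_stats(E4_User_Regs *regs, E4_uint32 counts[8])
{
	int i;

	/* One selection per counter, each taken from its own "Count reg N" group. */
	regs->StatCont.StatsControl = STC_INPUT_NON_WRITE_BLOCKS |   /* counter 0 */
	                              STC_INPUT_WRITE_BLOCKS |       /* counter 1 */
	                              STC_INPUT_PKTS |               /* counter 2 */
	                              STC_INPUT_PKTS_REJECTED |      /* counter 3 */
	                              STC_DMA_PKTS_ACCEPTED |        /* counter 4 */
	                              STC_DMA_PKTS_REJECTED |        /* counter 5 */
	                              STP_PCI_TRANSFERRING_DATA |    /* counter 6 */
	                              STP_SYS_CLOCK_RATE7;           /* counter 7 */

	/* ... run the workload of interest, then read the counters back ... */
	for (i = 0; i < 8; i++)
		counts[i] = regs->StatCounts[i].s.StatsCount;
}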
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: usertrap.h,v 1.17 2004/05/05 09:08:35 david Exp $" -+/* $Source: /cvs/master/quadrics/elan4mod/usertrap.h,v $*/ -+ -+#ifndef __ELAN4_USERTRAP_H -+#define __ELAN4_USERTRAP_H -+ -+#ifndef _ASM -+typedef struct elan4_user_page -+{ -+ E4_uint64 upage_ddcq_completed; -+} ELAN4_USER_PAGE; -+ -+typedef struct elan4_user_trap -+{ -+ int ut_type; -+ unsigned ut_proc; -+ unsigned ut_args[4]; -+ -+ union { -+ ELAN4_EPROC_TRAP eproc; -+ ELAN4_CPROC_TRAP cproc; -+ ELAN4_DPROC_TRAP dproc; -+ ELAN4_IPROC_TRAP iproc; -+ ELAN4_TPROC_TRAP tproc; -+ ELAN4_NETERR_MSG msg; -+ } ut_trap; -+} ELAN4_USER_TRAP; -+ -+#endif /* _ASM */ -+ -+ -+/* value for ut_type */ -+#define UTS_FINISHED 0 /* all pending traps have been handled */ -+#define UTS_RESCHEDULE 1 /* must return to user mode and re-enter */ -+#define UTS_UNIMP_INSTR 2 /* unimplemented thread instruction */ -+#define UTS_EXECUTE_PACKET 3 /* iproc trap needs packet executing */ -+#define UTS_NETWORK_ERROR_TRAP 4 /* network error on this trap */ -+#define UTS_NETWORK_ERROR_MSG 5 /* network error message */ -+#define UTS_NETWORK_ERROR_TIMER 6 /* network error timer expired */ -+ -+#define UTS_EFAULT -1 /* failed to copyout trap */ -+#define UTS_INVALID_ADDR -2 /* all -ve codes mean trap could not be resolved. */ -+#define UTS_INVALID_VPROC -3 -+#define UTS_INVALID_COMMAND -4 -+#define UTS_BAD_TRAP -5 -+#define UTS_ALIGNMENT_ERROR -6 -+#define UTS_QUEUE_OVERFLOW -7 -+#define UTS_QUEUE_ERROR -8 -+#define UTS_INVALID_TRANS -9 -+#define UTS_PERMISSION_DENIED -10 -+#define UTS_CPROC_ERROR -11 -+#define UTS_INVALID_COOKIE -12 -+#define UTS_NETERR_ERROR -13 -+ -+/* "special" values for registering handlers */ -+#define UTS_ALL_TRAPS -9999 -+ -+/* value for ut_proc */ -+#define UTS_NOPROC 0 -+#define UTS_EPROC 1 -+#define UTS_CPROC 2 -+#define UTS_DPROC 3 -+#define UTS_TPROC 4 -+#define UTS_IPROC 5 -+#define UTS_NETERR_MSG 6 -+ -+/* unimplemented trap numbers for thread processor */ -+#define ELAN4_T_TRAP_INSTR(t) (0x80202000 | ((t) & 0xFF)) -+ -+#define ELAN4_T_SYSCALL_TRAP 1 -+# define ELAN4_T_OPEN 0 -+# define ELAN4_T_WRITE 1 -+# define ELAN4_T_READ 2 -+# define ELAN4_T_IOCTL 3 -+# define ELAN4_T_LSEEK 4 -+# define ELAN4_T_POLL 5 -+# define ELAN4_T_CLOSE 6 -+# define ELAN4_T_KILL 7 -+# define ELAN4_T_MMAP 8 -+# define ELAN4_T_MUNMAP 9 -+# define ELAN4_T_ABORT 100 -+# define ELAN4_T_DEBUG 101 -+# define ELAN4_T_REGDUMP 102 -+ -+#define ELAN4_T_REGDUMP_TRAP 2 -+ -+#define ELAN4_T_LIBELAN_TRAP 3 -+# define ELAN4_T_TPORT_NEWBUF 0 -+# define ELAN4_T_TPORT_GC 1 -+# define ELAN4_T_TPORT_DEBUG 2 -+ -+#define ELAN4_T_ALLOC_TRAP 4 -+# define ELAN4_T_ALLOC_ELAN 0 -+# define ELAN4_T_ALLOC_MAIN 1 -+# define ELAN4_T_FREE_ELAN 2 -+# define ELAN4_T_FREE_MAIN 3 -+ -+/* reserved main interrupt cookies */ -+#define ELAN4_INT_COOKIE_DDCQ 0 -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -+#endif /* __ELAN4_USERTRAP_H */ -diff -urN clean/include/elan4/xsdram.h linux-2.6.9/include/elan4/xsdram.h ---- clean/include/elan4/xsdram.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/elan4/xsdram.h 2004-03-05 07:32:04.000000000 -0500 -@@ -0,0 +1,59 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2003 by Quadrics Ltd. 
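The non-negative UTS_* values ask the caller to make progress or re-enter, while every negative value marks a trap that could not be resolved. A dispatch sketch, with the assumption that `utrap` was filled in by the driver's trap-retrieval path, which this header does not define:

/* Sketch only: `utrap` is assumed to have been filled in by the driver. */
static int example_dispatch(ELAN4_USER_TRAP *utrap)
{
	if (utrap->ut_type < 0)            /* all -ve codes: trap not resolvable */
		return utrap->ut_type;

	switch (utrap->ut_type) {
	case UTS_FINISHED:                 /* nothing outstanding */
		return 0;
	case UTS_RESCHEDULE:               /* drop back to user mode and re-enter */
		return 0;
	case UTS_EXECUTE_PACKET:           /* utrap->ut_trap.iproc holds the packet */
		/* ... execute or drop the packet here ... */
		return 0;
	case UTS_NETWORK_ERROR_MSG:        /* utrap->ut_trap.msg is valid */
		/* ... run network error fixup ... */
		return 0;
	default:
		return -1;                 /* unhandled ut_type */
	}
}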
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ifndef __ELAN4_XSDRAM_H
-+#define __ELAN4_XSDRAM_H
-+
-+#ident "@(#)$Id: xsdram.h,v 1.13 2004/03/05 12:32:04 jon Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
-+/* $Source: /cvs/master/quadrics/elan4hdr/xsdram.h,v $*/
-+
-+/* SAMSUNG K4H281638D-TCB3 */
-+
-+#define SDRAM_tRCF_1_SH      0
-+#define SDRAM_tRP_1_SH       4
-+#define SDRAM_tRCD_SH        8
-+#define SDRAM_tRRD_SH        12
-+#define SDRAM_tEndWr_SH      16
-+#define SDRAM_tEndRd_SH      20
-+#define SDRAM_Burst_SH       24
-+#define SDRAM_CL_SH          28
-+#define SDRAM_DsblBypass     (1ULL << 31)
-+#define SDRAM_RefreshRate_SH 32
-+#define SDRAM_RamSize_SH     34
-+#define SDRAM_ReadLtncy_1_SH 36
-+#define SDRAM_RdOffset_SH    40
-+#define SDRAM_FlightDelay_SH 42
-+
-+#define SDRAM_ENABLE_ECC     (1ULL << 44) // Enables error detection on the ECC.
-+#define SDRAM_SDRAM_TESTING  (1ULL << 45) // Switches to test mode for checking ECC data bits
-+#define SDRAM_SETUP          (1ULL << 46) // Writes SDram control reg when set. Also starts
-+
-+#define SDRAM_CS_MODE0       0ULL // 64Mbit, 128Mbit, 256Mbit, 512Mbit or 1Gbit (16-bit output)
-+#define SDRAM_CS_MODE1       1ULL // 64Mbit, 128Mbit, 256Mbit or 512Mbit (8-bit output)
-+#define SDRAM_CS_MODE2       2ULL // 2Gbit (16-bit output) or 1Gbit (8-bit output)
-+#define SDRAM_CS_MODE3       3ULL // 4Gbit (16-bit output) or 2Gbit (8-bit output)
-+
-+#if defined(LINUX) && !defined(CONFIG_MPSAS)
-+#define SDRAM_STARTUP_VALUE ((0xbULL << SDRAM_tRCF_1_SH) | (0x2ULL << SDRAM_tRP_1_SH) | \
-+                             (0x3ULL << SDRAM_tRCD_SH) | (0x2ULL << SDRAM_tRRD_SH) | \
-+                             (0xaULL << SDRAM_tEndWr_SH) | (0x6ULL << SDRAM_tEndRd_SH) | \
-+                             (0x8ULL << SDRAM_Burst_SH) | (0x6ULL << SDRAM_CL_SH) | \
-+                             (0x2ULL << SDRAM_RefreshRate_SH) | (0x3ULL << SDRAM_RamSize_SH) | \
-+                             (0x1ULL << SDRAM_RdOffset_SH) | (0x1ULL << SDRAM_FlightDelay_SH) | \
-+                             (0x4ULL << SDRAM_ReadLtncy_1_SH))
-+#else
-+#define SDRAM_STARTUP_VALUE ((0xbULL << SDRAM_tRCF_1_SH) | (0x2ULL << SDRAM_tRP_1_SH) | \
-+                             (0x3ULL << SDRAM_tRCD_SH) | (0x2ULL << SDRAM_tRRD_SH) | \
-+                             (0xaULL << SDRAM_tEndWr_SH) | (0x6ULL << SDRAM_tEndRd_SH) | \
-+                             (0x8ULL << SDRAM_Burst_SH) | (0x6ULL << SDRAM_CL_SH) | \
-+                             (0x0ULL << SDRAM_RefreshRate_SH) | (0x0ULL << SDRAM_RamSize_SH) | \
-+                             (0x1ULL << SDRAM_RdOffset_SH) | (0x1ULL << SDRAM_FlightDelay_SH) | \
-+                             (0x4ULL << SDRAM_ReadLtncy_1_SH) | SDRAM_ENABLE_ECC | SDRAM_SETUP)
-+#endif
-+
-+#endif /* __ELAN4_XSDRAM_H */
-diff -urN clean/include/jtag/jtagio.h linux-2.6.9/include/jtag/jtagio.h
---- clean/include/jtag/jtagio.h	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/jtag/jtagio.h	2004-12-16 05:39:27.000000000 -0500
-@@ -0,0 +1,106 @@
-+/*
-+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ident "$Id: jtagio.h,v 1.8 2004/12/16 10:39:27 lee Exp $"
-+/* $Source: /cvs/master/quadrics/jtagmod/jtagio.h,v $*/
-+
-+
-+#ifndef __SYS_JTAGMOD_H
-+#define __SYS_JTAGMOD_H
-+
-+#ifdef __cplusplus
-+extern "C" {
-+#endif
-+
-+#define JTAG_MAX_CHIPS      8
-+#define JTAG_MAX_INSTR_LEN  8
-+#define JTAG_MAX_BITS       (JTAG_MAX_CHIPS * JTAG_MAX_INSTR_LEN)
-+#define JTAG_MAX_DATA_LEN   1024
-+
-+#define JTAG_BYPASS         0xFF
-+
-+#define I2C_ADDR_LEN        7    /* 7 bits of address */
-+#define I2C_DATA_LEN        8    /* 8 bits of data */
-+#define I2C_MAX_DATA_LEN    9    /* and up to 9 bytes worth */
-+
-+#define BITS_PER_BYTE       8
-+#define JTAG_NBYTES(nbits)  (((nbits)+BITS_PER_BYTE-1)/BITS_PER_BYTE)
-+#define JTAG_BIT(v, num)    (((v)[(num) / BITS_PER_BYTE] >> ((num) % BITS_PER_BYTE)) & 1)
-+#define JTAG_SET_BIT(v, num) ((v)[(num) / BITS_PER_BYTE] |= (1 << ((num) % BITS_PER_BYTE)))
-+#define JTAG_CLR_BIT(v, num) ((v)[(num) / BITS_PER_BYTE] &= ~(1 << ((num) % BITS_PER_BYTE)))
-+
-+#define RING_CLOCK_CARD     (0x3D)
-+#define RING_CLOCK_SHIFT    (0x3E)
-+#define RING_JTAG_LOOPBACK  (0x3F)
-+#define RING_MAX            (0x40)
-+
-+#define RING_QUAD_BIT       (0x40)
-+#define RING_I2C_BIT        (0x80)
-+
-+#define VALID_JTAG_RING(ring) ((ring) < 0x20 || (ring) == RING_JTAG_LOOPBACK)
-+#define VALID_I2C_RING(ring)  ((ring) < 0x20 || (ring) == RING_CLOCK_CARD)
-+
-+
-+typedef struct jtag_value
-+{
-+    u_char bytes[JTAG_NBYTES(JTAG_MAX_DATA_LEN)];
-+} JTAG_VALUE;
-+
-+/* arguments to JTAG_SHIFT_IR/JTAG_SHIFT_DR */
-+typedef struct jtag_reset_args
-+{
-+    u_int   ring;
-+} JTAG_RESET_ARGS;
-+
-+typedef struct jtag_shift_args
-+{
-+    u_int   ring;
-+    u_int   nbits;
-+    u_char *value;
-+} JTAG_SHIFT_ARGS;
-+
-+typedef struct i2c_args
-+{
-+    u_int   ring;
-+    u_int   device;
-+    u_int   reg;
-+    u_int   count;
-+    u_int   ok;
-+    u_char  data[I2C_MAX_DATA_LEN];
-+} I2C_ARGS;
-+
-+/* values for 'ok' - the return value from i2c_xx functions */
-+#define I2C_OP_SUCCESS      0
-+#define I2C_OP_ERROR        1
-+#define I2C_OP_NOT_IDLE     2
-+#define I2C_OP_NO_DEVICE    3
-+#define I2C_OP_WRITE_TO_BIG 4
-+#define I2C_OP_BAD_RESOURCE 5
-+
-+typedef struct i2c_clock_shift_args
-+{
-+    u_int   t;
-+    u_int   n;
-+    u_int   m;
-+} I2C_CLOCK_SHIFT_ARGS;
-+
-+#define JTAG_RESET      _IOWR('j', '0', JTAG_RESET_ARGS)
-+#define JTAG_SHIFT_IR   _IOWR('j', '1', JTAG_SHIFT_ARGS)
-+#define JTAG_SHIFT_DR   _IOWR('j', '2', JTAG_SHIFT_ARGS)
-+
-+#define I2C_CLOCK_SHIFT _IOWR('j', '4', I2C_CLOCK_SHIFT_ARGS)
-+#define I2C_WRITE       _IOWR('j', '5', I2C_ARGS)
-+#define I2C_READ        _IOWR('j', '6', I2C_ARGS)
-+#define I2C_WRITEREG    _IOWR('j', '7', I2C_ARGS)
-+#define I2C_READREG     _IOWR('j', '8', I2C_ARGS)
-+
-+
-+#ifdef __cplusplus
-+}
-+#endif
-+
-+#endif /* __SYS_JTAGMOD_H */
-diff -urN clean/include/linux/init_task.h linux-2.6.9/include/linux/init_task.h
---- clean/include/linux/init_task.h	2004-10-18 17:53:13.000000000 -0400
-+++ linux-2.6.9/include/linux/init_task.h	2005-10-10 17:47:17.000000000 -0400
-@@ -2,6 +2,7 @@
- #define _LINUX__INIT_TASK_H
- 
- #include <linux/file.h>
-+#include <linux/ptrack.h>
- 
- #define INIT_FILES \
- { \
-@@ -112,6 +113,7 @@
- 	.proc_lock	= SPIN_LOCK_UNLOCKED,				\
- 	.switch_lock	= SPIN_LOCK_UNLOCKED,				\
- 	.journal_info	= NULL,						\
-+	INIT_TASK_PTRACK(tsk)						\
- }
- 
- 
-diff -urN clean/include/linux/ioproc.h linux-2.6.9/include/linux/ioproc.h
---- clean/include/linux/ioproc.h	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/linux/ioproc.h	2005-10-10 17:47:17.000000000 -0400
-@@ -0,0 +1,271 @@
-+/* -*- linux-c -*-
-+ *
-+ * Copyright (C) 2002-2004 Quadrics Ltd.
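User space drives the JTAG/I2C structures above through the ioctl numbers they define. A usage sketch; the device node name "/dev/jtag0" is an assumption, as the header does not name one:

#include <sys/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Read one byte-wide register over I2C; "/dev/jtag0" is a made-up node name. */
static int example_i2c_readreg(u_int ring, u_int device, u_int reg)
{
	I2C_ARGS args;
	int      fd, val = -1;

	if (!VALID_I2C_RING(ring))
		return -1;

	memset(&args, 0, sizeof(args));
	args.ring   = ring;
	args.device = device;
	args.reg    = reg;
	args.count  = 1;

	if ((fd = open("/dev/jtag0", O_RDWR)) < 0)
		return -1;
	if (ioctl(fd, I2C_READREG, &args) == 0 && args.ok == I2C_OP_SUCCESS)
		val = args.data[0];
	close(fd);
	return val;
}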
-+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ * -+ * -+ */ -+ -+/* -+ * Callbacks for IO processor page table updates. -+ */ -+ -+#ifndef __LINUX_IOPROC_H__ -+#define __LINUX_IOPROC_H__ -+ -+#include -+#include -+ -+typedef struct ioproc_ops { -+ struct ioproc_ops *next; -+ void *arg; -+ -+ void (*release)(void *arg, struct mm_struct *mm); -+ void (*sync_range)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end); -+ void (*invalidate_range)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end); -+ void (*update_range)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end); -+ -+ void (*change_protection)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot); -+ -+ void (*sync_page)(void *arg, struct vm_area_struct *vma, unsigned long address); -+ void (*invalidate_page)(void *arg, struct vm_area_struct *vma, unsigned long address); -+ void (*update_page)(void *arg, struct vm_area_struct *vma, unsigned long address); -+ -+} ioproc_ops_t; -+ -+/* IOPROC Registration -+ * -+ * Called by the IOPROC device driver to register its interest in page table -+ * changes for the process associated with the supplied mm_struct -+ * -+ * The caller should first allocate and fill out an ioproc_ops structure with -+ * the function pointers initialised to the device driver specific code for -+ * each callback. If the device driver doesn't have code for a particular -+ * callback then it should set the function pointer to be NULL. -+ * The ioproc_ops arg parameter will be passed unchanged as the first argument -+ * to each callback function invocation. -+ * -+ * The ioproc registration is not inherited across fork() and should be called -+ * once for each process that the IOPROC device driver is interested in. -+ * -+ * Must be called holding the mm->page_table_lock -+ */ -+extern int ioproc_register_ops(struct mm_struct *mm, struct ioproc_ops *ip); -+ -+ -+/* IOPROC De-registration -+ * -+ * Called by the IOPROC device driver when it is no longer interested in page -+ * table changes for the process associated with the supplied mm_struct -+ * -+ * Normally this is not needed to be called as the ioproc_release() code will -+ * automatically unlink the ioproc_ops struct from the mm_struct as the -+ * process exits -+ * -+ * Must be called holding the mm->page_table_lock -+ */ -+extern int ioproc_unregister_ops(struct mm_struct *mm, struct ioproc_ops *ip); -+ -+#ifdef CONFIG_IOPROC -+ -+/* IOPROC Release -+ * -+ * Called during exit_mmap() as all vmas are torn down and unmapped. -+ * -+ * Also unlinks the ioproc_ops structure from the mm list as it goes. 
-+ * -+ * No need for locks as the mm can no longer be accessed at this point -+ * -+ */ -+static inline void -+ioproc_release(struct mm_struct *mm) -+{ -+ struct ioproc_ops *cp; -+ -+ while ((cp = mm->ioproc_ops) != NULL) { -+ mm->ioproc_ops = cp->next; -+ -+ if (cp->release) -+ cp->release(cp->arg, mm); -+ } -+} -+ -+/* IOPROC SYNC RANGE -+ * -+ * Called when a memory map is synchronised with its disk image i.e. when the -+ * msync() syscall is invoked. Any future read or write to the associated -+ * pages by the IOPROC should cause the page to be marked as referenced or -+ * modified. -+ * -+ * Called holding the mm->page_table_lock -+ */ -+static inline void -+ioproc_sync_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) -+{ -+ struct ioproc_ops *cp; -+ -+ for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next) -+ if (cp->sync_range) -+ cp->sync_range(cp->arg, vma, start, end); -+} -+ -+/* IOPROC INVALIDATE RANGE -+ * -+ * Called whenever a valid PTE is unloaded e.g. when a page is unmapped by the -+ * user or paged out by the kernel. -+ * -+ * After this call the IOPROC must not access the physical memory again unless -+ * a new translation is loaded. -+ * -+ * Called holding the mm->page_table_lock -+ */ -+static inline void -+ioproc_invalidate_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) -+{ -+ struct ioproc_ops *cp; -+ -+ for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next) -+ if (cp->invalidate_range) -+ cp->invalidate_range(cp->arg, vma, start, end); -+} -+ -+/* IOPROC UPDATE RANGE -+ * -+ * Called whenever a valid PTE is loaded e.g. mmaping memory, moving the brk -+ * up, when breaking COW or faulting in an anonymous page of memory. -+ * -+ * These give the IOPROC device driver the opportunity to load translations -+ * speculatively, which can improve performance by avoiding device translation -+ * faults. -+ * -+ * Called holding the mm->page_table_lock -+ */ -+static inline void -+ioproc_update_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) -+{ -+ struct ioproc_ops *cp; -+ -+ for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next) -+ if (cp->update_range) -+ cp->update_range(cp->arg, vma, start, end); -+} -+ -+ -+/* IOPROC CHANGE PROTECTION -+ * -+ * Called when the protection on a region of memory is changed i.e. when the -+ * mprotect() syscall is invoked. -+ * -+ * The IOPROC must not be able to write to a read-only page, so if the -+ * permissions are downgraded then it must honour them. If they are upgraded -+ * it can treat this in the same way as the ioproc_update_[range|sync]() calls -+ * -+ * Called holding the mm->page_table_lock -+ */ -+static inline void -+ioproc_change_protection(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot) -+{ -+ struct ioproc_ops *cp; -+ -+ for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next) -+ if (cp->change_protection) -+ cp->change_protection(cp->arg, vma, start, end, newprot); -+} -+ -+/* IOPROC SYNC PAGE -+ * -+ * Called when a memory map is synchronised with its disk image i.e. when the -+ * msync() syscall is invoked. Any future read or write to the associated page -+ * by the IOPROC should cause the page to be marked as referenced or modified. 
-+ *
-+ * Not currently called as msync() calls ioproc_sync_range() instead
-+ *
-+ * Called holding the mm->page_table_lock
-+ */
-+static inline void
-+ioproc_sync_page(struct vm_area_struct *vma, unsigned long addr)
-+{
-+	struct ioproc_ops *cp;
-+
-+	for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next)
-+		if (cp->sync_page)
-+			cp->sync_page(cp->arg, vma, addr);
-+}
-+
-+/* IOPROC INVALIDATE PAGE
-+ *
-+ * Called whenever a valid PTE is unloaded e.g. when a page is unmapped by the
-+ * user or paged out by the kernel.
-+ *
-+ * After this call the IOPROC must not access the physical memory again unless
-+ * a new translation is loaded.
-+ *
-+ * Called holding the mm->page_table_lock
-+ */
-+static inline void
-+ioproc_invalidate_page(struct vm_area_struct *vma, unsigned long addr)
-+{
-+	struct ioproc_ops *cp;
-+
-+	for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next)
-+		if (cp->invalidate_page)
-+			cp->invalidate_page(cp->arg, vma, addr);
-+}
-+
-+/* IOPROC UPDATE PAGE
-+ *
-+ * Called whenever a valid PTE is loaded e.g. mmaping memory, moving the brk
-+ * up, when breaking COW or faulting in an anonymous page of memory.
-+ *
-+ * These give the IOPROC device the opportunity to load translations
-+ * speculatively, which can improve performance by avoiding device translation
-+ * faults.
-+ *
-+ * Called holding the mm->page_table_lock
-+ */
-+static inline void
-+ioproc_update_page(struct vm_area_struct *vma, unsigned long addr)
-+{
-+	struct ioproc_ops *cp;
-+
-+	for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next)
-+		if (cp->update_page)
-+			cp->update_page(cp->arg, vma, addr);
-+}
-+
-+#else
-+
-+/* ! CONFIG_IOPROC so make all hooks empty */
-+
-+#define ioproc_release(mm)				do { } while (0)
-+
-+#define ioproc_sync_range(vma, start, end)		do { } while (0)
-+
-+#define ioproc_invalidate_range(vma, start, end)	do { } while (0)
-+
-+#define ioproc_update_range(vma, start, end)		do { } while (0)
-+
-+#define ioproc_change_protection(vma, start, end, prot)	do { } while (0)
-+
-+#define ioproc_sync_page(vma, addr)			do { } while (0)
-+
-+#define ioproc_invalidate_page(vma, addr)		do { } while (0)
-+
-+#define ioproc_update_page(vma, addr)			do { } while (0)
-+
-+#endif /* CONFIG_IOPROC */
-+
-+#endif /* __LINUX_IOPROC_H__ */
-diff -urN clean/include/linux/ptrack.h linux-2.6.9/include/linux/ptrack.h
---- clean/include/linux/ptrack.h	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/linux/ptrack.h	2005-10-10 17:47:17.000000000 -0400
-@@ -0,0 +1,65 @@
-+/*
-+ * Copyright (C) 2000 Regents of the University of California
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
-+ *
-+ * Derived from exit_actn.c by
-+ * Copyright (C) 2003 Quadrics Ltd.
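A registration sketch for the ioproc hooks above, assuming a hypothetical driver (the mydrv_* names) that only needs range invalidation; every hook it has no code for stays NULL, as the registration comment allows:

/* Hypothetical driver hook: unload device translations for [start, end). */
static void mydrv_invalidate_range(void *arg, struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
	/* ... drop the device MMU entries covering the range ... */
}

/* `ops` points at zeroed, driver-owned storage that outlives the mm. */
static int mydrv_track_mm(struct mm_struct *mm, struct ioproc_ops *ops,
                          void *drvdata)
{
	int res;

	ops->arg              = drvdata;
	ops->invalidate_range = mydrv_invalidate_range;
	/* every other hook deliberately left NULL */

	spin_lock(&mm->page_table_lock);        /* required by the API above */
	res = ioproc_register_ops(mm, ops);
	spin_unlock(&mm->page_table_lock);

	return res;
}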
-+ *
-+ */
-+#ifndef __LINUX_PTRACK_H
-+#define __LINUX_PTRACK_H
-+
-+/*
-+ * Process tracking - this allows a module to keep track of processes
-+ * in order that it can manage all tasks derived from a single process.
-+ */
-+
-+#define PTRACK_PHASE_CLONE	1
-+#define PTRACK_PHASE_CLONE_FAIL	2
-+#define PTRACK_PHASE_EXEC	3
-+#define PTRACK_PHASE_EXIT	4
-+
-+#define PTRACK_FINISHED		0
-+#define PTRACK_INNHERIT		1
-+#define PTRACK_DENIED		2
-+
-+#ifdef CONFIG_PTRACK
-+
-+typedef int (*ptrack_callback_t)(void *arg, int phase, struct task_struct *child);
-+
-+struct ptrack_desc {
-+	struct list_head	link;
-+	ptrack_callback_t	callback;
-+	void			*arg;
-+};
-+
-+extern int	ptrack_register (ptrack_callback_t callback, void *arg);
-+extern void	ptrack_deregister (ptrack_callback_t callback, void *arg);
-+extern int	ptrack_registered (ptrack_callback_t callback, void *arg);
-+
-+extern int	ptrack_call_callbacks (int phase, struct task_struct *child);
-+
-+#define INIT_TASK_PTRACK(tsk) \
-+	.ptrack_list = LIST_HEAD_INIT(tsk.ptrack_list)
-+
-+#else
-+#define ptrack_call_callbacks(phase, child) (0)
-+
-+#define INIT_TASK_PTRACK(tsk)
-+
-+#endif
-+
-+#endif /* __LINUX_PTRACK_H */
-diff -urN clean/include/linux/sched.h linux-2.6.9/include/linux/sched.h
---- clean/include/linux/sched.h	2005-05-13 13:39:11.000000000 -0400
-+++ linux-2.6.9/include/linux/sched.h	2005-10-10 17:47:17.000000000 -0400
-@@ -184,6 +184,9 @@
- asmlinkage void schedule(void);
- 
- struct namespace;
-+#ifdef CONFIG_IOPROC
-+struct ioproc_ops;
-+#endif
- 
- /* Maximum number of active map areas.. This is a random (large) number */
- #define DEFAULT_MAX_MAP_COUNT	65536
-@@ -259,6 +262,11 @@
- 	struct kioctx		*ioctx_list;
- 
- 	struct kioctx		default_kioctx;
-+
-+#ifdef CONFIG_IOPROC
-+	/* hooks for io devices with advanced RDMA capabilities */
-+	struct ioproc_ops	*ioproc_ops;
-+#endif
- };
- 
- extern int mmlist_nr;
-@@ -600,6 +608,10 @@
- 	struct mempolicy *mempolicy;
- 	short il_next;		/* could be shared with used_math */
- #endif
-+#ifdef CONFIG_PTRACK
-+/* process tracking callback */
-+	struct list_head ptrack_list;
-+#endif
- };
- 
- static inline pid_t process_group(struct task_struct *tsk)
-diff -urN clean/include/qsnet/autoconf.h linux-2.6.9/include/qsnet/autoconf.h
---- clean/include/qsnet/autoconf.h	1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.9/include/qsnet/autoconf.h	2005-10-10 17:47:30.000000000 -0400
-@@ -0,0 +1,44 @@
-+/*
-+ * Copyright (c) 2005 by Quadrics Ltd.
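A callback sketch for the ptrack interface above; the return-value semantics assumed here are inferred from the constant names (PTRACK_INNHERIT keeps tracking the clone, PTRACK_FINISHED drops the callback), so treat the details as illustrative:

/* Illustrative callback; `arg` would be driver-private per-process state. */
static int mydrv_ptrack_callback(void *arg, int phase, struct task_struct *child)
{
	switch (phase) {
	case PTRACK_PHASE_CLONE:
		return PTRACK_INNHERIT;         /* assumed: follow the new child too */
	case PTRACK_PHASE_CLONE_FAIL:
	case PTRACK_PHASE_EXEC:
	case PTRACK_PHASE_EXIT:
	default:
		return PTRACK_FINISHED;         /* assumed: no further interest here */
	}
}

/* Registered once for each process of interest, e.g. from a driver ioctl:
 *	ptrack_register(mydrv_ptrack_callback, drvdata);
 */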
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ * NOTE: This file has been automatically generated: -+ * node : lester0.hp.com -+ * kernel : ../linux-2.6.9 -+ * date : Mon Oct 10 17:47:29 EDT 2005 -+ * -+ */ -+ -+#ifndef __QSNET_AUTOCONF_H -+#define __QSNET_AUTOCONF_H -+ -+#include -+#undef NO_RMAP -+#undef AC -+#undef NO_O1_SCHED -+#undef NO_NPTL -+#undef NO_ABI -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) -+#define PROCESS_ACCT -+#endif -+#undef RSS_ATOMIC -+#define NO_COPROC -+#undef NO_IOPROC -+#undef NO_PTRACK -+#define NO_PANIC_NOTIFIER -+#undef NO_SHM_CLEANUP -+#undef NO_PDE -+ -+ -+#define CONFIG_EIP -+#define CONFIG_ELAN -+#define CONFIG_ELAN3 -+#define CONFIG_ELAN4 -+#define CONFIG_EP -+#define CONFIG_JTAG -+#define CONFIG_QSNET -+#define CONFIG_RMS -+ -+#endif /* __QSNET_AUTOCONF_H */ -+ -diff -urN clean/include/qsnet/condvar.h linux-2.6.9/include/qsnet/condvar.h ---- clean/include/qsnet/condvar.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/qsnet/condvar.h 2003-06-07 11:43:33.000000000 -0400 -@@ -0,0 +1,140 @@ -+/* -+ * Copyright (C) 2000 Regents of the University of California -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ * -+ */ -+ -+#if !defined(_LINUX_CONDVAR_H) -+#define _LINUX_CONDVAR_H -+ -+#if defined(__KERNEL__) -+ -+#include -+#include -+ -+#define CV_RET_SIGPENDING 0 -+#define CV_RET_TIMEOUT (-1) -+#define CV_RET_NORMAL 1 -+ -+struct kcondvar_task { -+ struct task_struct *task; /* need to wrap task in this */ -+ struct list_head list; /* to thread as a list */ -+ int blocked; -+}; -+ -+typedef struct { -+ struct list_head task_list; /* list of kcondvar_task's */ -+} kcondvar_t; -+ -+#define kcondvar_wait(c,l,fl) debug_kcondvar_wait(c, l, fl, 0, TASK_UNINTERRUPTIBLE) -+#define kcondvar_waitsig(c,l,fl) debug_kcondvar_wait(c, l, fl, 0, TASK_INTERRUPTIBLE) -+#define kcondvar_timedwait(c,l,fl,to) debug_kcondvar_wait(c, l, fl, to, TASK_UNINTERRUPTIBLE) -+#define kcondvar_timedwaitsig(c,l,fl,to) debug_kcondvar_wait(c, l, fl, to, TASK_INTERRUPTIBLE) -+#define kcondvar_wakeupone(c,l) kcondvar_wakeup(c, l, 0) -+#define kcondvar_wakeupall(c,l) kcondvar_wakeup(c, l, 1) -+ -+extern __inline__ void -+kcondvar_init(kcondvar_t *c) -+{ -+ INIT_LIST_HEAD(&c->task_list); -+} -+ -+extern __inline__ void -+kcondvar_destroy(kcondvar_t *c) -+{ -+ ASSERT(list_empty(&c->task_list)); -+} -+ -+/* -+ * We thread a struct kcondvar_task, allocated on the stack, onto the kcondvar_t's -+ * task_list, and take it off again when we wake up. 
-+ */ -+extern __inline__ int -+debug_kcondvar_wait(kcondvar_t *c, spinlock_t *l, unsigned long *fl, long tmo, int state) -+{ -+ struct kcondvar_task cvt; -+ int ret = CV_RET_NORMAL; -+ -+ ASSERT(!in_interrupt()); /* we can block */ -+ ASSERT(SPINLOCK_HELD(l)); /* enter holding lock */ -+ -+ cvt.task = current; -+ cvt.blocked = 1; -+ list_add(&cvt.list, &c->task_list); -+ do { -+ /* Note: we avoid using TASK_UNINTERRUPTIBLE here because avenrun() -+ * (linux/kernel/timer.c:calc_load()) -+ * computation treats it like TASK_RUNNABLE hence creates false high -+ * load averages when we create kernel threads. -+ * The cvt.blocked flag distinguishes a signal wakeup from a kcondvar_wakeup. -+ * -+ * However, if we do take a signal we could end up busily spinning here, if -+ * we ignore it (state == TASK_UNINTERRUPTIBLE) so once we see a signal -+ * pending we do sleep TASK_UNINTERRUPTIBLE to stop a busy spin. -+ * I have now blocked all signals for kernel threads to prevent this -+ * happening but other users of kcondvar_wait may still hit this spin. -+ */ -+ set_current_state (signal_pending(current) ? state : TASK_INTERRUPTIBLE); -+ -+ if (fl) -+ spin_unlock_irqrestore(l, *fl); -+ else -+ spin_unlock(l); -+ if (tmo) { -+ if (tmo <= jiffies || !schedule_timeout(tmo - jiffies)) -+ ret = CV_RET_TIMEOUT; -+ } else -+ schedule(); -+ if (fl) -+ spin_lock_irqsave (l, *fl); -+ else -+ spin_lock(l); -+ -+ /* signal_pending - Only exit the loop if the user was waiting TASK_INTERRUPTIBLE */ -+ if ((state == TASK_INTERRUPTIBLE) && signal_pending(current)) -+ ret = CV_RET_SIGPENDING; -+ -+ } while (cvt.blocked && ret == CV_RET_NORMAL); -+ list_del(&cvt.list); -+ -+ /* Reset task state in case we didn't sleep above */ -+ set_current_state (TASK_RUNNING); -+ -+ return ret; /* return holding lock */ -+} -+ -+extern __inline__ void -+kcondvar_wakeup(kcondvar_t *c, spinlock_t *l, int wakeall) -+{ -+ struct list_head *lp; -+ struct kcondvar_task *cvtp; -+ -+ ASSERT(SPINLOCK_HELD(l)); /* already holding lock */ -+ for (lp = c->task_list.next; lp != &c->task_list; lp = lp->next) { -+ cvtp = list_entry(lp, struct kcondvar_task, list); -+ if (cvtp->blocked) { -+ cvtp->blocked = 0; -+ /* wake_up_process added to kernel/ksyms.c */ -+ wake_up_process(cvtp->task); -+ if (!wakeall) -+ break; -+ } -+ } -+} /* return still holding lock */ -+ -+ -+#endif /* __KERNEL__ */ -+#endif /* _LINUX_CONDVAR_H */ -diff -urN clean/include/qsnet/config.h linux-2.6.9/include/qsnet/config.h ---- clean/include/qsnet/config.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/qsnet/config.h 2005-04-28 18:59:31.000000000 -0400 -@@ -0,0 +1,195 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef _QSNET_CONFIG_H -+#define _QSNET_CONFIG_H -+ -+#ident "$Id: config.h,v 1.24 2005/04/28 22:59:31 robin Exp $" -+/* $Source: /cvs/master/quadrics/qsnet/config.h,v $*/ -+ -+ -+/* -+ * QSNET standard defines : -+ * -+ * Target operating system defines -+ * SOLARIS -+ * TRU64UNIX/DIGITAL_UNIX -+ * LINUX -+ * -+ * Target processor defines -+ * SPARC -+ * ALPHA -+ * I386 -+ * IA64 -+ * X86_64 -+ * -+ * Byte order defines -+ * __LITTLE_ENDIAN__ -+ * __BIG_ENDIAN__ -+ * -+ * Data size defines -+ * _LP64 - LP64 - long/pointer is 64 bits -+ * _ILP32 - LP32 - long/pointer is 32 bits -+ * -+ * Elan defines for main processor -+ * __MAIN_LITTLE_ENDIAN__ - main byte order (for thread code) -+ * __MAIN_BIG_ENDIAN__ -+ * _MAIN_LP64 - main long size (for thread code) -+ * _MAIN_ILP32 -+ * -+ * Compiling for kernel (defined in makefile) -+ * _KERNEL -+ * -+ */ -+ -+#if defined(__LP64__) && !defined(_LP64) -+# define _LP64 -+#endif -+ -+#if defined(__arch64__) && !defined(_LP64) && !defined(_ILP32) -+# define _LP64 -+#endif -+ -+#if defined(__alpha__) && !defined(_LP64) && !defined(_ILP32) -+# define _LP64 -+#endif -+ -+#if !defined(__arch64__) && !defined(_ILP32) && !defined(_LP64) -+# define _ILP32 -+#endif -+ -+#if defined(__ELAN__) || defined(__ELAN3__) -+ -+#define __LITTLE_ENDIAN__ -+ -+#if defined(__host_solaris) && defined(__host_sparc) -+#define SOLARIS -+#define SPARC -+#define SOLARIS_SPARC -+#define _MAIN_ILP32 -+#define __MAIN_BIG_ENDIAN__ -+ -+#elif defined(__host_osf) -+#define TRU64UNIX -+#define DIGITAL_UNIX -+#define ALPHA -+#define _MAIN_LP64 -+#define __MAIN_LITTLE_ENDIAN__ -+ -+#elif defined(__host_linux) && defined(__host_alpha) -+#define LINUX -+#define ALPHA -+#define LINUX_ALPHA -+#define _MAIN_LP64 -+#define __MAIN_LITTLE_ENDIAN__ -+ -+#elif defined(__host_linux) && defined(__host_sparc) -+#define LINUX -+#define SPARC -+#define LINUX_SPARC -+#define __MAIN_BIG_ENDIAN__ -+#ifdef __KERNEL__ -+# define _MAIN_LP64 -+#else -+# define _MAIN_ILP32 -+#endif -+ -+#elif defined(__host_linux) && defined(__host_i386) -+#define LINUX -+#define I386 -+#define LINUX_I386 -+#define _MAIN_ILP32 -+#define __MAIN_LITTLE_ENDIAN__ -+ -+#elif defined(__host_linux) && defined(__host_ia64) -+#define LINUX -+#define IA64 -+#define LINUX_IA64 -+#define _MAIN_LP64 -+#define __MAIN_LITTLE_ENDIAN__ -+ -+#elif defined(__host_linux) && defined(__host_x86_64) -+#define LINUX -+#define X86_64 -+#define LINUX_X86_64 -+#define _MAIN_LP64 -+#define __MAIN_LITTLE_ENDIAN__ -+ -+#else -+#error Cannot determine operating system/processor architecture. 
-+#endif -+ -+#else /* !defined(__ELAN3__) */ -+ -+#if (defined(sun) || defined(__sun)) && defined(sparc) && !defined(__sparcv9) /* Sun Solaris 5.6 */ -+#define SOLARIS -+#define SPARC -+#define SOLARIS_SPARC -+#ifndef __BIG_ENDIAN__ -+#define __BIG_ENDIAN__ -+#endif -+ -+#elif (defined(sun) || defined(__sun)) && defined(sparc) && defined(__sparcv9) /* Sun Solaris 5.7 */ -+#define SOLARIS -+#define SPARC -+#define SOLARIS_SPARC -+#define __BIG_ENDIAN__ -+ -+#elif defined(__osf__) && defined(__alpha) /* Digital Unix */ -+#define TRU64UNIX -+#define DIGITAL_UNIX -+#define ALPHA -+#define __LITTLE_ENDIAN__ -+ -+#elif (defined(linux) || defined(__linux__)) && defined(__alpha) /* Linux Alpha */ -+ -+#define LINUX -+#define ALPHA -+#define LINUX_ALPHA -+#define __LITTLE_ENDIAN__ -+ -+#elif (defined(linux) || defined(__linux__)) && defined(__sparc) /* Linux Sparc */ -+ -+#define LINUX -+#define SPARC -+#define LINUX_SPARC -+#define __BIG_ENDIAN__ -+ -+#elif (defined(linux) || defined(__linux__)) && defined(__i386) /* Linux i386 */ -+ -+#define LINUX -+#define I386 -+#define LINUX_I386 -+#define __LITTLE_ENDIAN__ -+ -+#elif (defined(linux) || defined(__linux__)) && defined(__ia64) /* Linux ia64 */ -+ -+#define LINUX -+#define IA64 -+#define LINUX_IA64 -+#define __LITTLE_ENDIAN__ -+ -+#elif (defined(linux) || defined(__linux__)) && (defined(__x86_64) || defined(__x86_64__)) /* Linux x86_64 */ -+ -+#define LINUX -+#define X86_64 -+#define LINUX_X86_64 -+#define __LITTLE_ENDIAN__ -+ -+#elif defined(__QNXNTO__) -+#define QNX -+#define I386 -+#define __LITTLE_ENDIAN__ -+#else -+#error Cannot determine operating system/processor architecture. -+#endif -+ -+#endif -+ -+#include -+ -+#endif /* _QSNET_CONFIG_H */ -diff -urN clean/include/qsnet/crwlock.h linux-2.6.9/include/qsnet/crwlock.h ---- clean/include/qsnet/crwlock.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/qsnet/crwlock.h 2003-09-24 10:07:02.000000000 -0400 -@@ -0,0 +1,207 @@ -+/* -+ * Copyright (C) 2000 Regents of the University of California -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ * -+ */ -+ -+/* -+ * Complex - Reader/Writer locks -+ * Ref: "UNIX Systems for Modern Architectures", by Curt Schimmel, -+ * sec 11.6.3. -+ * -+ * This implementation is based on semaphores and may not be called from -+ * interrupt handlers. 
-+ * -+ */ -+ -+#if !defined(_LINUX_RWLOCK_H) -+#define _LINUX_RWLOCK_H -+ -+#if defined(__KERNEL__) -+ -+typedef enum { RD, WRT, ANY } crwlock_type_t; -+ -+#define crwlock_write_held(l) debug_crwlock_held(l, WRT, __BASE_FILE__,__LINE__) -+#define crwlock_read_held(l) debug_crwlock_held(l, RD, __BASE_FILE__, __LINE__) -+#define crwlock_held(l) debug_crwlock_held(l, ANY, __BASE_FILE__, __LINE__) -+ -+#define crwlock_read(l) debug_crwlock_read(l, __BASE_FILE__, __LINE__) -+#define crwlock_write(l) debug_crwlock_write(l, __BASE_FILE__, __LINE__) -+#define crwlock_done(l) debug_crwlock_done(l, __BASE_FILE__, __LINE__) -+ -+#if defined(DEBUG_RWLOCK) && defined(__alpha__) && !defined(DEBUG_SPINLOCK) -+#define DEBUG_SPINLOCK -+#endif -+ -+#include -+#include -+#include -+#include -+#include -+ -+#if !defined(DEBUG_SPINLOCK) -+#define debug_spin_lock(lock, file, line) spin_lock(lock) -+#endif -+ -+typedef struct { -+ spinlock_t m_lock; /* protects cnt fields below */ -+ int m_rdcnt; /* # of rdrs in crit section */ -+ int m_wrcnt; /* # of wrtrs in crit section */ -+ int m_rdwcnt; /* # of waiting readers */ -+ int m_wrwcnt; /* # of waiting writers */ -+ struct semaphore m_rdwait; /* sema where readers wait */ -+ struct semaphore m_wrwait; /* sema where writers wait */ -+ pid_t m_wrholder; /* task holding write lock */ -+} crwlock_t; -+ -+extern __inline__ void -+crwlock_init(crwlock_t *l) -+{ -+ l->m_lock = SPIN_LOCK_UNLOCKED; -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) -+ l->m_rdwait = MUTEX_LOCKED; -+ l->m_wrwait = MUTEX_LOCKED; -+#else -+ sema_init(&l->m_rdwait,0); -+ sema_init(&l->m_wrwait,0); -+#endif -+ l->m_rdcnt = l->m_wrcnt = l->m_rdwcnt = l->m_wrwcnt = 0; -+ l->m_wrholder = PID_NONE; -+} -+ -+extern __inline__ void -+crwlock_destroy(crwlock_t *l) -+{ -+ ASSERT(l->m_rdcnt == 0 && l->m_wrcnt == 0); -+} -+ -+/* -+ * If a writer has the lock presently or there are writers waiting, -+ * then we have to wait. -+ */ -+extern __inline__ void -+debug_crwlock_read(crwlock_t *l, char *file, int line) -+{ -+ ASSERT(!in_interrupt()); -+ spin_lock(&l->m_lock); -+ if (l->m_wrcnt || l->m_wrwcnt) { -+ l->m_rdwcnt++; -+ spin_unlock(&l->m_lock); -+ down(&l->m_rdwait); /* P */ -+ } else { -+ l->m_rdcnt++; -+ spin_unlock(&l->m_lock); -+ } -+} -+ -+/* -+ * If we're the last reader, and a writer is waiting, -+ * then let the writer go now. 
-+ */ -+/* private */ -+extern __inline__ void -+debug_crwlock_read_done(crwlock_t *l, char *file, int line) -+{ -+ spin_lock(&l->m_lock); -+ l->m_rdcnt--; -+ if (l->m_wrwcnt && l->m_rdcnt == 0) { -+ l->m_wrcnt = 1; -+ l->m_wrwcnt--; -+ spin_unlock(&l->m_lock); -+ up(&l->m_wrwait); /* V */ -+ return; -+ } -+ spin_unlock(&l->m_lock); -+} -+ -+extern __inline__ void -+debug_crwlock_write(crwlock_t *l, char *file, int line) -+{ -+ ASSERT(!in_interrupt()); -+ spin_lock(&l->m_lock); -+ if (l->m_wrcnt || l->m_rdcnt) { /* block if lock is in use */ -+ l->m_wrwcnt++; -+ spin_unlock(&l->m_lock); -+ down(&l->m_wrwait); /* P */ -+ } else { /* lock is not in use */ -+ l->m_wrcnt = 1; -+ spin_unlock(&l->m_lock); -+ } -+ l->m_wrholder = current->pid; -+} -+ -+/* private */ -+extern __inline__ void -+debug_crwlock_write_done(crwlock_t *l, char *file, int line) -+{ -+ int rdrs; -+ -+ spin_lock(&l->m_lock); -+ l->m_wrholder = PID_NONE; -+ if (l->m_rdwcnt) { /* let any readers go first */ -+ l->m_wrcnt = 0; -+ rdrs = l->m_rdwcnt; -+ l->m_rdcnt = rdrs; -+ l->m_rdwcnt = 0; -+ spin_unlock(&l->m_lock); -+ while (rdrs--) -+ up(&l->m_rdwait); /* V */ -+ } else if (l->m_wrwcnt) { /* or let any writer go */ -+ l->m_wrwcnt--; -+ spin_unlock(&l->m_lock); -+ up(&l->m_wrwait); /* V */ -+ } else { /* nobody waiting, unlock */ -+ l->m_wrcnt = 0; -+ spin_unlock(&l->m_lock); -+ } -+} -+ -+extern __inline__ void -+debug_crwlock_done(crwlock_t *l, char *file, int line) -+{ -+ if (l->m_wrholder == current->pid) -+ debug_crwlock_write_done(l, file, line); -+ else -+ debug_crwlock_read_done(l, file, line); -+} -+ -+/* -+ * Return nonzero if lock is held -+ */ -+extern __inline__ int -+debug_crwlock_held(crwlock_t *l, crwlock_type_t t, char *file, int line) -+{ -+ int res; -+ -+ spin_lock(&l->m_lock); -+ switch(t) { -+ case RD: -+ res = l->m_rdcnt; -+ break; -+ case WRT: -+ res = l->m_wrcnt; -+ break; -+ case ANY: -+ res = l->m_wrcnt + l->m_rdcnt; -+ break; -+ } -+ spin_unlock(&l->m_lock); -+ -+ return res; -+} -+ -+#endif /* __KERNEL__ */ -+#endif /* _LINUX_RWLOCK_H */ -diff -urN clean/include/qsnet/ctrl_linux.h linux-2.6.9/include/qsnet/ctrl_linux.h ---- clean/include/qsnet/ctrl_linux.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/qsnet/ctrl_linux.h 2003-03-26 04:32:03.000000000 -0500 -@@ -0,0 +1,37 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __QSNET_CTRL_LINUX_H -+#define __QSNET_CTRL_LINUX_H -+ -+#ident "$Id: ctrl_linux.h,v 1.3 2003/03/26 09:32:03 mike Exp $" -+/* $Source: /cvs/master/quadrics/qsnet/ctrl_linux.h,v $*/ -+ -+#define QSNETIO_USER_BASE 0x40 -+ -+#define QSNETIO_DEBUG_DUMP _IO ('e', QSNETIO_USER_BASE + 0) -+ -+typedef struct qsnetio_debug_buffer_struct -+{ -+ caddr_t addr; -+ size_t len; -+} QSNETIO_DEBUG_BUFFER_STRUCT; -+#define QSNETIO_DEBUG_BUFFER _IOWR ('e', QSNETIO_USER_BASE + 1, QSNETIO_DEBUG_BUFFER_STRUCT) -+ -+typedef struct qsnetio_debug_kmem_struct -+{ -+ void *handle; -+} QSNETIO_DEBUG_KMEM_STRUCT; -+#define QSNETIO_DEBUG_KMEM _IOWR ('e', QSNETIO_USER_BASE + 2, QSNETIO_DEBUG_KMEM_STRUCT) -+ -+#endif /* __QSNET_CTRL_LINUX_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "linux" -+ * End: -+ */ -diff -urN clean/include/qsnet/debug.h linux-2.6.9/include/qsnet/debug.h ---- clean/include/qsnet/debug.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/qsnet/debug.h 2005-03-23 06:04:54.000000000 -0500 -@@ -0,0 +1,68 @@ -+/* -+ * Copyright (C) 2000 Regents of the University of California -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ * -+ */ -+#ifndef _QSNET_DEBUG_H -+#define _QSNET_DEBUG_H -+ -+#if defined(DIGITAL_UNIX) -+#include -+#elif defined(LINUX) -+extern int qsnet_assfail (char *ex, const char *func, char *file, int line); -+ -+#define ASSERT(EX) do { \ -+ if (!(EX) && qsnet_assfail (#EX, __FUNCTION__, __BASE_FILE__, __LINE__)) { \ -+ BUG(); \ -+ } \ -+} while (0) -+#endif /* DIGITAL_UNIX */ -+ -+/* debug.c */ -+extern void qsnet_debug_init(void); -+extern void qsnet_debug_fini(void); -+extern void qsnet_debug_disable(int); -+extern void qsnet_debug_alloc(void); -+ -+#define QSNET_DEBUG_BUFFER ((unsigned int)(0x01)) -+#define QSNET_DEBUG_CONSOLE ((unsigned int)(0x02)) -+#define QSNET_DEBUG_BUF_CON ( QSNET_DEBUG_BUFFER | QSNET_DEBUG_CONSOLE ) -+ -+#ifdef __GNUC__ -+extern void qsnet_debugf (unsigned int mode, const char *fmt, ...) -+ __attribute__ ((format (printf,2,3))); -+extern void kqsnet_debugf (char *fmt, ...) 
-+ __attribute__ ((format (printf,1,2))); -+#else -+extern void qsnet_debugf (unsigned int mode, const char *fmt, ...); -+extern void kqsnet_debugf (char *fmt, ...); -+#endif -+extern void qsnet_vdebugf (unsigned int mode, const char *prefix, const char *fmt, va_list ap); -+extern int qsnet_debug_buffer(caddr_t ubuffer, int len); -+extern int qsnet_debug_dump (void); -+extern int qsnet_debug_kmem (void *handle); -+ -+extern void qsnet_debug_buffer_on(void); -+extern void qsnet_debug_buffer_clear(void); -+extern void qsnet_debug_buffer_mark(char *str); -+ -+#endif /* _QSNET_DEBUG_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "linux" -+ * End: -+ */ -diff -urN clean/include/qsnet/kcompat.h linux-2.6.9/include/qsnet/kcompat.h ---- clean/include/qsnet/kcompat.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/qsnet/kcompat.h 2005-07-05 11:09:03.000000000 -0400 -@@ -0,0 +1,27 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __QSNET_KCOMPAT_H -+#define __QSNET_KCOMPAT_H -+ -+#ident "$Id: kcompat.h,v 1.1.2.1 2005/07/05 15:09:03 mike Exp $" -+/* $Source: /cvs/master/quadrics/qsnet/kcompat.h,v $*/ -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) -+ -+#define module_param(VAR,TYPE,PERM) MODULE_PARM(VAR,"i") -+ -+#endif /* KERNEL_VERSION(2,6,0) */ -+ -+#endif /* __QSNET_KCOMPAT_H */ -+ -+ -+ -+ -+ -+ -+ -diff -urN clean/include/qsnet/kernel.h linux-2.6.9/include/qsnet/kernel.h ---- clean/include/qsnet/kernel.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/qsnet/kernel.h 2005-07-20 07:35:37.000000000 -0400 -@@ -0,0 +1,39 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __QSNET_KERNEL_H -+#define __QSNET_KERNEL_H -+ -+#ident "$Id: kernel.h,v 1.8.18.1 2005/07/20 11:35:37 mike Exp $" -+/* $Source: /cvs/master/quadrics/qsnet/kernel.h,v $*/ -+ -+#include -+#include -+ -+#if defined(SOLARIS) -+#include -+#endif -+ -+#if defined(DIGITAL_UNIX) -+#include -+#endif -+ -+#if defined(LINUX) -+#include -+#endif -+ -+#include -+#include -+ -+#endif /* __QSNET_KERNEL_H */ -+ -+ -+ -+ -+ -+ -+ -diff -urN clean/include/qsnet/kernel_linux.h linux-2.6.9/include/qsnet/kernel_linux.h ---- clean/include/qsnet/kernel_linux.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/qsnet/kernel_linux.h 2005-09-07 10:35:03.000000000 -0400 -@@ -0,0 +1,374 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __QSNET_KERNEL_LINUX_H -+#define __QSNET_KERNEL_LINUX_H -+ -+#ident "$Id: kernel_linux.h,v 1.69.2.3 2005/09/07 14:35:03 mike Exp $" -+/* $Source: /cvs/master/quadrics/qsnet/kernel_linux.h,v $*/ -+ -+#include -+#if defined(MODVERSIONS) -+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0) -+#include -+#else -+#include -+#endif -+#endif -+ -+#include -+#include -+ -+/* ASSERT(spin_is_locked(l)) would always fail on UP kernels */ -+#if defined(CONFIG_SMP) -+#define SPINLOCK_HELD(l) spin_is_locked(l) -+#else -+#define SPINLOCK_HELD(l) (1) -+#endif -+ -+#include -+#include -+ -+#include -+#include -+ -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include -+#include -+#include -+ -+#if defined(LINUX_ALPHA) -+# include /* for TSUNAMI_MEM */ -+#endif -+ -+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0) -+# undef MOD_INC_USE_COUNT -+# undef MOD_DEC_USE_COUNT -+# define MOD_INC_USE_COUNT -+# define MOD_DEC_USE_COUNT -+#endif -+ -+#define MIN(a,b) ((a) > (b) ? (b) : (a)) -+#define MAX(a,b) ((a) > (b) ? (a) : (b)) -+ -+/* stray types */ -+typedef u64 u_longlong_t; -+typedef unsigned long uintptr_t; -+typedef int bool_t; -+ -+typedef unsigned long virtaddr_t; /* virtual address */ -+typedef unsigned long ioaddr_t; /* io address */ -+typedef unsigned long sdramaddr_t; /* elan sdram offset */ -+ -+/* 386 kernel can be compiled with PAE enabled to use a 44 bit physical address */ -+#if defined(CONFIG_X86_PAE) -+typedef unsigned long long physaddr_t; -+#else -+typedef unsigned long physaddr_t; -+#endif -+ -+/* ticks since reboot, and tick freq */ -+#define lbolt jiffies -+#define hz HZ -+ -+/* System page size and friends */ -+#define PAGESIZE PAGE_SIZE -+#define PAGESHIFT PAGE_SHIFT -+#define PAGEOFFSET (PAGE_SIZE - 1) -+#define PAGEMASK PAGE_MASK -+ -+#define PAGE_ALIGNED(a) (((a) & PAGE_MASK) == a) -+ -+/* convert between bytes and pages */ -+#define btop(b) ((unsigned long)(b) >> PAGE_SHIFT) /* rnd down */ -+#define btopr(b) btop(PAGE_ALIGN((unsigned long) b)) /* rnd up */ -+#define ptob(p) ((unsigned long)(p) << PAGE_SHIFT) -+ -+/* round up sz to the nearest multiple of blk */ -+#define roundup(sz,blk) ((blk) * ((sz) / (blk) + ((sz) % (blk) ? 
1 : 0)))
-+
-+/* send a signal to a process */
-+#define psignal(pr,sig) send_sig(sig,pr,0)
-+
-+/* microsecond delay */
-+#define DELAY(us) udelay(us)
-+
-+/* macro macros */
-+#define MACRO_BEGIN do {
-+#define MACRO_END } while (0)
-+
-+/* D-Unix compatible errno values */
-+#define ESUCCESS 0
-+#define EFAIL 255
-+
-+/* ASSERT(NO_LOCKS_HELD) will be a no-op */
-+#define NO_LOCKS_HELD 1
-+
-+/* misc */
-+typedef int label_t;
-+#define on_fault(ljp) ((ljp) == NULL)
-+#define _NOTE(X)
-+#define no_fault() ((void) 0)
-+#define panicstr 0
-+
-+/* return from system call is -EXXX on linux */
-+#define set_errno(e) (-(e))
-+
-+/*
-+ * BSD-style byte ops
-+ */
-+
-+#define bcmp(src1,src2,len) memcmp(src1,src2,len)
-+#define bzero(dst,len) memset(dst,0,len)
-+#define bcopy(src,dst,len) memcpy(dst,src,len)
-+
-+#define preemptable_start do { long must_yield_at = lbolt + (hz/10);
-+#define preemptable_end } while (0)
-+#define preemptable_check() do {\
-+ if ((lbolt - must_yield_at) > 0)\
-+ {\
-+ preemptable_yield() ; \
-+ must_yield_at = lbolt + (hz/10);\
-+ }\
-+ } while (0)
-+
-+#define preemptable_yield() schedule()
-+
-+#define CURPROC() current
-+#define CURTHREAD() current
-+#define SUSER() suser()
-+
-+/* 64 bit IO operations on 32 bit intel cpus using MMX */
-+#if defined(LINUX_I386)
-+extern u64 qsnet_readq (volatile u64 *ptr);
-+extern void qsnet_writeq (u64 value, volatile u64 *ptr);
-+
-+#define readq(ptr) qsnet_readq((void *) ptr)
-+#define writeq(val,ptr) qsnet_writeq(val, (void *)ptr)
-+#endif
-+
-+/*
-+ * Memory barriers
-+ */
-+#ifndef mmiob
-+# define mmiob() mb()
-+#endif
-+
-+/*
-+ * Exit handlers
-+ */
-+#define HANDLER_REGISTER(func,arg,flags) xa_handler_register(func,arg,flags)
-+#define HANDLER_UNREGISTER(func,arg,flags) xa_handler_unregister(func,arg,flags)
-+
-+/*
-+ * KMEM_GETPAGES and KMEM_ALLOC both call kmem_alloc, which
-+ * translates the call to kmalloc if < PAGE_SIZE, or vmalloc
-+ * if >= PAGE_SIZE. vmalloc will always return a page-aligned
-+ * region rounded up to the nearest page, while kmalloc will
-+ * return bits and pieces of a page.
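-+ *
-+ * For example (an illustrative sketch only; foo_t stands for any
-+ * caller-defined type), a sleeping, zero-filled allocation and its
-+ * matching free might look like:
-+ *
-+ *	foo_t *p;
-+ *	KMEM_ZALLOC(p, foo_t *, sizeof(foo_t), 1);
-+ *	if (p != NULL)
-+ *		KMEM_FREE(p, sizeof(foo_t));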
-+ */ -+ -+#ifdef KMEM_DEBUG -+extern void *qsnet_kmem_alloc_debug(int len, int sleep, int zerofill, char *file, int line); -+extern void qsnet_kmem_free_debug(void *ptr, int len, char *file, int line); -+#define KMEM_ALLOC(ptr,type,len,sleep) \ -+ { KMEM_ASSERT(sleep); (ptr)=(type)qsnet_kmem_alloc_debug(len,sleep,0,__FILE__,__LINE__); } -+#define KMEM_ZALLOC(ptr,type,len,sleep) \ -+ { KMEM_ASSERT(sleep); (ptr)=(type)qsnet_kmem_alloc_debug(len,sleep,1,__FILE__,__LINE__); } -+ -+#define KMEM_FREE(ptr,len) qsnet_kmem_free_debug((void *)ptr,len,__FILE__,__LINE__) -+ -+#else -+ -+extern void *qsnet_kmem_alloc(int len, int sleep, int zerofill); -+extern void qsnet_kmem_free(void *ptr, int len); -+ -+#define KMEM_ALLOC(ptr,type,len,sleep) \ -+ { KMEM_ASSERT(sleep); (ptr)=(type)qsnet_kmem_alloc(len,sleep,0); } -+#define KMEM_ZALLOC(ptr,type,len,sleep) \ -+ { KMEM_ASSERT(sleep); (ptr)=(type)qsnet_kmem_alloc(len,sleep,1); } -+ -+#define KMEM_FREE(ptr,len) qsnet_kmem_free((void *)ptr,len) -+ -+#endif -+extern void qsnet_kmem_display(void *handle); -+extern physaddr_t kmem_to_phys(void *ptr); -+ -+#define KMEM_ASSERT(sleep) ASSERT(!(in_interrupt() && sleep)) -+ -+ -+#define KMEM_GETPAGES(ptr,type,pgs,sleep) KMEM_ZALLOC(ptr,type,ptob(pgs),sleep) -+#define KMEM_FREEPAGES(ptr,pgs) KMEM_FREE(ptr,ptob(pgs)); -+ -+/* -+ * Copying from user space -> kernel space (perms checked) -+ */ -+#define copyin(up,kp,size) copy_from_user(kp,up,size) -+#define copyin_noerr(up,kp,size) copy_from_user(kp,up,size) -+ -+/* get_user() gets xfer width right */ -+#define fulinux(ret, up) (get_user(ret, (up)) == 0 ? ret : -1) -+#define fulinuxp(ret, up) (get_user(ret, (up)) == 0 ? ret : NULL) -+ -+extern __inline__ int fubyte (u8 *up) { u8 ret; return fulinux(ret, up);} -+extern __inline__ int fusword (u16 *up) { u16 ret; return fulinux(ret, up);} -+extern __inline__ int fuword (u32 *up) { u32 ret; return fulinux(ret, up);} -+#if BITS_PER_LONG > 32 -+extern __inline__ u64 fulonglong(u64 *up) { u64 ret; return fulinux(ret, up);} -+#else -+extern __inline__ u64 fulonglong(u64 *up) { return ((u64) fuword((u32 *)up) | (((u64) fuword(((u32 *)up)+1))<<32)); } -+#endif -+extern __inline__ void *fuptr (void **up) { void *ret; return fulinuxp(ret,up);} -+ -+#define fubyte_noerr(up) fubyte(up) -+#define fusword_noerr(up) fusword(up) -+#define fuword_noerr(up) fuword(up) -+#define fulonglong_noerr(up) fulonglong(up) -+#define fuptr_noerr(up) fuptr(up) -+ -+extern __inline__ int copyinstr(char *up, char *kp, int max, int *size) -+{ -+ for (*size = 1; *size <= max; (*size)++) { -+ if (get_user(*kp, up++) != 0) -+ return EFAULT; /* bad user space addr */ -+ if (*kp++ == '\0') -+ return 0; /* success */ -+ } -+ *size = max; -+ return ENAMETOOLONG; /* runaway string */ -+} -+ -+/* -+ * Copying from kernel space -> user space (perms checked) -+ */ -+ -+#define copyout(kp,up,size) copy_to_user(up,kp,size) -+#define copyout_noerr(kp,up,size) copy_to_user(up,kp,size) -+ -+/* put_user() gets xfer width right */ -+#define sulinux(val, up) (put_user(val, (up)) == 0 ? 0 : -1) -+ -+extern __inline__ int subyte (u8 *up, u8 val) { return sulinux(val, up); } -+extern __inline__ int susword (u16 *up, u16 val) { return sulinux(val, up); } -+extern __inline__ int suword (u32 *up, u32 val) { return sulinux(val, up); } -+#if BITS_PER_LONG > 32 -+extern __inline__ int sulonglong(u64 *up, u64 val) { return sulinux(val, up); } -+#else -+extern __inline__ int sulonglong(u64 *up, u64 val) { return (suword((u32 *) up, (u32) val) == 0 ? 
-+ suword(((u32 *) up)+1, (u32) (val >> 32)) : -1); } -+#endif -+extern __inline__ int suptr (void **up,void *val){ return sulinux(val, up); } -+ -+#define subyte_noerr(up,val) subyte(up,val) -+#define susword_noerr(up,val) susword(up,val) -+#define suword_noerr(up,val) suword(up,val) -+#define sulonglong_noerr(up,val) sulonglong(up,val) -+#define suptr_noerr(up,val) suptr(up,val) -+ -+/* -+ * /proc/qsnet interface -+ */ -+extern inline int -+str_append(char *buf, char *add, int size) -+{ -+#define TRUNC_MSG "[Output truncated]\n" -+ int full = 0; -+ int max = size - strlen(TRUNC_MSG) - strlen(add) - 1; -+ -+ if (strlen(buf) > max) { -+ strcat(buf, TRUNC_MSG); -+ full = 1; -+ } else -+ strcat(buf, add); -+ return full; -+} -+ -+/* Spinlocks */ -+#define spin_lock_destroy(l) ((void) 0) -+ -+/* Complex - Reader/Writer locks - we added */ -+typedef crwlock_t krwlock_t; -+#define krwlock_init(l) crwlock_init(l) -+#define krwlock_destroy(l) crwlock_destroy(l) -+#define krwlock_write(l) crwlock_write(l) -+#define krwlock_read(l) crwlock_read(l) -+#define krwlock_done(l) crwlock_done(l) -+#define krwlock_is_locked(l) crwlock_held(l) -+#define krwlock_is_write_locked(l) crwlock_write_held(l) -+#define krwlock_is_read_locked(l) crwlock_read_held(l) -+ -+/* -+ * Timeouts - Solaris style. -+ */ -+typedef struct timer_list timer_fn_t; -+ -+extern inline void -+schedule_timer_fn(timer_fn_t *timer, void (*fun)(void *), void *arg, long hz_delay) -+{ -+ init_timer(timer); -+ -+ timer->function = (void (*)(unsigned long)) fun; -+ timer->data = (unsigned long) arg; -+ timer->expires = jiffies + hz_delay; -+ -+ add_timer(timer); -+} -+ -+/* returns 1 if timer_fn was cancelled */ -+extern inline int -+cancel_timer_fn(timer_fn_t *timer) -+{ -+ return (del_timer_sync(timer)); -+} -+ -+extern inline int -+timer_fn_queued(timer_fn_t *timer) -+{ -+ return (timer_pending (timer)); -+} -+/* -+ * Hold/release CPU's. -+ */ -+ -+extern void cpu_hold_all(void); -+extern void cpu_release_all(void); -+#define CAPTURE_CPUS() cpu_hold_all() -+#define RELEASE_CPUS() cpu_release_all() -+ -+#define IASSERT ASSERT -+ -+/* code to support multipage procfs entries */ -+ -+typedef struct display_info { -+ void (*func)(long, char *, ...); -+ long arg; -+} DisplayInfo; -+ -+typedef struct qsnet_proc_private -+{ -+ struct nodeset_private *pr_next; -+ void *pr_user_data; -+ char *pr_data; -+ int pr_data_len; -+ unsigned pr_off; -+ unsigned pr_len; -+ DisplayInfo pr_di; -+} QSNET_PROC_PRIVATE; -+ -+#endif /* __QSNET_KERNEL_LINUX_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "linux" -+ * End: -+ */ -diff -urN clean/include/qsnet/kpte.h linux-2.6.9/include/qsnet/kpte.h ---- clean/include/qsnet/kpte.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/qsnet/kpte.h 2005-03-18 08:56:40.000000000 -0500 -@@ -0,0 +1,132 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2004 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __QSNET_KPTE_H -+#define __QSNET_KPTE_H -+ -+#ident "@(#)$Id: kpte.h,v 1.5 2005/03/18 13:56:40 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $" -+/* $Source: /cvs/master/quadrics/qsnet/kpte.h,v $*/ -+ -+#include -+ -+#ifdef NO_RMAP -+# define pte_offset_kernel pte_offset -+# define pte_offset_map pte_offset -+# define pte_unmap(A) do { ; } while (0) -+#endif -+ -+/* -+ * Pte stuff -+ */ -+static __inline__ struct mm_struct * -+get_kern_mm(void) -+{ -+ return &init_mm; -+} -+ -+static __inline__ pte_t * -+find_pte_map(struct mm_struct *mm, unsigned long vaddr) -+{ -+ pgd_t *pgd; -+ pmd_t *pmd; -+ pte_t *ptep; -+ -+/* XXXX - need to handle huge tlb code */ -+ pgd = pgd_offset(mm, vaddr); -+ if (pgd_none(*pgd) || pgd_bad(*pgd)) -+ goto out; -+ -+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10) -+ { -+ pud_t *pud = pud_offset(pgd, vaddr); -+ if (pud_none(*pud) || pud_bad(*pud)) -+ goto out; -+ -+ pmd = pmd_offset(pud, vaddr); -+ } -+#else -+ pmd = pmd_offset(pgd, vaddr); -+#endif -+ if (pmd_none(*pmd) || pmd_bad (*pmd)) -+ goto out; -+ -+ ptep = pte_offset_map (pmd, vaddr); -+ if (! ptep) -+ goto out; -+ -+ if (pte_present (*ptep)) -+ return ptep; -+ -+ pte_unmap (ptep); -+out: -+ return NULL; -+} -+ -+static __inline__ pte_t * -+find_pte_kernel(unsigned long vaddr) -+{ -+ pgd_t *pgd; -+ pmd_t *pmd; -+ pte_t *pte; -+ -+ pgd = pgd_offset_k(vaddr); -+ if (pgd && !pgd_none(*pgd)) { -+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10) -+ pud_t *pud = pud_offset(pgd, vaddr); -+ if (pud && !pud_none(*pud)) { -+ pmd = pmd_offset(pud, vaddr); -+#else -+ pmd = pmd_offset(pgd, vaddr); -+#endif -+ if (pmd && pmd_present(*pmd)) { -+ pte = pte_offset_kernel(pmd, vaddr); -+ if (pte && pte_present(*pte)) -+ return (pte); -+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10) -+ } -+#endif -+ } -+ } -+ return (NULL); -+} -+ -+static __inline__ physaddr_t -+pte_phys(pte_t pte) -+{ -+#if defined(LINUX_ALPHA) -+ /* RedHat 7.1 2.4.3-12 -+ * They have now enabled Monster windows on Tsunami -+ * and so can use the Main's phys pte value -+ */ -+ return (pte_val(pte) >> (32-PAGE_SHIFT)); -+#elif defined(LINUX_I386) || defined(LINUX_X86_64) -+#if defined(_PAGE_NX) -+ return (pte_val(pte) & ~((1 << PAGE_SHIFT)-1) & ~_PAGE_NX); -+#else -+ return (pte_val(pte) & ~((1 << PAGE_SHIFT)-1)); -+#endif -+#elif defined(LINUX_SPARC) -+ return (pte_val(pte) & _PAGE_PADDR); -+#elif defined(LINUX_IA64) -+ return (pte_val(pte) & _PFN_MASK); -+#else -+#error Unknown architecture -+#endif -+} -+ -+#ifndef page_to_pfn -+#define page_to_pfn(page) (pte_phys(mk_pte(page, __pgprot(0))) >> PAGE_SHIFT) -+#endif -+ -+#endif /* __QSNET_KPTE_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "stroustrup" -+ * End: -+ */ -diff -urN clean/include/qsnet/kthread.h linux-2.6.9/include/qsnet/kthread.h ---- clean/include/qsnet/kthread.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/qsnet/kthread.h 2004-10-28 07:50:29.000000000 -0400 -@@ -0,0 +1,71 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * Copyright (c) 2002-2004 by Quadrics Ltd. 
-+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __QSNET_KTHREAD_H -+#define __QSNET_KTHREAD_H -+ -+#ident "@(#)$Id: kthread.h,v 1.1 2004/10/28 11:50:29 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $" -+/* $Source: /cvs/master/quadrics/qsnet/kthread.h,v $*/ -+ -+#include -+ -+/* -+ * kernel threads -+ */ -+extern __inline__ void -+kernel_thread_init(char *comm) -+{ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) -+#ifndef NO_NPTL -+# define sigmask_lock sighand->siglock -+#endif -+ lock_kernel(); -+ daemonize(); -+ reparent_to_init(); -+ -+ /* avoid getting signals */ -+ spin_lock_irq(¤t->sigmask_lock); -+ flush_signals(current); -+ sigfillset(¤t->blocked); -+ -+#ifdef NO_NPTL -+ recalc_sigpending(current); -+#else -+ recalc_sigpending(); -+#endif -+ -+ spin_unlock_irq(¤t->sigmask_lock); -+ -+ /* set our name for identification purposes */ -+ strncpy(current->comm, comm, sizeof(current->comm)); -+ -+ unlock_kernel(); -+#else -+ daemonize(comm); -+#endif -+} -+ -+extern __inline__ void * -+kernel_thread_wrap(caddr_t stk, int stksize, void (*proc)(void *), void *arg) -+{ -+ ASSERT(stk == NULL && stksize == 0); -+ kernel_thread((int (*)(void *))proc, arg, CLONE_FS | CLONE_FILES | CLONE_SIGHAND); -+ return (void *)1; /* non-null value */ -+} -+ -+#define kernel_thread_create(proc,arg) kernel_thread_wrap(NULL,0,(void (*)(void *))proc,arg) -+#define kernel_thread_exit() ((void) 0) -+#define kernel_thread_become_highpri() ((void) 0) -+ -+#endif /* __QSNET_KTHREAD_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "linux" -+ * End: -+ */ -diff -urN clean/include/qsnet/list.h linux-2.6.9/include/qsnet/list.h ---- clean/include/qsnet/list.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/qsnet/list.h 2003-10-27 08:55:33.000000000 -0500 -@@ -0,0 +1,80 @@ -+/* -+ * Copyright (c) 2003 by Quadrics Limited. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ident "@(#)$Id: list.h,v 1.5 2003/10/27 13:55:33 david Exp $" -+/* $Source: /cvs/master/quadrics/qsnet/list.h,v $*/ -+ -+#ifndef __QSNET_LIST_H -+#define __QSNET_LIST_H -+ -+/* Implementation of doubly linked lists - compatible with linux */ -+struct list_head -+{ -+ struct list_head *next; -+ struct list_head *prev; -+}; -+ -+#if !defined(LINUX) -+#if ! 
defined( offsetof ) -+#define offsetof(T,F) ((int )&(((T *)0)->F)) -+#endif -+ -+#define LIST_HEAD_INIT(name) { &(name), &(name) } -+ -+#define LIST_HEAD(name) \ -+ struct list_head name = LIST_HEAD_INIT(name) -+#endif -+ -+#define list_entry(ptr, type, off) \ -+ ((type *) ((unsigned long)(ptr) - offsetof (type,off))) -+ -+#define INIT_LIST_HEAD(list) \ -+MACRO_BEGIN \ -+ (list)->next = (list)->prev = (list); \ -+MACRO_END -+ -+#define list_add(new, list) \ -+MACRO_BEGIN \ -+ (list)->next->prev = (new); \ -+ (new)->next = (list)->next; \ -+ (new)->prev = (list); \ -+ (list)->next = (new); \ -+MACRO_END -+ -+#define list_add_tail(new, list) \ -+MACRO_BEGIN \ -+ (list)->prev->next = new; \ -+ (new)->prev = (list)->prev; \ -+ (new)->next = (list); \ -+ (list)->prev = (new); \ -+MACRO_END -+ -+#define list_del(entry) \ -+MACRO_BEGIN \ -+ (entry)->prev->next = (entry)->next; \ -+ (entry)->next->prev = (entry)->prev; \ -+MACRO_END -+ -+#define list_del_init(entry) \ -+MACRO_BEGIN \ -+ (entry)->prev->next = (entry)->next; \ -+ (entry)->next->prev = (entry)->prev; \ -+ (entry)->next = (entry)->prev = (entry); \ -+MACRO_END -+ -+#define list_empty(list) \ -+ ((list)->next == (list)) -+ -+#define list_for_each(pos,list) \ -+ for (pos = (list)->next; pos != (list); \ -+ pos = (pos)->next) -+ -+#define list_for_each_safe(pos,n,list) \ -+ for (pos = (list)->next, n = (pos)->next; pos != (list); \ -+ pos = n, n = (pos)->next) -+ -+#endif /* __QSNET_LIST_H */ -diff -urN clean/include/qsnet/module.h linux-2.6.9/include/qsnet/module.h ---- clean/include/qsnet/module.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/qsnet/module.h 2005-09-07 10:35:04.000000000 -0400 -@@ -0,0 +1,27 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __QSNET_MODULE_H -+#define __QSNET_MODULE_H -+ -+#ident "$Id: module.h,v 1.1.2.1 2005/09/07 14:35:04 mike Exp $" -+/* $Source: /cvs/master/quadrics/qsnet/module.h,v $*/ -+ -+#include -+ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) -+#include -+#endif -+ -+#endif /* __QSNET_MODULE_H */ -+ -+ -+ -+ -+ -+ -+ -diff -urN clean/include/qsnet/mutex.h linux-2.6.9/include/qsnet/mutex.h ---- clean/include/qsnet/mutex.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/qsnet/mutex.h 2003-06-26 12:05:45.000000000 -0400 -@@ -0,0 +1,91 @@ -+/* -+ * Copyright (C) 2000 Regents of the University of California -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ * -+ */ -+ -+#if !defined(_LINUX_MUTEX_H) -+#define _LINUX_MUTEX_H -+#if defined(__KERNEL__) -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define PID_NONE 0 -+ -+typedef struct -+{ -+ struct semaphore sem; -+ pid_t holder; -+} kmutex_t; -+ -+extern __inline__ void -+kmutex_init (kmutex_t *l) -+{ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) -+ l->sem = MUTEX; -+#else -+ init_MUTEX(&l->sem); -+#endif -+ l->holder = PID_NONE; -+} -+ -+extern __inline__ void -+kmutex_destroy (kmutex_t *l) -+{ -+ ASSERT (l->holder == PID_NONE); -+} -+ -+extern __inline__ void -+kmutex_lock (kmutex_t *l) -+{ -+ ASSERT(l->holder != current->pid); -+ down (&l->sem); -+ l->holder = current->pid; -+} -+ -+extern __inline__ void -+kmutex_unlock (kmutex_t *l) -+{ -+ ASSERT(l->holder == current->pid); -+ -+ l->holder = PID_NONE; -+ up (&l->sem); -+} -+ -+extern __inline__ int -+kmutex_trylock (kmutex_t *l) -+{ -+ if (down_trylock (&l->sem) == 0) -+ { -+ l->holder = current->pid; -+ return (1); -+ } -+ return (0); -+} -+ -+extern __inline__ int -+kmutex_is_locked (kmutex_t *l) -+{ -+ return (l->holder == current->pid); -+} -+ -+#endif /* __KERNEL__ */ -+#endif /* _LINUX_MUTEX_H */ -diff -urN clean/include/qsnet/procfs_linux.h linux-2.6.9/include/qsnet/procfs_linux.h ---- clean/include/qsnet/procfs_linux.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/qsnet/procfs_linux.h 2005-07-20 07:35:37.000000000 -0400 -@@ -0,0 +1,263 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __PROCFS_LINUX_H -+#define __PROCFS_LINUX_H -+ -+#ident "$Id: procfs_linux.h,v 1.13.2.2 2005/07/20 11:35:37 mike Exp $" -+/* $Source: /cvs/master/quadrics/qsnet/procfs_linux.h,v $ */ -+ -+#if defined(__KERNEL__) -+ -+#include -+#include -+#include -+ -+extern gid_t qsnet_procfs_gid; -+ -+/* borrowed from fs/proc/proc_misc - helper for proc_read_int */ -+static inline int -+qsnet_proc_calc_metrics(char *page, char **start, off_t off, int count, int *eof, int len) -+{ -+ if (len <= off+count) *eof = 1; -+ *start = page + off; -+ len -= off; -+ if (len>count) len = count; -+ if (len<0) len = 0; -+ return len; -+} -+ -+static inline int -+qsnet_proc_write_int(struct file *file, const char *buf, unsigned long count, void *data) -+{ -+ char tmpbuf[16]; -+ int res = count; -+ -+ if (count > sizeof(tmpbuf) - 1) -+ return (-EINVAL); -+ -+ MOD_INC_USE_COUNT; -+ if (copy_from_user(tmpbuf, buf, count)) -+ res = -EFAULT; -+ else -+ { -+ tmpbuf[count] = '\0'; -+ *(int *)data = simple_strtoul(tmpbuf, NULL, 0); -+ } -+ MOD_DEC_USE_COUNT; -+ -+ return (res); -+} -+ -+static inline int -+qsnet_proc_read_int(char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ int len, res; -+ -+ MOD_INC_USE_COUNT; -+ -+ len = sprintf(page, "%d\n", *(int *)data); -+ res = qsnet_proc_calc_metrics(page, start, off, count, eof, len); -+ -+ MOD_DEC_USE_COUNT; -+ return (res); -+} -+ -+static inline struct proc_dir_entry * -+qsnet_proc_register_int(struct proc_dir_entry *dir, char *path, int *var, int read_only) -+{ -+ struct proc_dir_entry *p; -+ -+ p = create_proc_entry(path, read_only ? S_IRUGO : S_IRUGO|S_IWUSR|S_IWGRP, dir); -+ if (p) { -+ if (! 
read_only) -+ p->write_proc = qsnet_proc_write_int; -+ p->read_proc = qsnet_proc_read_int; -+ p->data = var; -+ p->owner = THIS_MODULE; -+ p->gid = qsnet_procfs_gid; -+ } -+ return p; -+} -+ -+static inline int -+qsnet_proc_write_hex(struct file *file, const char *buf, unsigned long count, void *data) -+{ -+ char tmpbuf[16]; -+ int res = count; -+ -+ if (count > sizeof(tmpbuf) - 1) -+ return (-EINVAL); -+ -+ MOD_INC_USE_COUNT; -+ if (copy_from_user(tmpbuf, buf, count)) -+ res = -EFAULT; -+ else -+ { -+ tmpbuf[count] = '\0'; -+ *(int *)data = simple_strtoul(tmpbuf, NULL, 0); -+ } -+ MOD_DEC_USE_COUNT; -+ -+ return (res); -+} -+ -+static inline int -+qsnet_proc_read_hex(char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ int len, res; -+ -+ MOD_INC_USE_COUNT; -+ -+ len = sprintf(page, "0x%x\n", *(int *)data); -+ res = qsnet_proc_calc_metrics(page, start, off, count, eof, len); -+ -+ MOD_DEC_USE_COUNT; -+ return (res); -+} -+ -+static inline struct proc_dir_entry * -+qsnet_proc_register_hex(struct proc_dir_entry *dir, char *path, int *var, int read_only) -+{ -+ struct proc_dir_entry *p; -+ -+ p = create_proc_entry(path, read_only ? S_IRUGO : S_IRUGO|S_IWUSR|S_IWGRP, dir); -+ if (p) { -+ if (! read_only) -+ p->write_proc = qsnet_proc_write_hex; -+ p->read_proc = qsnet_proc_read_hex; -+ p->data = var; -+ p->owner = THIS_MODULE; -+ p->gid = qsnet_procfs_gid; -+ } -+ return p; -+} -+ -+#define QSNET_PROC_STR_LEN_MAX ((int)256) -+ -+static inline int -+qsnet_proc_write_str(struct file *file, const char *buf, unsigned long count, void *data) -+{ -+ int res = count; -+ -+ if (count > (QSNET_PROC_STR_LEN_MAX - 1)) -+ return (-EINVAL); -+ -+ MOD_INC_USE_COUNT; -+ if (copy_from_user((char *)data, buf, count)) -+ res = -EFAULT; -+ else -+ { -+ ((char *)data)[count] = '\0'; -+ /* remove linefeed */ -+ if ( (count) && (((char *)data)[count -1] == '\n')) -+ ((char *)data)[count -1] = '\0'; -+ } -+ MOD_DEC_USE_COUNT; -+ -+ return (res); -+} -+ -+static inline int -+qsnet_proc_read_str(char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ int len, res; -+ -+ if ( strlen(data) > (count + 1)) -+ return (-EINVAL); -+ -+ MOD_INC_USE_COUNT; -+ -+ /* cant output too much */ -+ if ( strlen(data) > (count + 1)) -+ { -+ MOD_DEC_USE_COUNT; -+ return (-EINVAL); -+ } -+ -+ -+ len = sprintf(page, "%s\n", (char *)data); -+ if (len > count) -+ { -+ MOD_DEC_USE_COUNT; -+ return (-EINVAL); -+ } -+ -+ res = qsnet_proc_calc_metrics(page, start, off, count, eof, len); -+ -+ MOD_DEC_USE_COUNT; -+ return (res); -+} -+ -+static inline struct proc_dir_entry * -+qsnet_proc_register_str(struct proc_dir_entry *dir, char *path, char *var, int read_only) -+{ -+ struct proc_dir_entry *p; -+ -+ p = create_proc_entry(path, read_only ? S_IRUGO : S_IRUGO|S_IWUSR|S_IWGRP, dir); -+ if (p) { -+ if (! 
read_only) -+ p->write_proc = qsnet_proc_write_str; -+ p->read_proc = qsnet_proc_read_str; -+ p->data = var; -+ p->owner = THIS_MODULE; -+ p->gid = qsnet_procfs_gid; -+ } -+ return p; -+} -+ -+extern struct proc_dir_entry *qsnet_procfs_root; -+extern struct proc_dir_entry *qsnet_procfs_config; -+ -+/* code for procfs handling multipage requests */ -+ -+void qsnet_proc_character_fill (long mode, char *fmt, ...); -+int qsnet_proc_release (struct inode *inode, struct file *file); -+ -+static inline ssize_t -+qsnet_proc_read (struct file *file, char *buf, size_t count, loff_t *ppos) -+{ -+ QSNET_PROC_PRIVATE *pr = (QSNET_PROC_PRIVATE *) file->private_data; -+ int error; -+ -+ if (pr->pr_off >= pr->pr_len) -+ return (0); -+ -+ if ((error = verify_area (VERIFY_WRITE, buf, count)) != 0) -+ return (error); -+ -+ if (count >= (pr->pr_len - pr->pr_off)) -+ count = pr->pr_len - pr->pr_off; -+ -+ copy_to_user (buf, pr->pr_data + pr->pr_off, count); -+ -+ pr->pr_off += count; -+ *ppos += count; -+ -+ return (count); -+} -+ -+ -+#ifdef NO_PDE -+static inline struct proc_dir_entry *PDE(const struct inode *inode) -+{ -+ return inode->u.generic_ip; -+} -+#endif -+#endif /* __KERNEL__ */ -+ -+#define QSNET_PROCFS_IOCTL "/proc/qsnet/ioctl" -+#define QSNET_PROCFS_KMEM_DEBUG "/proc/qsnet/kmem_debug" -+#define QSNET_PROCFS_VERSION "/proc/qsnet/version" -+ -+#endif /* __PROCFS_LINUX_H */ -+ -+/* -+ * Local variables: -+ * c-file-style: "linux" -+ * End: -+ */ -diff -urN clean/include/qsnet/types.h linux-2.6.9/include/qsnet/types.h ---- clean/include/qsnet/types.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/qsnet/types.h 2003-08-01 12:21:38.000000000 -0400 -@@ -0,0 +1,90 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef __QSNET_TYPES_H -+#define __QSNET_TYPES_H -+ -+#ident "$Id: types.h,v 1.16 2003/08/01 16:21:38 addy Exp $" -+/* $Source: /cvs/master/quadrics/qsnet/types.h,v $*/ -+ -+/* -+ * Include typedefs for ISO/IEC 9899:1990 standard types -+ * -+ * -+ * The following integer typedefs are used: -+ * -+ * int8_t, int16_t, int32_t, int64_t, intptr_t -+ * uint8_t, uint16_t, uint32_t, uint64_t, uintptr_t -+ * uchar_t, ushort_t, uint_t, ulong_t -+ * -+ * also defines the following: -+ * u_char, u_short, u_int, u_long, caddr_t -+ */ -+ -+#include -+ -+#if defined(SOLARIS) && defined(__KERNEL__) -+# include -+#endif -+ -+#if defined(SOLARIS) && !defined(__KERNEL__) -+# include -+# include -+#endif -+ -+#if defined(DIGITAL_UNIX) && defined(__KERNEL__) -+# include -+#endif -+ -+#if defined(DIGITAL_UNIX) && !defined(__KERNEL__) -+# include -+# include -+#endif -+ -+#if defined(LINUX) && defined(__KERNEL__) -+# include -+#endif -+ -+#if defined(LINUX) && !defined(__KERNEL__) -+# include -+# include -+# include -+ -+typedef unsigned char uchar_t; -+typedef unsigned short ushort_t; -+typedef unsigned int uint_t; -+typedef unsigned long ulong_t; -+#endif -+ -+#if defined(QNX) -+# include -+# include -+#endif -+ -+/* Define a type that will represent a Main CPU pointer -+ * on both the Main and the Elan -+ */ -+#ifdef __ELAN__ -+ -+#if defined(_MAIN_LP64) -+#define QSNET_MAIN_PTR uint64_t -+#else -+#define QSNET_MAIN_PTR uint32_t -+#endif -+ -+#else -+ -+#ifdef _LP64 -+#define QSNET_MAIN_PTR uint64_t -+#else -+#define QSNET_MAIN_PTR uint32_t -+#endif -+ -+#endif -+ -+ -+#endif /* __QSNET_TYPES_H */ -diff -urN clean/include/qsnet/workarounds.h linux-2.6.9/include/qsnet/workarounds.h ---- 
clean/include/qsnet/workarounds.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/qsnet/workarounds.h 2002-08-09 07:15:55.000000000 -0400 -@@ -0,0 +1,24 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ */ -+ -+#ifndef _QSNET_WORKAROUNDS_H -+#define _QSNET_WORKAROUNDS_H -+ -+#ident "$Id: workarounds.h,v 1.11 2002/08/09 11:15:55 addy Exp $" -+/* $Source: /cvs/master/quadrics/qsnet/workarounds.h,v $ */ -+ -+/* Elan workarounds */ -+#undef ELAN_REVA_SUPPORTED /* rev a elans no longer supported. */ -+#undef ELITE_REVA_SUPPORTED /* removed since RMS disables broadcast on rev A elites. */ -+#define ELAN_REVB_BUG_1 -+/* WORKAROUND for GNAT hw-elan3/3263 */ -+#define ELAN_REVB_BUG_2 -+ -+/* WORKAROUND for GNATs ic-elan3/3637 & ic-elan3/3550 */ -+#define ELAN_REVB_BUG_3 -+ -+#endif /* _QSNET_WORKAROUNDS_H */ -diff -urN clean/include/rms/rmscall.h linux-2.6.9/include/rms/rmscall.h ---- clean/include/rms/rmscall.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/rms/rmscall.h 2005-07-28 06:49:09.000000000 -0400 -@@ -0,0 +1,149 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. -+ * -+ * For licensing information please see the supplied COPYING file -+ * -+ * rmscall.h: user interface to rms kernel module -+ * -+ * $Id: rmscall.h,v 1.27.2.1 2005/07/28 10:49:09 robin Exp $ -+ * $Source: /cvs/master/quadrics/rmsmod/rmscall.h,v $ -+ * -+ */ -+ -+#ifndef RMSCALL_H_INCLUDED -+#define RMSCALL_H_INCLUDED 1 -+ -+#ident "$Id: rmscall.h,v 1.27.2.1 2005/07/28 10:49:09 robin Exp $" -+ -+#ifdef __cplusplus -+extern "C" { -+#endif -+ -+/* -+ * flags for rms_fork_register -+ * -+ * RMS_IOF is not in a public header file -+ */ -+#define RMS_IOF 1 /* inherit on fork */ -+ -+#ifndef __KERNEL__ -+#include -+#endif -+ -+#include -+#include -+ -+#define MAXCOREPATHLEN 32 -+ -+#if defined(SOLARIS) -+typedef long long rmstime_t; -+#else /* DIGITAL_UNIX */ -+typedef long rmstime_t; -+#endif -+ -+typedef enum { -+ -+ PRG_RUNNING = 0x01, /* program is running */ -+ PRG_ZOMBIE = 0x02, /* last process on a node has exited */ -+ PRG_NODE = 0x04, /* stats are complete for this node */ -+ PRG_KILLED = 0x08, /* program was killed */ -+ PRG_SUSPEND = 0x10, /* program is suspended */ -+ PRG_ERROR = 0x80 /* error collecting stats */ -+ -+} PRGSTATUS_FLAGS; -+ -+/* -+ * program time statistics extended in version 5 of the kernel module -+ */ -+typedef struct { -+ rmstime_t etime; /* elapsed cpu time (milli-secs) */ -+ rmstime_t atime; /* allocated cpu time (cpu milli-secs) */ -+ rmstime_t utime; /* user cpu time (cpu milli-secs) */ -+ rmstime_t stime; /* system cpu time (cpu milli-secs) */ -+ int ncpus; /* number of cpus allocated */ -+ int flags; /* program status flags */ -+ int mem; /* max memory size in MBytes */ -+ int pageflts; /* number of page faults */ -+ rmstime_t memint; /* memory integral */ -+} prgstats_old_t; -+ -+typedef struct { -+ uint64_t etime; /* elapsed cpu time (milli-secs) */ -+ uint64_t atime; /* allocated cpu time (cpu milli-secs) */ -+ uint64_t utime; /* user cpu time (cpu milli-secs) */ -+ uint64_t stime; /* system cpu time (cpu milli-secs) */ -+ uint64_t pageflts; /* number of page faults */ -+ uint64_t memint; /* memory integral */ -+ uint64_t ebytes; /* data transferred by the Elan(s) */ -+ uint64_t exfers; /* number of Elan data transfers */ -+ uint64_t spare64[4]; /* expansion space */ -+ int ncpus; /* number of cpus allocated */ -+ int flags; /* 
program status flags */ -+ int mem; /* max memory size in MBytes */ -+ int spare32[5]; /* expansion space */ -+} prgstats_t; -+ -+int rmsmod_init(void); -+void rmsmod_fini(void); -+ -+int rms_setcorepath(caddr_t path); -+int rms_getcorepath(pid_t pid, caddr_t path, int maxlen); -+int rms_prgcreate(int id, uid_t uid, int cpus); -+int rms_prgdestroy(int id); -+int rms_prgids(int maxids, int *prgids, int *nprgs); -+int rms_prginfo(int id, int maxpids, pid_t *pids, int *nprocs); -+int rms_prgaddcap(int id, int index, ELAN_CAPABILITY *cap); -+ -+int rms_prgsuspend(int id); -+int rms_prgresume(int id); -+int rms_prgsignal(int id, int signo); -+ -+int rms_getprgid(pid_t pid, int *id); -+int rms_ncaps(int *ncaps); -+int rms_getcap(int index, ELAN_CAPABILITY *cap); -+int rms_mycap(int *index); -+int rms_setcap(int index, int ctx); -+int rms_prefcap(int nprocess, int *index); -+ -+int rms_prggetstats(int id, prgstats_t *stats); -+void rms_accumulatestats(prgstats_t *total, prgstats_t *stats); -+char *rms_statsreport(prgstats_t *stats, char *buf); -+ -+int rms_elaninitdone(int vp); -+int rms_prgelanpids(int id, int maxpids, int *vps, pid_t *pids, int *npids); -+int rms_setelanstats(int id, uint64_t ebytes, uint64_t exfers); -+ -+int rms_setpset(int psid); -+int rms_getpset(int id, int *psid); -+int rms_modversion(void); -+ -+int rms_addproc(int id, pid_t pid); -+int rms_removeproc(int id, pid_t pid); -+int rms_ptrack_enabled(void); -+ -+#ifdef __cplusplus -+} -+#endif -+ -+ -+#if defined(__KERNEL__) -+ -+int rms_init(void); -+int rms_fini(void); -+int rms_reconfigure(void); -+ -+extern int rms_debug; -+ -+#if 1 -+#define DBG(x) do if (rms_debug) x ; while (0) -+#else -+#define DBG(x) -+#endif -+ -+#endif -+ -+#endif /* RMSCALL_H_INCLUDED */ -+ -+ -+ -+ -diff -urN clean/include/rms/rmsio.h linux-2.6.9/include/rms/rmsio.h ---- clean/include/rms/rmsio.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/include/rms/rmsio.h 2004-08-26 07:49:30.000000000 -0400 -@@ -0,0 +1,194 @@ -+/* -+ * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd. 
-+ *
-+ * For licensing information please see the supplied COPYING file
-+ *
-+ */
-+
-+#ident "@(#)$Id: rmsio.h,v 1.7 2004/08/26 11:49:30 duncan Exp $"
-+/* $Source: /cvs/master/quadrics/rmsmod/rmsio.h,v $*/
-+
-+
-+#ifndef __RMSMOD_RMSIO_H
-+#define __RMSMOD_RMSIO_H
-+
-+/* arg is corepath string */
-+#define RMSIO_SETCOREPATH _IOW ('r', 1, char)
-+
-+typedef struct rmsio_getcorepath_struct
-+{
-+ pid_t pid;
-+ char *corepath;
-+ int maxlen;
-+} RMSIO_GETCOREPATH_STRUCT;
-+#define RMSIO_GETCOREPATH _IOW ('r', 2, RMSIO_GETCOREPATH_STRUCT)
-+
-+typedef struct rmsio_prgcreate_struct
-+{
-+ int id;
-+ uid_t uid;
-+ int cpus;
-+} RMSIO_PRGCREATE_STRUCT;
-+#define RMSIO_PRGCREATE _IOW ('r', 3, RMSIO_PRGCREATE_STRUCT)
-+
-+typedef struct rmsio_prginfo_struct
-+{
-+ int id;
-+ int maxpids;
-+ pid_t *pids;
-+ int *nprocs;
-+} RMSIO_PRGINFO_STRUCT;
-+#define RMSIO_PRGINFO _IOW ('r', 4, RMSIO_PRGINFO_STRUCT)
-+
-+typedef struct rmsio_prgsignal_struct
-+{
-+ int id;
-+ int signo;
-+} RMSIO_PRGSIGNAL_STRUCT;
-+#define RMSIO_PRGSIGNAL _IOW ('r', 5, RMSIO_PRGSIGNAL_STRUCT)
-+
-+typedef struct rmsio_prgaddcap_struct
-+{
-+ int id;
-+ int index;
-+ ELAN_CAPABILITY *cap;
-+} RMSIO_PRGADDCAP_STRUCT;
-+#define RMSIO_PRGADDCAP _IOW ('r', 6, RMSIO_PRGADDCAP_STRUCT)
-+typedef struct rmsio_setcap_struct
-+{
-+ int index;
-+ int ctx;
-+} RMSIO_SETCAP_STRUCT;
-+#define RMSIO_SETCAP _IOW ('r', 7, RMSIO_SETCAP_STRUCT)
-+
-+typedef struct rmsio_getcap_struct
-+{
-+ int index;
-+ ELAN_CAPABILITY *cap;
-+} RMSIO_GETCAP_STRUCT;
-+#define RMSIO_GETCAP _IOW ('r', 8, RMSIO_GETCAP_STRUCT)
-+
-+typedef struct rmsio_getcap_struct32
-+{
-+ int index;
-+ unsigned int capptr;
-+} RMSIO_GETCAP_STRUCT32;
-+#define RMSIO_GETCAP32 _IOW ('r', 8, RMSIO_GETCAP_STRUCT32)
-+
-+/* arg is pointer to ncaps */
-+#define RMSIO_NCAPS _IOW ('r', 9, int)
-+
-+typedef struct rmsio_prggetstats_struct
-+{
-+ int id;
-+ prgstats_old_t *stats;
-+} RMSIO_PRGGETSTATS_STRUCT;
-+#define RMSIO_PRGGETSTATS _IOW ('r', 10, RMSIO_PRGGETSTATS_STRUCT)
-+
-+/* arg is program id */
-+#define RMSIO_PRGSUSPEND _IOW ('r', 11, int)
-+#define RMSIO_PRGRESUME _IOW ('r', 12, int)
-+#define RMSIO_PRGDESTROY _IOW ('r', 13, int)
-+
-+typedef struct rmsio_getprgid_struct
-+{
-+ pid_t pid;
-+ int *id;
-+} RMSIO_GETPRGID_STRUCT;
-+#define RMSIO_GETPRGID _IOW ('r', 14, RMSIO_GETPRGID_STRUCT)
-+
-+typedef struct rmsio_getprgid_struct32
-+{
-+ pid_t pid;
-+ unsigned int idptr;
-+} RMSIO_GETPRGID_STRUCT32;
-+#define RMSIO_GETPRGID32 _IOW ('r', 14, RMSIO_GETPRGID_STRUCT32)
-+
-+/* arg is pointer to index */
-+#define RMSIO_GETMYCAP _IOW ('r', 15, int)
-+
-+typedef struct rmsio_prgids_struct
-+{
-+ int maxids;
-+ int *prgids;
-+ int *nprgs;
-+} RMSIO_PRGIDS_STRUCT;
-+#define RMSIO_PRGIDS _IOW ('r', 16, RMSIO_PRGIDS_STRUCT)
-+
-+/* arg is pointer to vp */
-+#define RMSIO_ELANINITDONE _IOW ('r', 17, int)
-+
-+typedef struct rmsio_prgelanpids_struct
-+{
-+ int id;
-+ int maxpids;
-+ int *vps;
-+ int *pids;
-+ int *npids;
-+} RMSIO_PRGELANPIDS_STRUCT;
-+#define RMSIO_PRGELANPIDS _IOW ('r', 18, RMSIO_PRGELANPIDS_STRUCT)
-+
-+typedef struct rmsio_setpset_struct
-+{
-+ int id;
-+ int psid;
-+} RMSIO_SETPSET_STRUCT;
-+#define RMSIO_SETPSET _IOW ('r', 19, RMSIO_SETPSET_STRUCT)
-+
-+typedef struct rmsio_getpset_struct
-+{
-+ int id;
-+ int *psid;
-+} RMSIO_GETPSET_STRUCT;
-+#define RMSIO_GETPSET _IOW ('r', 20, RMSIO_GETPSET_STRUCT)
-+
-+/*
-+ * have to pass a pointer to the stats, the switch
-+ * statement goes wrong in the module if the size
-+ * is too large
-+ */
-+typedef struct {
-+
uint64_t ebytes; -+ uint64_t exfers; -+} elanstats_t; -+ -+typedef struct rmsio_setelanstats_struct -+{ -+ int id; -+ elanstats_t *estats; -+} RMSIO_SETELANSTATS_STRUCT; -+#define RMSIO_SETELANSTATS _IOW ('r', 21, RMSIO_SETELANSTATS_STRUCT) -+ -+typedef struct rmsio_prggetstats2_struct -+{ -+ int id; -+ prgstats_t *stats; -+} RMSIO_PRGGETSTATS2_STRUCT; -+#define RMSIO_PRGGETSTATS2 _IOW ('r', 22, RMSIO_PRGGETSTATS2_STRUCT) -+ -+typedef struct rmsio_modversion_struct -+{ -+ int *version; -+} RMSIO_MODVERSION_STRUCT; -+#define RMSIO_MODVERSION _IOW ('r', 23, RMSIO_MODVERSION_STRUCT) -+ -+typedef struct rmsio_proc_struct -+{ -+ int id; -+ pid_t pid; -+} RMSIO_PROC_STRUCT; -+#define RMSIO_ADDPROC _IOW ('r', 24, RMSIO_PROC_STRUCT) -+#define RMSIO_REMOVEPROC _IOW ('r', 25, RMSIO_PROC_STRUCT) -+ -+ -+ -+#endif /* __RMSMOD_RMSIO_H */ -+ -+ -+ -+ -+ -+ -+ + ++EXPORT_SYMBOL_GPL(ioproc_unregister_ops); +Index: linux-269-5502/mm/hugetlb.c +=================================================================== +--- linux-269-5502.orig/mm/hugetlb.c ++++ linux-269-5502/mm/hugetlb.c +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + + const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL; + static unsigned long nr_huge_pages, free_huge_pages; +@@ -260,6 +261,7 @@ void zap_hugepage_range(struct vm_area_s + struct mm_struct *mm = vma->vm_mm; + + spin_lock(&mm->page_table_lock); ++ ioproc_invalidate_range(vma, start, start + length); + unmap_hugepage_range(vma, start, start + length); + spin_unlock(&mm->page_table_lock); + } +Index: linux-269-5502/mm/Kconfig +=================================================================== +--- /dev/null ++++ linux-269-5502/mm/Kconfig +@@ -0,0 +1,15 @@ ++# ++# VM subsystem specific config ++# + -diff -urN clean/ipc/shm.c linux-2.6.9/ipc/shm.c ---- clean/ipc/shm.c 2005-05-13 13:39:10.000000000 -0400 -+++ linux-2.6.9/ipc/shm.c 2005-10-10 17:47:17.000000000 -0400 ++# Support for IO processors which have advanced RDMA capabilities ++# ++config IOPROC ++ bool "Enable IOPROC VM hooks" ++ depends on MMU ++ default y ++ help ++ This option enables hooks in the VM subsystem so that IO devices which ++ incorporate advanced RDMA capabilities can be kept in sync with CPU ++ page table changes. ++ See Documentation/vm/ioproc.txt for more details. 
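The mm/*.c hunks that follow all share one shape: take mm->page_table_lock, invoke the matching ioproc_* hook over the affected range or page, then carry out the normal page-table work. A minimal sketch of how such a hook plausibly dispatches, based on the ioproc_ops list this patch hangs off the mm_struct (illustrative only; the authoritative inlines are in the include/linux/ioproc.h section of this patch, and the no-op stub shown for the !CONFIG_IOPROC case is an assumption):

	#ifdef CONFIG_IOPROC
	/* walk every IOPROC client registered against this mm */
	static inline void
	ioproc_invalidate_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
	{
		struct ioproc_ops *cp;

		for (cp = vma->vm_mm->ioproc_ops; cp != NULL; cp = cp->next)
			if (cp->invalidate_range)
				cp->invalidate_range(cp->arg, vma, start, end);
	}
	#else
	/* with CONFIG_IOPROC disabled the hook should cost nothing */
	#define ioproc_invalidate_range(vma, start, end) do { } while (0)
	#endif

This is also why each callback takes the registered arg as its first parameter: one dispatch loop serves every registered device.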
+Index: linux-269-5502/mm/Makefile +=================================================================== +--- linux-269-5502.orig/mm/Makefile ++++ linux-269-5502/mm/Makefile +@@ -16,6 +16,7 @@ obj-$(CONFIG_SWAP) += page_io.o swap_sta + obj-$(CONFIG_X86_4G) += usercopy.o + obj-$(CONFIG_HUGETLBFS) += hugetlb.o + obj-$(CONFIG_NUMA) += mempolicy.o ++obj-$(CONFIG_IOPROC) += ioproc.o + obj-$(CONFIG_SHMEM) += shmem.o + obj-$(CONFIG_TINY_SHMEM) += tiny-shmem.o + +Index: linux-269-5502/mm/mprotect.c +=================================================================== +--- linux-269-5502.orig/mm/mprotect.c ++++ linux-269-5502/mm/mprotect.c +@@ -10,6 +10,7 @@ + + #include + #include ++#include + #include + #include + #include +@@ -100,6 +101,7 @@ change_protection(struct vm_area_struct + if (start >= end) + BUG(); + spin_lock(¤t->mm->page_table_lock); ++ ioproc_change_protection(vma, start, end, newprot); + do { + change_pmd_range(dir, start, end - start, newprot); + start = (start + PGDIR_SIZE) & PGDIR_MASK; +Index: linux-269-5502/mm/msync.c +=================================================================== +--- linux-269-5502.orig/mm/msync.c ++++ linux-269-5502/mm/msync.c +@@ -12,6 +12,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -115,6 +116,7 @@ static int filemap_sync(struct vm_area_s + + if (address >= end) + BUG(); ++ ioproc_sync_range(vma, address, end); + do { + error |= filemap_sync_pmd_range(dir, address, end, vma, flags); + address = (address + PGDIR_SIZE) & PGDIR_MASK; +Index: linux-269-5502/mm/mremap.c +=================================================================== +--- linux-269-5502.orig/mm/mremap.c ++++ linux-269-5502/mm/mremap.c +@@ -9,6 +9,7 @@ + + #include + #include ++#include + #include + #include + #include +@@ -148,6 +149,8 @@ static unsigned long move_page_tables(st + { + unsigned long offset; + ++ ioproc_invalidate_range(vma, old_addr, old_addr + len); ++ ioproc_invalidate_range(vma, new_addr, new_addr + len); + flush_cache_range(vma, old_addr, old_addr + len); + + /* +Index: linux-269-5502/mm/fremap.c +=================================================================== +--- linux-269-5502.orig/mm/fremap.c ++++ linux-269-5502/mm/fremap.c +@@ -12,6 +12,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -29,6 +30,7 @@ static inline void zap_pte(struct mm_str + if (pte_present(pte)) { + unsigned long pfn = pte_pfn(pte); + ++ ioproc_invalidate_page(vma, addr); + flush_cache_page(vma, addr); + pte = ptep_clear_flush(vma, addr, ptep); + if (pfn_valid(pfn)) { +@@ -93,6 +95,7 @@ int install_page(struct mm_struct *mm, s + pte_val = *pte; + pte_unmap(pte); + update_mmu_cache(vma, addr, pte_val); ++ ioproc_update_page(vma, addr); + + err = 0; + err_unlock: +@@ -132,6 +135,7 @@ int install_file_pte(struct mm_struct *m + pte_val = *pte; + pte_unmap(pte); + update_mmu_cache(vma, addr, pte_val); ++ ioproc_update_page(vma, addr); + spin_unlock(&mm->page_table_lock); + return 0; + +Index: linux-269-5502/mm/rmap.c +=================================================================== +--- linux-269-5502.orig/mm/rmap.c ++++ linux-269-5502/mm/rmap.c +@@ -51,6 +51,7 @@ + #include + #include + #include ++#include + #include + + #include +@@ -566,6 +567,7 @@ static int try_to_unmap_one(struct page + } + + /* Nuke the page table entry. 
*/ ++ ioproc_invalidate_page(vma, address); + flush_cache_page(vma, address); + pteval = ptep_clear_flush(vma, address, pte); + +@@ -673,6 +675,7 @@ static void try_to_unmap_cluster(unsigne + continue; + + /* Nuke the page table entry. */ ++ ioproc_invalidate_page(vma, address); + flush_cache_page(vma, address); + pteval = ptep_clear_flush(vma, address, pte); + +Index: linux-269-5502/mm/memory.c +=================================================================== +--- linux-269-5502.orig/mm/memory.c ++++ linux-269-5502/mm/memory.c +@@ -43,6 +43,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -630,6 +631,7 @@ void zap_page_range(struct vm_area_struc + + lru_add_drain(); + spin_lock(&mm->page_table_lock); ++ ioproc_invalidate_range(vma, address, end); + tlb = tlb_gather_mmu(mm, 0); + unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details); + tlb_finish_mmu(tlb, address, end); +@@ -998,6 +1000,7 @@ int zeromap_page_range(struct vm_area_st + BUG(); + + spin_lock(&mm->page_table_lock); ++ ioproc_invalidate_range(vma, beg, end); + do { + pmd_t *pmd = pmd_alloc(mm, dir, address); + error = -ENOMEM; +@@ -1012,6 +1015,7 @@ int zeromap_page_range(struct vm_area_st + /* + * Why flush? zeromap_pte_range has a BUG_ON for !pte_none() + */ ++ ioproc_update_range(vma, beg, end); + flush_tlb_range(vma, beg, end); + spin_unlock(&mm->page_table_lock); + return error; +@@ -1092,6 +1096,7 @@ int remap_page_range(struct vm_area_stru + vma->vm_flags |= VM_IO | VM_RESERVED; + + spin_lock(&mm->page_table_lock); ++ ioproc_invalidate_range(vma, beg, end); + do { + pmd_t *pmd = pmd_alloc(mm, dir, from); + error = -ENOMEM; +@@ -1106,6 +1111,7 @@ int remap_page_range(struct vm_area_stru + /* + * Why flush? remap_pte_range has a BUG_ON for !pte_none() + */ ++ ioproc_update_range(vma, beg, end); + flush_tlb_range(vma, beg, end); + spin_unlock(&mm->page_table_lock); + return error; +@@ -1194,6 +1200,7 @@ static int do_wp_page(struct mm_struct * + update_mmu_cache(vma, address, entry); + lazy_mmu_prot_update(entry); + pte_unmap(page_table); ++ ioproc_update_page(vma, address); + spin_unlock(&mm->page_table_lock); + return VM_FAULT_MINOR; + } +@@ -1226,6 +1233,7 @@ static int do_wp_page(struct mm_struct * + ++mm->rss; + else + page_remove_rmap(old_page); ++ ioproc_invalidate_page(vma, address); + break_cow(vma, new_page, address, page_table); + lru_cache_add_active(new_page); + page_add_anon_rmap(new_page, vma, address); +@@ -1234,6 +1242,7 @@ static int do_wp_page(struct mm_struct * + new_page = old_page; + } + pte_unmap(page_table); ++ ioproc_update_page(vma, address); + page_cache_release(new_page); + page_cache_release(old_page); + spin_unlock(&mm->page_table_lock); +@@ -1630,6 +1639,7 @@ static int do_swap_page(struct mm_struct + update_mmu_cache(vma, address, pte); + lazy_mmu_prot_update(pte); + pte_unmap(page_table); ++ ioproc_update_page(vma, address); + spin_unlock(&mm->page_table_lock); + out: + return ret; +@@ -1695,6 +1705,7 @@ do_anonymous_page(struct mm_struct *mm, + /* No need to invalidate - it was non-present before */ + update_mmu_cache(vma, addr, entry); + lazy_mmu_prot_update(entry); ++ ioproc_update_page(vma, addr); + spin_unlock(&mm->page_table_lock); + out: + return VM_FAULT_MINOR; +@@ -1813,6 +1824,7 @@ retry: + + /* no need to invalidate: a not-present page shouldn't be cached */ + update_mmu_cache(vma, address, entry); ++ ioproc_update_page(vma, address); + spin_unlock(&mm->page_table_lock); + out: + return ret; +@@ -1998,6 +2010,7 @@ int 
make_pages_present(unsigned long add + return ret; + return ret == len ? 0 : -1; + } ++EXPORT_SYMBOL(make_pages_present); + + /* + * Map a vmalloc()-space virtual address to the physical page. +Index: linux-269-5502/mm/mmap.c +=================================================================== +--- linux-269-5502.orig/mm/mmap.c ++++ linux-269-5502/mm/mmap.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -1703,6 +1704,7 @@ static void unmap_region(struct mm_struc + unsigned long nr_accounted = 0; + + lru_add_drain(); ++ ioproc_invalidate_range(vma, start, end); + tlb = tlb_gather_mmu(mm, 0); + unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL); + vm_unacct_memory(nr_accounted); +@@ -1995,6 +1997,7 @@ void exit_mmap(struct mm_struct *mm) + + spin_lock(&mm->page_table_lock); + ++ ioproc_release(mm); + tlb = tlb_gather_mmu(mm, 1); + flush_cache_mm(mm); + /* Use ~0UL here to ensure all VMAs in the mm are unmapped */ +Index: linux-269-5502/ipc/shm.c +=================================================================== +--- linux-269-5502.orig/ipc/shm.c ++++ linux-269-5502/ipc/shm.c @@ -26,6 +26,7 @@ #include #include @@ -96985,7 +1253,7 @@ diff -urN clean/ipc/shm.c linux-2.6.9/ipc/shm.c #include #include -@@ -850,6 +851,44 @@ +@@ -856,6 +857,44 @@ asmlinkage long sys_shmdt(char __user *s return retval; } @@ -97030,322 +1298,345 @@ diff -urN clean/ipc/shm.c linux-2.6.9/ipc/shm.c #ifdef CONFIG_PROC_FS static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data) { -diff -urN clean/kernel/exit.c linux-2.6.9/kernel/exit.c ---- clean/kernel/exit.c 2005-10-10 17:43:57.000000000 -0400 -+++ linux-2.6.9/kernel/exit.c 2005-10-10 17:47:17.000000000 -0400 -@@ -30,6 +30,8 @@ - #include - #include - -+#include -+ - extern void sem_exit (void); - extern struct task_struct *child_reaper; - -@@ -822,6 +824,8 @@ - #endif - current->tux_exit(); - } -+ /* Notify any ptrack callbacks of the process exit */ -+ ptrack_call_callbacks (PTRACK_PHASE_EXIT, NULL); - __exit_mm(tsk); +Index: linux-269-5502/include/linux/init_task.h +=================================================================== +--- linux-269-5502.orig/include/linux/init_task.h ++++ linux-269-5502/include/linux/init_task.h +@@ -2,6 +2,7 @@ + #define _LINUX__INIT_TASK_H - exit_sem(tsk); -diff -urN clean/kernel/fork.c linux-2.6.9/kernel/fork.c ---- clean/kernel/fork.c 2005-05-13 13:39:08.000000000 -0400 -+++ linux-2.6.9/kernel/fork.c 2005-10-10 17:47:17.000000000 -0400 -@@ -14,6 +14,7 @@ - #include - #include - #include + #include +#include - #include - #include - #include -@@ -430,6 +431,9 @@ - mm->page_table_lock = SPIN_LOCK_UNLOCKED; - mm->ioctx_list_lock = RW_LOCK_UNLOCKED; - mm->ioctx_list = NULL; -+#ifdef CONFIG_IOPROC -+ mm->ioproc_ops = NULL; -+#endif - mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm); - mm->free_area_cache = TASK_UNMAPPED_BASE; -@@ -1264,6 +1268,11 @@ - set_tsk_thread_flag(p, TIF_SIGPENDING); - } + #define INIT_FILES \ + { \ +@@ -112,6 +113,7 @@ extern struct group_info init_groups; + .proc_lock = SPIN_LOCK_UNLOCKED, \ + .switch_lock = SPIN_LOCK_UNLOCKED, \ + .journal_info = NULL, \ ++ INIT_TASK_PTRACK(tsk) \ + } -+ if (ptrack_call_callbacks(PTRACK_PHASE_CLONE, p)) { -+ sigaddset(&p->pending.signal, SIGKILL); -+ set_tsk_thread_flag(p, TIF_SIGPENDING); -+ } -+ - if (!(clone_flags & CLONE_STOPPED)) - wake_up_new_task(p, clone_flags); - else -diff -urN clean/kernel/Kconfig linux-2.6.9/kernel/Kconfig ---- 
clean/kernel/Kconfig 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/kernel/Kconfig 2005-10-10 17:47:17.000000000 -0400 -@@ -0,0 +1,14 @@ -+# -+# Kernel subsystem specific config -+# -+ -+# Support for Process Tracking callbacks -+# -+config PTRACK -+ bool "Enable PTRACK process tracking hooks" -+ default y -+ help -+ This option enables hooks to be called when processes are -+ created and destoryed in order for a resource management -+ system to know which processes are a member of a "job" and -+ to be able to clean up when the job is terminated. -diff -urN clean/kernel/Makefile linux-2.6.9/kernel/Makefile ---- clean/kernel/Makefile 2005-05-13 13:39:07.000000000 -0400 -+++ linux-2.6.9/kernel/Makefile 2005-10-10 17:47:17.000000000 -0400 -@@ -26,6 +26,7 @@ - obj-$(CONFIG_AUDIT) += audit.o - obj-$(CONFIG_AUDITSYSCALL) += auditsc.o - obj-$(CONFIG_KPROBES) += kprobes.o -+obj-$(CONFIG_PTRACK) += ptrack.o - ifneq ($(CONFIG_IA64),y) - # According to Alan Modra , the -fno-omit-frame-pointer is -diff -urN clean/kernel/ptrack.c linux-2.6.9/kernel/ptrack.c ---- clean/kernel/ptrack.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/kernel/ptrack.c 2005-10-10 17:47:17.000000000 -0400 -@@ -0,0 +1,145 @@ +Index: linux-269-5502/include/linux/ioproc.h +=================================================================== +--- /dev/null ++++ linux-269-5502/include/linux/ioproc.h +@@ -0,0 +1,270 @@ +/* -+ * Copyright (C) 2000 Regents of the University of California ++ * Copyright (C) 2006 Quadrics Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++ ++/* ++ * Callbacks for IO processor page table updates. 
++ */
++
++#ifndef __LINUX_IOPROC_H__
++#define __LINUX_IOPROC_H__
++
++#include
++#include
++
++typedef struct ioproc_ops {
++ struct ioproc_ops *next;
++ void *arg;
++
++ void (*release) (void *arg, struct mm_struct * mm);
++ void (*sync_range) (void *arg, struct vm_area_struct * vma,
++ unsigned long start, unsigned long end);
++ void (*invalidate_range) (void *arg, struct vm_area_struct * vma,
++ unsigned long start, unsigned long end);
++ void (*update_range) (void *arg, struct vm_area_struct * vma,
++ unsigned long start, unsigned long end);
++
++ void (*change_protection) (void *arg, struct vm_area_struct * vma,
++ unsigned long start, unsigned long end,
++ pgprot_t newprot);
++
++ void (*sync_page) (void *arg, struct vm_area_struct * vma,
++ unsigned long address);
++ void (*invalidate_page) (void *arg, struct vm_area_struct * vma,
++ unsigned long address);
++ void (*update_page) (void *arg, struct vm_area_struct * vma,
++ unsigned long address);
++
++} ioproc_ops_t;
++
++/* IOPROC Registration
++ *
++ * Called by the IOPROC device driver to register its interest in page table
++ * changes for the process associated with the supplied mm_struct
++ *
++ * The caller should first allocate and fill out an ioproc_ops structure with
++ * the function pointers initialised to the device driver specific code for
++ * each callback. If the device driver doesn't have code for a particular
++ * callback then it should set the function pointer to be NULL.
++ * The ioproc_ops arg parameter will be passed unchanged as the first argument
++ * to each callback function invocation.
++ *
++ * The ioproc registration is not inherited across fork() and should be called
++ * once for each process that the IOPROC device driver is interested in.
++ *
++ * Must be called holding the mm->page_table_lock
++ */
++extern int ioproc_register_ops(struct mm_struct *mm, struct ioproc_ops *ip);
++
++/* IOPROC De-registration
++ *
++ * Called by the IOPROC device driver when it is no longer interested in page
++ * table changes for the process associated with the supplied mm_struct
++ *
++ * Normally this does not need to be called, as the ioproc_release() code will
++ * automatically unlink the ioproc_ops struct from the mm_struct as the
++ * process exits
++ *
++ * Must be called holding the mm->page_table_lock
++ */
++extern int ioproc_unregister_ops(struct mm_struct *mm, struct ioproc_ops *ip);
++
++#ifdef CONFIG_IOPROC
++
++/* IOPROC Release
++ *
++ * Called during exit_mmap() as all vmas are torn down and unmapped.
++ *
++ * Also unlinks the ioproc_ops structure from the mm list as it goes.
++ *
++ * No need for locks as the mm can no longer be accessed at this point
++ *
++ */
++static inline void ioproc_release(struct mm_struct *mm)
++{
++ struct ioproc_ops *cp;
++
++ while ((cp = mm->ioproc_ops) != NULL) {
++ mm->ioproc_ops = cp->next;
++
++ if (cp->release)
++ cp->release(cp->arg, mm);
++ }
++}
++
++/* IOPROC SYNC RANGE
++ *
++ * Called when a memory map is synchronised with its disk image i.e. when the
++ * msync() syscall is invoked. Any future read or write to the associated
++ * pages by the IOPROC should cause the page to be marked as referenced or
++ * modified. 
++ * ++ * Called holding the mm->page_table_lock ++ */ ++static inline void ++ioproc_sync_range(struct vm_area_struct *vma, unsigned long start, ++ unsigned long end) ++{ ++ struct ioproc_ops *cp; ++ ++ for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next) ++ if (cp->sync_range) ++ cp->sync_range(cp->arg, vma, start, end); ++} ++ ++/* IOPROC INVALIDATE RANGE ++ * ++ * Called whenever a valid PTE is unloaded e.g. when a page is unmapped by the ++ * user or paged out by the kernel. ++ * ++ * After this call the IOPROC must not access the physical memory again unless ++ * a new translation is loaded. ++ * ++ * Called holding the mm->page_table_lock ++ */ ++static inline void ++ioproc_invalidate_range(struct vm_area_struct *vma, unsigned long start, ++ unsigned long end) ++{ ++ struct ioproc_ops *cp; ++ ++ for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next) ++ if (cp->invalidate_range) ++ cp->invalidate_range(cp->arg, vma, start, end); ++} ++ ++/* IOPROC UPDATE RANGE ++ * ++ * Called whenever a valid PTE is loaded e.g. mmaping memory, moving the brk ++ * up, when breaking COW or faulting in an anonymous page of memory. ++ * ++ * These give the IOPROC device driver the opportunity to load translations ++ * speculatively, which can improve performance by avoiding device translation ++ * faults. ++ * ++ * Called holding the mm->page_table_lock ++ */ ++static inline void ++ioproc_update_range(struct vm_area_struct *vma, unsigned long start, ++ unsigned long end) ++{ ++ struct ioproc_ops *cp; ++ ++ for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next) ++ if (cp->update_range) ++ cp->update_range(cp->arg, vma, start, end); ++} ++ ++/* IOPROC CHANGE PROTECTION ++ * ++ * Called when the protection on a region of memory is changed i.e. when the ++ * mprotect() syscall is invoked. ++ * ++ * The IOPROC must not be able to write to a read-only page, so if the ++ * permissions are downgraded then it must honour them. If they are upgraded ++ * it can treat this in the same way as the ioproc_update_[range|sync]() calls ++ * ++ * Called holding the mm->page_table_lock ++ */ ++static inline void ++ioproc_change_protection(struct vm_area_struct *vma, unsigned long start, ++ unsigned long end, pgprot_t newprot) ++{ ++ struct ioproc_ops *cp; ++ ++ for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next) ++ if (cp->change_protection) ++ cp->change_protection(cp->arg, vma, start, end, ++ newprot); ++} ++ ++/* IOPROC SYNC PAGE ++ * ++ * Called when a memory map is synchronised with its disk image i.e. when the ++ * msync() syscall is invoked. Any future read or write to the associated page ++ * by the IOPROC should cause the page to be marked as referenced or modified. + * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * Not currently called as msync() calls ioproc_sync_range() instead + * -+ * Derived from exit_actn.c by -+ * Copyright (C) 2003 Quadrics Ltd. 
++ * Called holding the mm->page_table_lock
++ */
++static inline void
++ioproc_sync_page(struct vm_area_struct *vma, unsigned long addr)
++{
++ struct ioproc_ops *cp;
++
++ for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next)
++ if (cp->sync_page)
++ cp->sync_page(cp->arg, vma, addr);
++}
++
++/* IOPROC INVALIDATE PAGE
++ *
++ * Called whenever a valid PTE is unloaded e.g. when a page is unmapped by the
++ * user or paged out by the kernel.
++ *
++ * After this call the IOPROC must not access the physical memory again unless
++ * a new translation is loaded.
++ *
++ * Called holding the mm->page_table_lock
++ */
++static inline void
++ioproc_invalidate_page(struct vm_area_struct *vma, unsigned long addr)
++{
++ struct ioproc_ops *cp;
++
++ for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next)
++ if (cp->invalidate_page)
++ cp->invalidate_page(cp->arg, vma, addr);
++}
++
++/* IOPROC UPDATE PAGE
++ *
++ * Called whenever a valid PTE is loaded e.g. mmaping memory, moving the brk
++ * up, when breaking COW or faulting in an anonymous page of memory.
++ *
++ * These give the IOPROC device the opportunity to load translations
++ * speculatively, which can improve performance by avoiding device translation
++ * faults. 
++ * ++ * Called holding the mm->page_table_lock ++ */ ++static inline void ++ioproc_update_page(struct vm_area_struct *vma, unsigned long addr) ++{ ++ struct ioproc_ops *cp; + -+ case PTRACK_DENIED: -+ goto failed; -+ } -+ break; -+ } -+ } ++ for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next) ++ if (cp->update_page) ++ cp->update_page(cp->arg, vma, addr); ++} + -+ return 0; ++#else + -+ failed: -+ while (! list_empty (&child->ptrack_list)) -+ { -+ struct ptrack_desc *desc = list_entry (child->ptrack_list.next, struct ptrack_desc, link); -+ -+ desc->callback (desc->arg, PTRACK_PHASE_CLONE_FAIL, child); ++/* ! CONFIG_IOPROC so make all hooks empty */ + -+ list_del (&desc->link); -+ kfree (desc); -+ } -+ return 1; -+} -+EXPORT_SYMBOL(ptrack_register); -+EXPORT_SYMBOL(ptrack_deregister); -+EXPORT_SYMBOL(ptrack_registered); -diff -urN clean/kernel/signal.c linux-2.6.9/kernel/signal.c ---- clean/kernel/signal.c 2005-05-13 13:39:11.000000000 -0400 -+++ linux-2.6.9/kernel/signal.c 2005-10-10 17:47:17.000000000 -0400 -@@ -2266,6 +2266,7 @@ - - return kill_something_info(sig, &info, pid); - } -+EXPORT_SYMBOL_GPL(sys_kill); - - /** - * sys_tgkill - send signal to one specific thread -diff -urN clean/Makefile linux-2.6.9/Makefile ---- clean/Makefile 2005-05-13 13:39:19.000000000 -0400 -+++ linux-2.6.9/Makefile 2005-10-10 17:47:31.000000000 -0400 -@@ -1,7 +1,7 @@ - VERSION = 2 - PATCHLEVEL = 6 - SUBLEVEL = 9 --EXTRAVERSION = -prep -+EXTRAVERSION = -prep.qp2.2.5.11.3qsnet - NAME=AC 1 - - # *DOCUMENTATION* -diff -urN clean/mm/fremap.c linux-2.6.9/mm/fremap.c ---- clean/mm/fremap.c 2004-10-18 17:53:06.000000000 -0400 -+++ linux-2.6.9/mm/fremap.c 2005-10-10 17:47:17.000000000 -0400 -@@ -12,6 +12,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -29,6 +30,7 @@ - if (pte_present(pte)) { - unsigned long pfn = pte_pfn(pte); ++#define ioproc_release(mm) do { } while (0) ++#define ioproc_sync_range(vma, start, end) do { } while (0) ++#define ioproc_invalidate_range(vma, start, end) do { } while (0) ++#define ioproc_update_range(vma, start, end) do { } while (0) ++#define ioproc_change_protection(vma, start, end, prot) do { } while (0) ++#define ioproc_sync_page(vma, addr) do { } while (0) ++#define ioproc_invalidate_page(vma, addr) do { } while (0) ++#define ioproc_update_page(vma, addr) do { } while (0) ++ ++#endif /* CONFIG_IOPROC */ ++#endif /* __LINUX_IOPROC_H__ */ +Index: linux-269-5502/include/linux/sched.h +=================================================================== +--- linux-269-5502.orig/include/linux/sched.h ++++ linux-269-5502/include/linux/sched.h +@@ -185,6 +185,9 @@ extern signed long schedule_timeout_unin + asmlinkage void schedule(void); -+ ioproc_invalidate_page(vma, addr); - flush_cache_page(vma, addr); - pte = ptep_clear_flush(vma, addr, ptep); - if (pfn_valid(pfn)) { -@@ -93,6 +95,7 @@ - pte_val = *pte; - pte_unmap(pte); - update_mmu_cache(vma, addr, pte_val); -+ ioproc_update_page(vma, addr); + struct namespace; ++#ifdef CONFIG_IOPROC ++struct ioproc_ops; ++#endif - err = 0; - err_unlock: -@@ -132,6 +135,7 @@ - pte_val = *pte; - pte_unmap(pte); - update_mmu_cache(vma, addr, pte_val); -+ ioproc_update_page(vma, addr); - spin_unlock(&mm->page_table_lock); - return 0; + /* Maximum number of active map areas.. 
This is a random (large) number */ + #define DEFAULT_MAX_MAP_COUNT 65536 +@@ -260,6 +263,11 @@ struct mm_struct { + struct kioctx *ioctx_list; -diff -urN clean/mm/hugetlb.c linux-2.6.9/mm/hugetlb.c ---- clean/mm/hugetlb.c 2004-10-18 17:54:37.000000000 -0400 -+++ linux-2.6.9/mm/hugetlb.c 2005-10-10 17:47:17.000000000 -0400 -@@ -10,6 +10,7 @@ - #include - #include - #include -+#include + struct kioctx default_kioctx; ++ ++#ifdef CONFIG_IOPROC ++ /* hooks for io devices with advanced RDMA capabilities */ ++ struct ioproc_ops *ioproc_ops; ++#endif + }; - const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL; - static unsigned long nr_huge_pages, free_huge_pages; -@@ -254,6 +255,7 @@ - struct mm_struct *mm = vma->vm_mm; + extern int mmlist_nr; +@@ -635,6 +643,10 @@ struct task_struct { + struct mempolicy *mempolicy; + short il_next; /* could be shared with used_math */ + #endif ++#ifdef CONFIG_PTRACK ++/* process tracking callback */ ++ struct list_head ptrack_list; ++#endif + }; - spin_lock(&mm->page_table_lock); -+ ioproc_invalidate_range(vma, start, start + length); - unmap_hugepage_range(vma, start, start + length); - spin_unlock(&mm->page_table_lock); - } -diff -urN clean/mm/ioproc.c linux-2.6.9/mm/ioproc.c ---- clean/mm/ioproc.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/mm/ioproc.c 2005-10-10 17:47:17.000000000 -0400 -@@ -0,0 +1,58 @@ -+/* -*- linux-c -*- -+ * -+ * Copyright (C) 2002-2004 Quadrics Ltd. + static inline pid_t process_group(struct task_struct *tsk) +Index: linux-269-5502/include/linux/ptrack.h +=================================================================== +--- /dev/null ++++ linux-269-5502/include/linux/ptrack.h +@@ -0,0 +1,65 @@ ++/* ++ * Copyright (C) 2000 Regents of the University of California + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by @@ -97361,292 +1652,90 @@ diff -urN clean/mm/ioproc.c linux-2.6.9/mm/ioproc.c + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * ++ * Derived from exit_actn.c by ++ * Copyright (C) 2003 Quadrics Ltd. + * + */ ++#ifndef __LINUX_PTRACK_H ++#define __LINUX_PTRACK_H + -+/* -+ * Registration for IO processor page table updates. ++/* ++ * Process tracking - this allows a module to keep track of processes ++ * in order that it can manage all tasks derived from a single process. 
+ */ + -+#include -+#include ++#define PTRACK_PHASE_CLONE 1 ++#define PTRACK_PHASE_CLONE_FAIL 2 ++#define PTRACK_PHASE_EXEC 3 ++#define PTRACK_PHASE_EXIT 4 + -+#include -+#include ++#define PTRACK_FINISHED 0 ++#define PTRACK_INNHERIT 1 ++#define PTRACK_DENIED 2 + -+int -+ioproc_register_ops(struct mm_struct *mm, struct ioproc_ops *ip) -+{ -+ ip->next = mm->ioproc_ops; -+ mm->ioproc_ops = ip; ++#ifdef CONFIG_PTRACK + -+ return 0; -+} ++typedef int (*ptrack_callback_t)(void *arg, int phase, struct task_struct *child); + -+EXPORT_SYMBOL_GPL(ioproc_register_ops); ++struct ptrack_desc { ++ struct list_head link; ++ ptrack_callback_t callback; ++ void *arg; ++}; + -+int -+ioproc_unregister_ops(struct mm_struct *mm, struct ioproc_ops *ip) -+{ -+ struct ioproc_ops **tmp; ++extern int ptrack_register (ptrack_callback_t callback, void *arg); ++extern void ptrack_deregister (ptrack_callback_t callback, void *arg); ++extern int ptrack_registered (ptrack_callback_t callback, void *arg); + -+ for (tmp = &mm->ioproc_ops; *tmp && *tmp != ip; tmp= &(*tmp)->next) -+ ; -+ if (*tmp) { -+ *tmp = ip->next; -+ return 0; -+ } ++extern int ptrack_call_callbacks (int phase, struct task_struct *child); + -+ return -EINVAL; -+} ++#define INIT_TASK_PTRACK(tsk) \ ++ .ptrack_list = LIST_HEAD_INIT(tsk.ptrack_list) + -+EXPORT_SYMBOL_GPL(ioproc_unregister_ops); -diff -urN clean/mm/Kconfig linux-2.6.9/mm/Kconfig ---- clean/mm/Kconfig 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.9/mm/Kconfig 2005-10-10 17:47:17.000000000 -0400 -@@ -0,0 +1,15 @@ -+# -+# VM subsystem specific config -+# ++#else ++#define ptrack_call_callbacks(phase, child) (0) + -+# Support for IO processors which have advanced RDMA capabilities -+# -+config IOPROC -+ bool "Enable IOPROC VM hooks" -+ depends on MMU -+ default y -+ help -+ This option enables hooks in the VM subsystem so that IO devices which -+ incorporate advanced RDMA capabilities can be kept in sync with CPU -+ page table changes. -+ See Documentation/vm/ioproc.txt for more details. -diff -urN clean/mm/Makefile linux-2.6.9/mm/Makefile ---- clean/mm/Makefile 2005-05-13 13:39:02.000000000 -0400 -+++ linux-2.6.9/mm/Makefile 2005-10-10 17:47:17.000000000 -0400 -@@ -16,6 +16,7 @@ - obj-$(CONFIG_X86_4G) += usercopy.o - obj-$(CONFIG_HUGETLBFS) += hugetlb.o - obj-$(CONFIG_NUMA) += mempolicy.o -+obj-$(CONFIG_IOPROC) += ioproc.o - obj-$(CONFIG_SHMEM) += shmem.o - obj-$(CONFIG_TINY_SHMEM) += tiny-shmem.o - -diff -urN clean/mm/memory.c linux-2.6.9/mm/memory.c ---- clean/mm/memory.c 2005-05-13 13:39:08.000000000 -0400 -+++ linux-2.6.9/mm/memory.c 2005-10-10 17:47:17.000000000 -0400 -@@ -43,6 +43,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -619,6 +620,7 @@ - - lru_add_drain(); - spin_lock(&mm->page_table_lock); -+ ioproc_invalidate_range(vma, address, end); - tlb = tlb_gather_mmu(mm, 0); - unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details); - tlb_finish_mmu(tlb, address, end); -@@ -968,6 +970,7 @@ - BUG(); - - spin_lock(&mm->page_table_lock); -+ ioproc_invalidate_range(vma, beg, end); - do { - pmd_t *pmd = pmd_alloc(mm, dir, address); - error = -ENOMEM; -@@ -982,6 +985,7 @@ - /* - * Why flush? 
zeromap_pte_range has a BUG_ON for !pte_none() - */ -+ ioproc_update_range(vma, beg, end); - flush_tlb_range(vma, beg, end); - spin_unlock(&mm->page_table_lock); - return error; -@@ -1062,6 +1066,7 @@ - vma->vm_flags |= VM_IO | VM_RESERVED; - - spin_lock(&mm->page_table_lock); -+ ioproc_invalidate_range(vma, beg, end); - do { - pmd_t *pmd = pmd_alloc(mm, dir, from); - error = -ENOMEM; -@@ -1076,6 +1081,7 @@ - /* - * Why flush? remap_pte_range has a BUG_ON for !pte_none() - */ -+ ioproc_update_range(vma, beg, end); - flush_tlb_range(vma, beg, end); - spin_unlock(&mm->page_table_lock); - return error; -@@ -1162,6 +1168,7 @@ - ptep_set_access_flags(vma, address, page_table, entry, 1); - update_mmu_cache(vma, address, entry); - pte_unmap(page_table); -+ ioproc_update_page(vma, address); - spin_unlock(&mm->page_table_lock); - return VM_FAULT_MINOR; - } -@@ -1194,6 +1201,7 @@ - ++mm->rss; - else - page_remove_rmap(old_page); -+ ioproc_invalidate_page(vma, address); - break_cow(vma, new_page, address, page_table); - lru_cache_add_active(new_page); - page_add_anon_rmap(new_page, vma, address); -@@ -1202,6 +1210,7 @@ - new_page = old_page; - } - pte_unmap(page_table); -+ ioproc_update_page(vma, address); - page_cache_release(new_page); - page_cache_release(old_page); - spin_unlock(&mm->page_table_lock); -@@ -1495,6 +1504,7 @@ - /* No need to invalidate - it was non-present before */ - update_mmu_cache(vma, address, pte); - pte_unmap(page_table); -+ ioproc_update_page(vma, address); - spin_unlock(&mm->page_table_lock); - out: - return ret; -@@ -1555,6 +1565,7 @@ - - /* No need to invalidate - it was non-present before */ - update_mmu_cache(vma, addr, entry); -+ ioproc_update_page(vma, addr); - lazy_mmu_prot_update(entry); - spin_unlock(&mm->page_table_lock); - out: -@@ -1673,6 +1684,7 @@ - - /* no need to invalidate: a not-present page shouldn't be cached */ - update_mmu_cache(vma, address, entry); -+ ioproc_update_page(vma, address); - lazy_mmu_prot_update(entry); - spin_unlock(&mm->page_table_lock); - out: -@@ -1853,6 +1865,7 @@ - return ret; - return ret == len ? 0 : -1; - } -+EXPORT_SYMBOL(make_pages_present); - - /* - * Map a vmalloc()-space virtual address to the physical page. 
-diff -urN clean/mm/mmap.c linux-2.6.9/mm/mmap.c ---- clean/mm/mmap.c 2005-05-13 13:39:10.000000000 -0400 -+++ linux-2.6.9/mm/mmap.c 2005-10-10 17:47:17.000000000 -0400 -@@ -15,6 +15,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -1680,6 +1681,7 @@ - unsigned long nr_accounted = 0; - - lru_add_drain(); -+ ioproc_invalidate_range(vma, start, end); - tlb = tlb_gather_mmu(mm, 0); - unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL); - vm_unacct_memory(nr_accounted); -@@ -1965,6 +1967,7 @@ - - spin_lock(&mm->page_table_lock); - -+ ioproc_release(mm); - tlb = tlb_gather_mmu(mm, 1); - flush_cache_mm(mm); - /* Use ~0UL here to ensure all VMAs in the mm are unmapped */ -diff -urN clean/mm/mprotect.c linux-2.6.9/mm/mprotect.c ---- clean/mm/mprotect.c 2005-05-13 13:39:02.000000000 -0400 -+++ linux-2.6.9/mm/mprotect.c 2005-10-10 17:47:17.000000000 -0400 -@@ -10,6 +10,7 @@ - - #include - #include -+#include - #include - #include - #include -@@ -99,6 +100,7 @@ - if (start >= end) - BUG(); - spin_lock(¤t->mm->page_table_lock); -+ ioproc_change_protection(vma, start, end, newprot); - do { - change_pmd_range(dir, start, end - start, newprot); - start = (start + PGDIR_SIZE) & PGDIR_MASK; -diff -urN clean/mm/mremap.c linux-2.6.9/mm/mremap.c ---- clean/mm/mremap.c 2005-05-13 13:39:02.000000000 -0400 -+++ linux-2.6.9/mm/mremap.c 2005-10-10 17:47:17.000000000 -0400 -@@ -9,6 +9,7 @@ - - #include - #include -+#include - #include - #include - #include -@@ -148,6 +149,8 @@ - { - unsigned long offset; - -+ ioproc_invalidate_range(vma, old_addr, old_addr + len); -+ ioproc_invalidate_range(vma, new_addr, new_addr + len); - flush_cache_range(vma, old_addr, old_addr + len); - - /* -diff -urN clean/mm/msync.c linux-2.6.9/mm/msync.c ---- clean/mm/msync.c 2004-10-18 17:53:51.000000000 -0400 -+++ linux-2.6.9/mm/msync.c 2005-10-10 17:47:17.000000000 -0400 -@@ -12,6 +12,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -115,6 +116,7 @@ - - if (address >= end) - BUG(); -+ ioproc_sync_range(vma, address, end); - do { - error |= filemap_sync_pmd_range(dir, address, end, vma, flags); - address = (address + PGDIR_SIZE) & PGDIR_MASK; -diff -urN clean/mm/rmap.c linux-2.6.9/mm/rmap.c ---- clean/mm/rmap.c 2005-05-13 13:39:08.000000000 -0400 -+++ linux-2.6.9/mm/rmap.c 2005-10-10 17:47:17.000000000 -0400 -@@ -51,6 +51,7 @@ - #include - #include - #include -+#include - #include - - #include -@@ -566,6 +567,7 @@ - } - - /* Nuke the page table entry. */ -+ ioproc_invalidate_page(vma, address); - flush_cache_page(vma, address); - pteval = ptep_clear_flush(vma, address, pte); - -@@ -673,6 +675,7 @@ - continue; ++#define INIT_TASK_PTRACK(tsk) ++ ++#endif ++ ++#endif /* __LINUX_PTRACK_H */ +Index: linux-269-5502/include/asm-ia64/param.h +=================================================================== +--- linux-269-5502.orig/include/asm-ia64/param.h ++++ linux-269-5502/include/asm-ia64/param.h +@@ -27,7 +27,7 @@ + */ + # define HZ 32 + # else +-# define HZ 1024 ++# define HZ 100 + # endif + # define USER_HZ HZ + # define CLOCKS_PER_SEC HZ /* frequency at which times() counts */ +Index: linux-269-5502/include/asm-i386/param.h +=================================================================== +--- linux-269-5502.orig/include/asm-i386/param.h ++++ linux-269-5502/include/asm-i386/param.h +@@ -2,7 +2,7 @@ + #define _ASMi386_PARAM_H - /* Nuke the page table entry. 
*/ -+ ioproc_invalidate_page(vma, address); - flush_cache_page(vma, address); - pteval = ptep_clear_flush(vma, address, pte); + #ifdef __KERNEL__ +-# define HZ 1000 /* Internal kernel timer frequency */ ++# define HZ 100 /* Internal kernel timer frequency */ + # define USER_HZ 100 /* .. some user interfaces are in "ticks" */ + # define CLOCKS_PER_SEC (USER_HZ) /* like times() */ + #endif +Index: linux-269-5502/include/asm-x86_64/param.h +=================================================================== +--- linux-269-5502.orig/include/asm-x86_64/param.h ++++ linux-269-5502/include/asm-x86_64/param.h +@@ -2,7 +2,7 @@ + #define _ASMx86_64_PARAM_H + #ifdef __KERNEL__ +-# define HZ 1000 /* Internal kernel timer frequency */ ++# define HZ 100 /* Internal kernel timer frequency */ + # define USER_HZ 100 /* .. some user interfaces are in "ticks */ + #define CLOCKS_PER_SEC (USER_HZ) /* like times() */ + #endif